net/mlx5: fix metadata endianness in modify field action
[dpdk.git] drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (priv->pci_dev == NULL)
                return 0;
        switch (priv->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
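                /*
                 * On BlueField NICs the E-Switch manager is the ECPF vport,
                 * whose well-known vport ID is 0xfffe.
                 */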
                return (int16_t)0xfffe;
        default:
                return 0;
        }
}

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, it means this dev_flow is the
         * suffix flow and the layer flags were set by the prefix flow. Use
         * the layer flags from the prefix flow, as the suffix flow may not
         * have the user-defined items since the flow was split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/*
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

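/*
 * Field description tables used by the modify-header conversion below.
 * Each entry is {size, offset, field id} within the protocol header;
 * a zero-size entry terminates each table.
 */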
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
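        /* Flag IP-in-IP and IPv6-in-IP encapsulation as tunnel layers. */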
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_errno = ENOMEM;
                return NULL;
        }
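        /*
         * Publish the new hash list atomically; if another thread installed
         * one concurrently, destroy ours and return the published instance.
         */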
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
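        /* TCI layout: PCP in bits 15..13, DEI in bit 12, VID in bits 11..0. */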
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
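                /* 3-byte field: 16-bit big-endian load plus the trailing byte. */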
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored, the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present, it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
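                /*
                 * E.g. mask 0x00fff000 gives off_b = 12 and
                 * size_b = 32 - 12 - 8 = 12 (with carry_b == 0).
                 */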
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
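        /* conf->vlan_vid is big-endian; place it in the upper 16 bits of data1. */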
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
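                /* E.g. X = 2: 2 * 0xFFFFFFFF == 2^33 - 2 == -2 (mod 2^32). */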
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
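                /* E.g. X = 2: 2 * 0xFFFFFFFF == 2^33 - 2 == -2 (mod 2^32). */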
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

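/* Mapping from mlx5 metadata register enum to modify-header field IDs. */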
static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
                } else {
                        reg_dst.offset = 0;
                        mask = rte_cpu_to_be_32(reg_c0);
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

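                /*
                 * data/mask are big-endian: convert to CPU order, shift into
                 * the reg_c[0] sub-field owned by the PMD, convert back.
                 */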
                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
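        /* Select the metadata register for the steering domain: FDB, Tx or Rx. */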
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

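                /* Shift the value into the reg_c[0] sub-field, keeping big-endian layout. */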
                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1371         /*
1372          * Even though the DSCP bits offset in IPv6 is not byte aligned,
1373          * rdma-core only accepts the DSCP bits byte aligned, in bits
1374          * 0 to 5, to be compatible with IPv4. No need to shift the bits
1375          * in the IPv6 case as rdma-core requires a byte aligned value.
1376          */
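        /*
         * For example (illustrative value): conf->dscp = 46 (0x2e) is
         * stored in vtc_flow as 0x2e with mask 0x3f, i.e. in bits 0-5,
         * rather than at the header offset of bits 22-27.
         */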
1377         ipv6.hdr.vtc_flow = conf->dscp;
1378         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1379         item.spec = &ipv6;
1380         item.mask = &ipv6_mask;
1381         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1382                                              MLX5_MODIFICATION_TYPE_SET, error);
1383 }
1384
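/**
 * Return the bit width of a modify_field field ID.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] field
 *   Field ID to query.
 * @param[in] inherit
 *   Inherited width for POINTER/VALUE fields, negative if none.
 * @param[in] attr
 *   Attributes of the flow that includes the field.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Field width in bits.
 */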
1385 static int
1386 mlx5_flow_item_field_width(struct rte_eth_dev *dev,
1387                            enum rte_flow_field_id field, int inherit,
1388                            const struct rte_flow_attr *attr,
1389                            struct rte_flow_error *error)
1390 {
1391         struct mlx5_priv *priv = dev->data->dev_private;
1392
1393         switch (field) {
1394         case RTE_FLOW_FIELD_START:
1395                 return 32;
1396         case RTE_FLOW_FIELD_MAC_DST:
1397         case RTE_FLOW_FIELD_MAC_SRC:
1398                 return 48;
1399         case RTE_FLOW_FIELD_VLAN_TYPE:
1400                 return 16;
1401         case RTE_FLOW_FIELD_VLAN_ID:
1402                 return 12;
1403         case RTE_FLOW_FIELD_MAC_TYPE:
1404                 return 16;
1405         case RTE_FLOW_FIELD_IPV4_DSCP:
1406                 return 6;
1407         case RTE_FLOW_FIELD_IPV4_TTL:
1408                 return 8;
1409         case RTE_FLOW_FIELD_IPV4_SRC:
1410         case RTE_FLOW_FIELD_IPV4_DST:
1411                 return 32;
1412         case RTE_FLOW_FIELD_IPV6_DSCP:
1413                 return 6;
1414         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1415                 return 8;
1416         case RTE_FLOW_FIELD_IPV6_SRC:
1417         case RTE_FLOW_FIELD_IPV6_DST:
1418                 return 128;
1419         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1420         case RTE_FLOW_FIELD_TCP_PORT_DST:
1421                 return 16;
1422         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1423         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1424                 return 32;
1425         case RTE_FLOW_FIELD_TCP_FLAGS:
1426                 return 9;
1427         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1428         case RTE_FLOW_FIELD_UDP_PORT_DST:
1429                 return 16;
1430         case RTE_FLOW_FIELD_VXLAN_VNI:
1431         case RTE_FLOW_FIELD_GENEVE_VNI:
1432                 return 24;
1433         case RTE_FLOW_FIELD_GTP_TEID:
1434         case RTE_FLOW_FIELD_TAG:
1435                 return 32;
1436         case RTE_FLOW_FIELD_MARK:
1437                 return __builtin_popcount(priv->sh->dv_mark_mask);
1438         case RTE_FLOW_FIELD_META:
1439                 return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
1440                         __builtin_popcount(priv->sh->dv_meta_mask) : 32;
1441         case RTE_FLOW_FIELD_POINTER:
1442         case RTE_FLOW_FIELD_VALUE:
1443                 return inherit < 0 ? 0 : inherit;
1444         default:
1445                 MLX5_ASSERT(false);
1446         }
1447         return 0;
1448 }
1449
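/**
 * Convert a modify_field field ID to modify-header fields and mask.
 *
 * @param[in] data
 *   Field description (ID, level, offset).
 * @param[out] info
 *   Array of modify-header fields to fill.
 * @param[out] mask
 *   Big-endian mask array to fill, or NULL for a copy destination.
 * @param[in] width
 *   Number of bits to modify.
 * @param[out] shift
 *   Bit shift to apply (set for metadata in REG_C_0).
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of the flow that includes the field.
 * @param[out] error
 *   Pointer to the error structure.
 */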
1450 static void
1451 mlx5_flow_field_id_to_modify_info
1452                 (const struct rte_flow_action_modify_data *data,
1453                  struct field_modify_info *info, uint32_t *mask,
1454                  uint32_t width, uint32_t *shift, struct rte_eth_dev *dev,
1455                  const struct rte_flow_attr *attr, struct rte_flow_error *error)
1456 {
1457         struct mlx5_priv *priv = dev->data->dev_private;
1458         uint32_t idx = 0;
1459         uint32_t off = 0;
1460
1461         switch (data->field) {
1462         case RTE_FLOW_FIELD_START:
1463                 /* not supported yet */
1464                 MLX5_ASSERT(false);
1465                 break;
1466         case RTE_FLOW_FIELD_MAC_DST:
1467                 off = data->offset > 16 ? data->offset - 16 : 0;
1468                 if (mask) {
1469                         if (data->offset < 16) {
1470                                 info[idx] = (struct field_modify_info){2, 4,
1471                                                 MLX5_MODI_OUT_DMAC_15_0};
1472                                 if (width < 16) {
1473                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1474                                                                  (16 - width));
1475                                         width = 0;
1476                                 } else {
1477                                         mask[1] = RTE_BE16(0xffff);
1478                                         width -= 16;
1479                                 }
1480                                 if (!width)
1481                                         break;
1482                                 ++idx;
1483                         }
1484                         info[idx] = (struct field_modify_info){4, 0,
1485                                                 MLX5_MODI_OUT_DMAC_47_16};
1486                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1487                                                     (32 - width)) << off);
1488                 } else {
1489                         if (data->offset < 16)
1490                                 info[idx++] = (struct field_modify_info){2, 0,
1491                                                 MLX5_MODI_OUT_DMAC_15_0};
1492                         info[idx] = (struct field_modify_info){4, off,
1493                                                 MLX5_MODI_OUT_DMAC_47_16};
1494                 }
1495                 break;
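        /*
         * The MAC_DST case above, worked through for a full modify
         * (offset 0, width 48): info[0] covers DMAC bits 15..0 with
         * mask[1] = 0xffff, and info[1] covers DMAC bits 47..16 with
         * mask[0] = 0xffffffff; both masks are stored big-endian.
         * The MAC_SRC case below is identical, using the SMAC fields.
         */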
1496         case RTE_FLOW_FIELD_MAC_SRC:
1497                 off = data->offset > 16 ? data->offset - 16 : 0;
1498                 if (mask) {
1499                         if (data->offset < 16) {
1500                                 info[idx] = (struct field_modify_info){2, 4,
1501                                                 MLX5_MODI_OUT_SMAC_15_0};
1502                                 if (width < 16) {
1503                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1504                                                                  (16 - width));
1505                                         width = 0;
1506                                 } else {
1507                                         mask[1] = RTE_BE16(0xffff);
1508                                         width -= 16;
1509                                 }
1510                                 if (!width)
1511                                         break;
1512                                 ++idx;
1513                         }
1514                         info[idx] = (struct field_modify_info){4, 0,
1515                                                 MLX5_MODI_OUT_SMAC_47_16};
1516                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1517                                                     (32 - width)) << off);
1518                 } else {
1519                         if (data->offset < 16)
1520                                 info[idx++] = (struct field_modify_info){2, 0,
1521                                                 MLX5_MODI_OUT_SMAC_15_0};
1522                         info[idx] = (struct field_modify_info){4, off,
1523                                                 MLX5_MODI_OUT_SMAC_47_16};
1524                 }
1525                 break;
1526         case RTE_FLOW_FIELD_VLAN_TYPE:
1527                 /* not supported yet */
1528                 break;
1529         case RTE_FLOW_FIELD_VLAN_ID:
1530                 info[idx] = (struct field_modify_info){2, 0,
1531                                         MLX5_MODI_OUT_FIRST_VID};
1532                 if (mask)
1533                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1534                 break;
1535         case RTE_FLOW_FIELD_MAC_TYPE:
1536                 info[idx] = (struct field_modify_info){2, 0,
1537                                         MLX5_MODI_OUT_ETHERTYPE};
1538                 if (mask)
1539                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1540                 break;
1541         case RTE_FLOW_FIELD_IPV4_DSCP:
1542                 info[idx] = (struct field_modify_info){1, 0,
1543                                         MLX5_MODI_OUT_IP_DSCP};
1544                 if (mask)
1545                         mask[idx] = 0x3f >> (6 - width);
1546                 break;
1547         case RTE_FLOW_FIELD_IPV4_TTL:
1548                 info[idx] = (struct field_modify_info){1, 0,
1549                                         MLX5_MODI_OUT_IPV4_TTL};
1550                 if (mask)
1551                         mask[idx] = 0xff >> (8 - width);
1552                 break;
1553         case RTE_FLOW_FIELD_IPV4_SRC:
1554                 info[idx] = (struct field_modify_info){4, 0,
1555                                         MLX5_MODI_OUT_SIPV4};
1556                 if (mask)
1557                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1558                                                      (32 - width));
1559                 break;
1560         case RTE_FLOW_FIELD_IPV4_DST:
1561                 info[idx] = (struct field_modify_info){4, 0,
1562                                         MLX5_MODI_OUT_DIPV4};
1563                 if (mask)
1564                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1565                                                      (32 - width));
1566                 break;
1567         case RTE_FLOW_FIELD_IPV6_DSCP:
1568                 info[idx] = (struct field_modify_info){1, 0,
1569                                         MLX5_MODI_OUT_IP_DSCP};
1570                 if (mask)
1571                         mask[idx] = 0x3f >> (6 - width);
1572                 break;
1573         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1574                 info[idx] = (struct field_modify_info){1, 0,
1575                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1576                 if (mask)
1577                         mask[idx] = 0xff >> (8 - width);
1578                 break;
1579         case RTE_FLOW_FIELD_IPV6_SRC:
1580                 if (mask) {
1581                         if (data->offset < 32) {
1582                                 info[idx] = (struct field_modify_info){4, 12,
1583                                                 MLX5_MODI_OUT_SIPV6_31_0};
1584                                 if (width < 32) {
1585                                         mask[3] =
1586                                                 rte_cpu_to_be_32(0xffffffff >>
1587                                                                  (32 - width));
1588                                         width = 0;
1589                                 } else {
1590                                         mask[3] = RTE_BE32(0xffffffff);
1591                                         width -= 32;
1592                                 }
1593                                 if (!width)
1594                                         break;
1595                                 ++idx;
1596                         }
1597                         if (data->offset < 64) {
1598                                 info[idx] = (struct field_modify_info){4, 8,
1599                                                 MLX5_MODI_OUT_SIPV6_63_32};
1600                                 if (width < 32) {
1601                                         mask[2] =
1602                                                 rte_cpu_to_be_32(0xffffffff >>
1603                                                                  (32 - width));
1604                                         width = 0;
1605                                 } else {
1606                                         mask[2] = RTE_BE32(0xffffffff);
1607                                         width -= 32;
1608                                 }
1609                                 if (!width)
1610                                         break;
1611                                 ++idx;
1612                         }
1613                         if (data->offset < 96) {
1614                                 info[idx] = (struct field_modify_info){4, 4,
1615                                                 MLX5_MODI_OUT_SIPV6_95_64};
1616                                 if (width < 32) {
1617                                         mask[1] =
1618                                                 rte_cpu_to_be_32(0xffffffff >>
1619                                                                  (32 - width));
1620                                         width = 0;
1621                                 } else {
1622                                         mask[1] = RTE_BE32(0xffffffff);
1623                                         width -= 32;
1624                                 }
1625                                 if (!width)
1626                                         break;
1627                                 ++idx;
1628                         }
1629                         info[idx] = (struct field_modify_info){4, 0,
1630                                                 MLX5_MODI_OUT_SIPV6_127_96};
1631                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1632                 } else {
1633                         if (data->offset < 32)
1634                                 info[idx++] = (struct field_modify_info){4, 0,
1635                                                 MLX5_MODI_OUT_SIPV6_31_0};
1636                         if (data->offset < 64)
1637                                 info[idx++] = (struct field_modify_info){4, 0,
1638                                                 MLX5_MODI_OUT_SIPV6_63_32};
1639                         if (data->offset < 96)
1640                                 info[idx++] = (struct field_modify_info){4, 0,
1641                                                 MLX5_MODI_OUT_SIPV6_95_64};
1642                         if (data->offset < 128)
1643                                 info[idx++] = (struct field_modify_info){4, 0,
1644                                                 MLX5_MODI_OUT_SIPV6_127_96};
1645                 }
1646                 break;
1647         case RTE_FLOW_FIELD_IPV6_DST:
1648                 if (mask) {
1649                         if (data->offset < 32) {
1650                                 info[idx] = (struct field_modify_info){4, 12,
1651                                                 MLX5_MODI_OUT_DIPV6_31_0};
1652                                 if (width < 32) {
1653                                         mask[3] =
1654                                                 rte_cpu_to_be_32(0xffffffff >>
1655                                                                  (32 - width));
1656                                         width = 0;
1657                                 } else {
1658                                         mask[3] = RTE_BE32(0xffffffff);
1659                                         width -= 32;
1660                                 }
1661                                 if (!width)
1662                                         break;
1663                                 ++idx;
1664                         }
1665                         if (data->offset < 64) {
1666                                 info[idx] = (struct field_modify_info){4, 8,
1667                                                 MLX5_MODI_OUT_DIPV6_63_32};
1668                                 if (width < 32) {
1669                                         mask[2] =
1670                                                 rte_cpu_to_be_32(0xffffffff >>
1671                                                                  (32 - width));
1672                                         width = 0;
1673                                 } else {
1674                                         mask[2] = RTE_BE32(0xffffffff);
1675                                         width -= 32;
1676                                 }
1677                                 if (!width)
1678                                         break;
1679                                 ++idx;
1680                         }
1681                         if (data->offset < 96) {
1682                                 info[idx] = (struct field_modify_info){4, 4,
1683                                                 MLX5_MODI_OUT_DIPV6_95_64};
1684                                 if (width < 32) {
1685                                         mask[1] =
1686                                                 rte_cpu_to_be_32(0xffffffff >>
1687                                                                  (32 - width));
1688                                         width = 0;
1689                                 } else {
1690                                         mask[1] = RTE_BE32(0xffffffff);
1691                                         width -= 32;
1692                                 }
1693                                 if (!width)
1694                                         break;
1695                                 ++idx;
1696                         }
1697                         info[idx] = (struct field_modify_info){4, 0,
1698                                                 MLX5_MODI_OUT_DIPV6_127_96};
1699                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1700                 } else {
1701                         if (data->offset < 32)
1702                                 info[idx++] = (struct field_modify_info){4, 0,
1703                                                 MLX5_MODI_OUT_DIPV6_31_0};
1704                         if (data->offset < 64)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_63_32};
1707                         if (data->offset < 96)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_95_64};
1710                         if (data->offset < 128)
1711                                 info[idx++] = (struct field_modify_info){4, 0,
1712                                                 MLX5_MODI_OUT_DIPV6_127_96};
1713                 }
1714                 break;
1715         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1716                 info[idx] = (struct field_modify_info){2, 0,
1717                                         MLX5_MODI_OUT_TCP_SPORT};
1718                 if (mask)
1719                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1720                 break;
1721         case RTE_FLOW_FIELD_TCP_PORT_DST:
1722                 info[idx] = (struct field_modify_info){2, 0,
1723                                         MLX5_MODI_OUT_TCP_DPORT};
1724                 if (mask)
1725                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1726                 break;
1727         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1728                 info[idx] = (struct field_modify_info){4, 0,
1729                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1730                 if (mask)
1731                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1732                                                      (32 - width));
1733                 break;
1734         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1735                 info[idx] = (struct field_modify_info){4, 0,
1736                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1737                 if (mask)
1738                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1739                                                      (32 - width));
1740                 break;
1741         case RTE_FLOW_FIELD_TCP_FLAGS:
1742                 info[idx] = (struct field_modify_info){2, 0,
1743                                         MLX5_MODI_OUT_TCP_FLAGS};
1744                 if (mask)
1745                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1746                 break;
1747         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1748                 info[idx] = (struct field_modify_info){2, 0,
1749                                         MLX5_MODI_OUT_UDP_SPORT};
1750                 if (mask)
1751                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1752                 break;
1753         case RTE_FLOW_FIELD_UDP_PORT_DST:
1754                 info[idx] = (struct field_modify_info){2, 0,
1755                                         MLX5_MODI_OUT_UDP_DPORT};
1756                 if (mask)
1757                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1758                 break;
1759         case RTE_FLOW_FIELD_VXLAN_VNI:
1760                 /* not supported yet */
1761                 break;
1762         case RTE_FLOW_FIELD_GENEVE_VNI:
1763                 /* not supported yet */
1764                 break;
1765         case RTE_FLOW_FIELD_GTP_TEID:
1766                 info[idx] = (struct field_modify_info){4, 0,
1767                                         MLX5_MODI_GTP_TEID};
1768                 if (mask)
1769                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1770                                                      (32 - width));
1771                 break;
1772         case RTE_FLOW_FIELD_TAG:
1773                 {
1774                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1775                                                    data->level, error);
1776                         if (reg < 0)
1777                                 return;
1778                         MLX5_ASSERT(reg != REG_NON);
1779                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1780                         info[idx] = (struct field_modify_info){4, 0,
1781                                                 reg_to_field[reg]};
1782                         if (mask)
1783                                 mask[idx] =
1784                                         rte_cpu_to_be_32(0xffffffff >>
1785                                                          (32 - width));
1786                 }
1787                 break;
1788         case RTE_FLOW_FIELD_MARK:
1789                 {
1790                         uint32_t mark_mask = priv->sh->dv_mark_mask;
1791                         uint32_t mark_count = __builtin_popcount(mark_mask);
1792                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1793                                                        0, error);
1794                         if (reg < 0)
1795                                 return;
1796                         MLX5_ASSERT(reg != REG_NON);
1797                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1798                         info[idx] = (struct field_modify_info){4, 0,
1799                                                 reg_to_field[reg]};
1800                         if (mask)
1801                                 mask[idx] = rte_cpu_to_be_32((mark_mask >>
1802                                          (mark_count - width)) & mark_mask);
1803                 }
1804                 break;
1805         case RTE_FLOW_FIELD_META:
1806                 {
1807                         uint32_t meta_mask = priv->sh->dv_meta_mask;
1808                         uint32_t meta_count = __builtin_popcount(meta_mask);
1809                         uint32_t msk_c0 =
1810                                 rte_cpu_to_be_32(priv->sh->dv_regc0_mask);
1811                         uint32_t shl_c0 = rte_bsf32(msk_c0);
1812                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1813                         if (reg < 0)
1814                                 return;
1815                         MLX5_ASSERT(reg != REG_NON);
1816                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1817                         if (reg == REG_C_0)
1818                                 *shift = shl_c0;
1819                         info[idx] = (struct field_modify_info){4, 0,
1820                                                 reg_to_field[reg]};
1821                         if (mask)
1822                                 mask[idx] = rte_cpu_to_be_32((meta_mask >>
1823                                         (meta_count - width)) & meta_mask);
1824                 }
1825                 break;
1826         case RTE_FLOW_FIELD_POINTER:
1827         case RTE_FLOW_FIELD_VALUE:
1828         default:
1829                 MLX5_ASSERT(false);
1830                 break;
1831         }
1832 }
1833
1834 /**
1835  * Convert modify_field action to DV specification.
1836  *
1837  * @param[in] dev
1838  *   Pointer to the rte_eth_dev structure.
1839  * @param[in,out] resource
1840  *   Pointer to the modify-header resource.
1841  * @param[in] action
1842  *   Pointer to action specification.
1843  * @param[in] attr
1844  *   Attributes of the flow that includes this action.
1845  * @param[out] error
1846  *   Pointer to the error structure.
1847  *
1848  * @return
1849  *   0 on success, a negative errno value otherwise and rte_errno is set.
1850  */
1851 static int
1852 flow_dv_convert_action_modify_field
1853                         (struct rte_eth_dev *dev,
1854                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1855                          const struct rte_flow_action *action,
1856                          const struct rte_flow_attr *attr,
1857                          struct rte_flow_error *error)
1858 {
1859         const struct rte_flow_action_modify_field *conf =
1860                 (const struct rte_flow_action_modify_field *)(action->conf);
1861         struct rte_flow_item item = {
1862                 .spec = NULL,
1863                 .mask = NULL
1864         };
1865         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1866                                                                 {0, 0, 0} };
1867         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1868                                                                 {0, 0, 0} };
1869         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1870         uint32_t type, meta = 0;
1871         uint32_t shift = 0;
1872
1873         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1874             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1875                 type = MLX5_MODIFICATION_TYPE_SET;
1876                 /* For SET, fill the destination field (field) first. */
1877                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1878                                                   conf->width, &shift, dev,
1879                                                   attr, error);
1880                 item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1881                                         (void *)(uintptr_t)conf->src.pvalue :
1882                                         (void *)(uintptr_t)&conf->src.value;
1883                 if (conf->dst.field == RTE_FLOW_FIELD_META) {
1884                         meta = *(const unaligned_uint32_t *)item.spec;
1885                         meta = rte_cpu_to_be_32(meta);
1886                         item.spec = &meta;
1887                 }
1888         } else {
1889                 type = MLX5_MODIFICATION_TYPE_COPY;
1890                 /* For COPY, fill the destination field (dcopy) without a mask. */
1891                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1892                                                   conf->width, &shift, dev,
1893                                                   attr, error);
1894                 /* Then construct the source field (field) with its mask. */
1895                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1896                                                   conf->width, &shift,
1897                                                   dev, attr, error);
1898         }
1899         item.mask = &mask;
1900         return flow_dv_convert_modify_action(&item,
1901                         field, dcopy, resource, type, error);
1902 }
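
/*
 * A minimal application-side sketch (illustrative values): a COPY-type
 * conversion above serves, e.g., copying the TCP source port into the
 * low 16 bits of TAG index 0:
 *
 *        struct rte_flow_action_modify_field conf = {
 *                .operation = RTE_FLOW_MODIFY_SET,
 *                .dst = { .field = RTE_FLOW_FIELD_TAG, .level = 0 },
 *                .src = { .field = RTE_FLOW_FIELD_TCP_PORT_SRC },
 *                .width = 16,
 *        };
 */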
1903
1904 /**
1905  * Validate MARK item.
1906  *
1907  * @param[in] dev
1908  *   Pointer to the rte_eth_dev structure.
1909  * @param[in] item
1910  *   Item specification.
1911  * @param[in] attr
1912  *   Attributes of flow that includes this item.
1913  * @param[out] error
1914  *   Pointer to error structure.
1915  *
1916  * @return
1917  *   0 on success, a negative errno value otherwise and rte_errno is set.
1918  */
1919 static int
1920 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1921                            const struct rte_flow_item *item,
1922                            const struct rte_flow_attr *attr __rte_unused,
1923                            struct rte_flow_error *error)
1924 {
1925         struct mlx5_priv *priv = dev->data->dev_private;
1926         struct mlx5_dev_config *config = &priv->config;
1927         const struct rte_flow_item_mark *spec = item->spec;
1928         const struct rte_flow_item_mark *mask = item->mask;
1929         const struct rte_flow_item_mark nic_mask = {
1930                 .id = priv->sh->dv_mark_mask,
1931         };
1932         int ret;
1933
1934         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1935                 return rte_flow_error_set(error, ENOTSUP,
1936                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1937                                           "extended metadata feature"
1938                                           " isn't enabled");
1939         if (!mlx5_flow_ext_mreg_supported(dev))
1940                 return rte_flow_error_set(error, ENOTSUP,
1941                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1942                                           "extended metadata register"
1943                                           " isn't supported");
1944         if (!nic_mask.id)
1945                 return rte_flow_error_set(error, ENOTSUP,
1946                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1947                                           "extended metadata register"
1948                                           " isn't available");
1949         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1950         if (ret < 0)
1951                 return ret;
1952         if (!spec)
1953                 return rte_flow_error_set(error, EINVAL,
1954                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1955                                           item->spec,
1956                                           "data cannot be empty");
1957         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1958                 return rte_flow_error_set(error, EINVAL,
1959                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1960                                           &spec->id,
1961                                           "mark id exceeds the limit");
1962         if (!mask)
1963                 mask = &nic_mask;
1964         if (!mask->id)
1965                 return rte_flow_error_set(error, EINVAL,
1966                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1967                                         "mask cannot be zero");
1968
1969         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1970                                         (const uint8_t *)&nic_mask,
1971                                         sizeof(struct rte_flow_item_mark),
1972                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1973         if (ret < 0)
1974                 return ret;
1975         return 0;
1976 }
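
/*
 * A matching sketch (illustrative value): the validation above covers
 * a MARK pattern item such as:
 *
 *        struct rte_flow_item_mark spec = { .id = 0x55 };
 *        struct rte_flow_item item = {
 *                .type = RTE_FLOW_ITEM_TYPE_MARK,
 *                .spec = &spec,
 *        };
 *
 * A NULL item mask defaults to the device mark mask (nic_mask above).
 */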
1977
1978 /**
1979  * Validate META item.
1980  *
1981  * @param[in] dev
1982  *   Pointer to the rte_eth_dev structure.
1983  * @param[in] item
1984  *   Item specification.
1985  * @param[in] attr
1986  *   Attributes of flow that includes this item.
1987  * @param[out] error
1988  *   Pointer to error structure.
1989  *
1990  * @return
1991  *   0 on success, a negative errno value otherwise and rte_errno is set.
1992  */
1993 static int
1994 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1995                            const struct rte_flow_item *item,
1996                            const struct rte_flow_attr *attr,
1997                            struct rte_flow_error *error)
1998 {
1999         struct mlx5_priv *priv = dev->data->dev_private;
2000         struct mlx5_dev_config *config = &priv->config;
2001         const struct rte_flow_item_meta *spec = item->spec;
2002         const struct rte_flow_item_meta *mask = item->mask;
2003         struct rte_flow_item_meta nic_mask = {
2004                 .data = UINT32_MAX
2005         };
2006         int reg;
2007         int ret;
2008
2009         if (!spec)
2010                 return rte_flow_error_set(error, EINVAL,
2011                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2012                                           item->spec,
2013                                           "data cannot be empty");
2014         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2015                 if (!mlx5_flow_ext_mreg_supported(dev))
2016                         return rte_flow_error_set(error, ENOTSUP,
2017                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2018                                           "extended metadata register"
2019                                           " isn't supported");
2020                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2021                 if (reg < 0)
2022                         return reg;
2023                 if (reg == REG_NON)
2024                         return rte_flow_error_set(error, ENOTSUP,
2025                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2026                                         "unavailable extended metadata register");
2027                 if (reg == REG_B)
2028                         return rte_flow_error_set(error, ENOTSUP,
2029                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2030                                           "match on reg_b "
2031                                           "isn't supported");
2032                 if (reg != REG_A)
2033                         nic_mask.data = priv->sh->dv_meta_mask;
2034         } else {
2035                 if (attr->transfer)
2036                         return rte_flow_error_set(error, ENOTSUP,
2037                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2038                                         "extended metadata feature "
2039                                         "should be enabled when "
2040                                         "meta item is requested "
2041                                         "with e-switch mode ");
2042                 if (attr->ingress)
2043                         return rte_flow_error_set(error, ENOTSUP,
2044                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2045                                         "match on metadata for ingress "
2046                                         "is not supported in legacy "
2047                                         "metadata mode");
2048         }
2049         if (!mask)
2050                 mask = &rte_flow_item_meta_mask;
2051         if (!mask->data)
2052                 return rte_flow_error_set(error, EINVAL,
2053                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2054                                         "mask cannot be zero");
2055
2056         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2057                                         (const uint8_t *)&nic_mask,
2058                                         sizeof(struct rte_flow_item_meta),
2059                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2060         return ret;
2061 }
2062
2063 /**
2064  * Validate TAG item.
2065  *
2066  * @param[in] dev
2067  *   Pointer to the rte_eth_dev structure.
2068  * @param[in] item
2069  *   Item specification.
2070  * @param[in] attr
2071  *   Attributes of flow that includes this item.
2072  * @param[out] error
2073  *   Pointer to error structure.
2074  *
2075  * @return
2076  *   0 on success, a negative errno value otherwise and rte_errno is set.
2077  */
2078 static int
2079 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2080                           const struct rte_flow_item *item,
2081                           const struct rte_flow_attr *attr __rte_unused,
2082                           struct rte_flow_error *error)
2083 {
2084         const struct rte_flow_item_tag *spec = item->spec;
2085         const struct rte_flow_item_tag *mask = item->mask;
2086         const struct rte_flow_item_tag nic_mask = {
2087                 .data = RTE_BE32(UINT32_MAX),
2088                 .index = 0xff,
2089         };
2090         int ret;
2091
2092         if (!mlx5_flow_ext_mreg_supported(dev))
2093                 return rte_flow_error_set(error, ENOTSUP,
2094                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2095                                           "extensive metadata register"
2096                                           " isn't supported");
2097         if (!spec)
2098                 return rte_flow_error_set(error, EINVAL,
2099                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2100                                           item->spec,
2101                                           "data cannot be empty");
2102         if (!mask)
2103                 mask = &rte_flow_item_tag_mask;
2104         if (!mask->data)
2105                 return rte_flow_error_set(error, EINVAL,
2106                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2107                                         "mask cannot be zero");
2108
2109         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2110                                         (const uint8_t *)&nic_mask,
2111                                         sizeof(struct rte_flow_item_tag),
2112                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2113         if (ret < 0)
2114                 return ret;
2115         if (mask->index != 0xff)
2116                 return rte_flow_error_set(error, EINVAL,
2117                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2118                                           "partial mask for tag index"
2119                                           " is not supported");
2120         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2121         if (ret < 0)
2122                 return ret;
2123         MLX5_ASSERT(ret != REG_NON);
2124         return 0;
2125 }
2126
2127 /**
2128  * Validate port ID item.
2129  *
2130  * @param[in] dev
2131  *   Pointer to the rte_eth_dev structure.
2132  * @param[in] item
2133  *   Item specification.
2134  * @param[in] attr
2135  *   Attributes of flow that includes this item.
2136  * @param[in] item_flags
2137  *   Bit-fields that hold the items detected until now.
2138  * @param[out] error
2139  *   Pointer to error structure.
2140  *
2141  * @return
2142  *   0 on success, a negative errno value otherwise and rte_errno is set.
2143  */
2144 static int
2145 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2146                               const struct rte_flow_item *item,
2147                               const struct rte_flow_attr *attr,
2148                               uint64_t item_flags,
2149                               struct rte_flow_error *error)
2150 {
2151         const struct rte_flow_item_port_id *spec = item->spec;
2152         const struct rte_flow_item_port_id *mask = item->mask;
2153         const struct rte_flow_item_port_id switch_mask = {
2154                         .id = 0xffffffff,
2155         };
2156         struct mlx5_priv *esw_priv;
2157         struct mlx5_priv *dev_priv;
2158         int ret;
2159
2160         if (!attr->transfer)
2161                 return rte_flow_error_set(error, EINVAL,
2162                                           RTE_FLOW_ERROR_TYPE_ITEM,
2163                                           NULL,
2164                                           "match on port id is valid only"
2165                                           " when transfer flag is enabled");
2166         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2167                 return rte_flow_error_set(error, ENOTSUP,
2168                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2169                                           "multiple source ports are not"
2170                                           " supported");
2171         if (!mask)
2172                 mask = &switch_mask;
2173         if (mask->id != 0xffffffff)
2174                 return rte_flow_error_set(error, ENOTSUP,
2175                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2176                                            mask,
2177                                            "no support for partial mask on"
2178                                            " \"id\" field");
2179         ret = mlx5_flow_item_acceptable
2180                                 (item, (const uint8_t *)mask,
2181                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2182                                  sizeof(struct rte_flow_item_port_id),
2183                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2184         if (ret)
2185                 return ret;
2186         if (!spec)
2187                 return 0;
2188         if (spec->id == MLX5_PORT_ESW_MGR)
2189                 return 0;
2190         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2191         if (!esw_priv)
2192                 return rte_flow_error_set(error, rte_errno,
2193                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2194                                           "failed to obtain E-Switch info for"
2195                                           " port");
2196         dev_priv = mlx5_dev_to_eswitch_info(dev);
2197         if (!dev_priv)
2198                 return rte_flow_error_set(error, rte_errno,
2199                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2200                                           NULL,
2201                                           "failed to obtain E-Switch info");
2202         if (esw_priv->domain_id != dev_priv->domain_id)
2203                 return rte_flow_error_set(error, EINVAL,
2204                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2205                                           "cannot match on a port from a"
2206                                           " different E-Switch");
2207         return 0;
2208 }
2209
2210 /**
2211  * Validate VLAN item.
2212  *
2213  * @param[in] item
2214  *   Item specification.
2215  * @param[in] item_flags
2216  *   Bit-fields that hold the items detected until now.
2217  * @param[in] dev
2218  *   Ethernet device flow is being created on.
2219  * @param[out] error
2220  *   Pointer to error structure.
2221  *
2222  * @return
2223  *   0 on success, a negative errno value otherwise and rte_errno is set.
2224  */
2225 static int
2226 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2227                            uint64_t item_flags,
2228                            struct rte_eth_dev *dev,
2229                            struct rte_flow_error *error)
2230 {
2231         const struct rte_flow_item_vlan *mask = item->mask;
2232         const struct rte_flow_item_vlan nic_mask = {
2233                 .tci = RTE_BE16(UINT16_MAX),
2234                 .inner_type = RTE_BE16(UINT16_MAX),
2235                 .has_more_vlan = 1,
2236         };
2237         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2238         int ret;
2239         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2240                                         MLX5_FLOW_LAYER_INNER_L4) :
2241                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2242                                         MLX5_FLOW_LAYER_OUTER_L4);
2243         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2244                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2245
2246         if (item_flags & vlanm)
2247                 return rte_flow_error_set(error, EINVAL,
2248                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2249                                           "multiple VLAN layers not supported");
2250         else if ((item_flags & l34m) != 0)
2251                 return rte_flow_error_set(error, EINVAL,
2252                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2253                                           "VLAN cannot follow L3/L4 layer");
2254         if (!mask)
2255                 mask = &rte_flow_item_vlan_mask;
2256         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2257                                         (const uint8_t *)&nic_mask,
2258                                         sizeof(struct rte_flow_item_vlan),
2259                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2260         if (ret)
2261                 return ret;
2262         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2263                 struct mlx5_priv *priv = dev->data->dev_private;
2264
2265                 if (priv->vmwa_context) {
2266                         /*
2267                          * A non-NULL context means we have a virtual machine
2268                          * and SR-IOV enabled, so we have to create a VLAN
2269                          * interface to make the hypervisor set up the E-Switch
2270                          * vport context correctly. We avoid creating multiple
2271                          * VLAN interfaces, so we cannot support a VLAN tag mask.
2272                          */
2273                         return rte_flow_error_set(error, EINVAL,
2274                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2275                                                   item,
2276                                                   "VLAN tag mask is not"
2277                                                   " supported in virtual"
2278                                                   " environment");
2279                 }
2280         }
2281         return 0;
2282 }
2283
2284 /*
2285  * GTP flags are contained in 1 byte of the format:
2286  * -------------------------------------------
2287  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2288  * |-----------------------------------------|
2289  * | value | Version | PT | Res | E | S | PN |
2290  * -------------------------------------------
2291  *
2292  * Matching is supported only for GTP flags E, S, PN.
2293  */
2294 #define MLX5_GTP_FLAGS_MASK     0x07
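
/*
 * For example (illustrative): to match GTP packets with the E bit set
 * and S/PN clear, use spec.v_pt_rsv_flags = 0x04 with
 * mask.v_pt_rsv_flags = 0x07; spec bits outside MLX5_GTP_FLAGS_MASK
 * are rejected by the validation below.
 */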
2295
2296 /**
2297  * Validate GTP item.
2298  *
2299  * @param[in] dev
2300  *   Pointer to the rte_eth_dev structure.
2301  * @param[in] item
2302  *   Item specification.
2303  * @param[in] item_flags
2304  *   Bit-fields that hold the items detected until now.
2305  * @param[out] error
2306  *   Pointer to error structure.
2307  *
2308  * @return
2309  *   0 on success, a negative errno value otherwise and rte_errno is set.
2310  */
2311 static int
2312 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2313                           const struct rte_flow_item *item,
2314                           uint64_t item_flags,
2315                           struct rte_flow_error *error)
2316 {
2317         struct mlx5_priv *priv = dev->data->dev_private;
2318         const struct rte_flow_item_gtp *spec = item->spec;
2319         const struct rte_flow_item_gtp *mask = item->mask;
2320         const struct rte_flow_item_gtp nic_mask = {
2321                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2322                 .msg_type = 0xff,
2323                 .teid = RTE_BE32(0xffffffff),
2324         };
2325
2326         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2327                 return rte_flow_error_set(error, ENOTSUP,
2328                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2329                                           "GTP support is not enabled");
2330         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2331                 return rte_flow_error_set(error, ENOTSUP,
2332                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2333                                           "multiple tunnel layers not"
2334                                           " supported");
2335         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2336                 return rte_flow_error_set(error, EINVAL,
2337                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2338                                           "no outer UDP layer found");
2339         if (!mask)
2340                 mask = &rte_flow_item_gtp_mask;
2341         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2342                 return rte_flow_error_set(error, ENOTSUP,
2343                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2344                                           "Match is supported for GTP"
2345                                           " flags only");
2346         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2347                                          (const uint8_t *)&nic_mask,
2348                                          sizeof(struct rte_flow_item_gtp),
2349                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2350 }
2351
2352 /**
2353  * Validate GTP PSC item.
2354  *
2355  * @param[in] item
2356  *   Item specification.
2357  * @param[in] last_item
2358  *   Previous validated item in the pattern items.
2359  * @param[in] gtp_item
2360  *   Previous GTP item specification.
2361  * @param[in] attr
2362  *   Pointer to flow attributes.
2363  * @param[out] error
2364  *   Pointer to error structure.
2365  *
2366  * @return
2367  *   0 on success, a negative errno value otherwise and rte_errno is set.
2368  */
2369 static int
2370 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2371                               uint64_t last_item,
2372                               const struct rte_flow_item *gtp_item,
2373                               const struct rte_flow_attr *attr,
2374                               struct rte_flow_error *error)
2375 {
2376         const struct rte_flow_item_gtp *gtp_spec;
2377         const struct rte_flow_item_gtp *gtp_mask;
2378         const struct rte_flow_item_gtp_psc *mask;
2379         const struct rte_flow_item_gtp_psc nic_mask = {
2380                 .hdr.type = 0xF,
2381                 .hdr.qfi = 0x3F,
2382         };
2383
2384         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2385                 return rte_flow_error_set
2386                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2387                          "GTP PSC item must be preceded with GTP item");
2388         gtp_spec = gtp_item->spec;
2389         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2390         /* Reject the rule if the E flag is requested to match zero. */
2391         if (gtp_spec &&
2392                 (gtp_mask->v_pt_rsv_flags &
2393                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2394                 return rte_flow_error_set
2395                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2396                          "GTP E flag must be 1 to match GTP PSC");
2397         /* Check the flow is not created in group zero. */
2398         if (!attr->transfer && !attr->group)
2399                 return rte_flow_error_set
2400                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2401                          "GTP PSC is not supported for group 0");
2402         /* Nothing more to validate if the GTP PSC spec is absent. */
2403         if (!item->spec)
2404                 return 0;
2405         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2406         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2407                                          (const uint8_t *)&nic_mask,
2408                                          sizeof(struct rte_flow_item_gtp_psc),
2409                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2410 }
2411
2412 /**
2413  * Validate IPV4 item.
2414  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2415  * add specific validation of the fragment_offset field.
2416  *
2417  * @param[in] item
2418  *   Item specification.
2419  * @param[in] item_flags
2420  *   Bit-fields that hold the items detected until now.
2421  * @param[out] error
2422  *   Pointer to error structure.
2423  *
2424  * @return
2425  *   0 on success, a negative errno value otherwise and rte_errno is set.
2426  */
2427 static int
2428 flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2429                            const struct rte_flow_item *item,
2430                            uint64_t item_flags, uint64_t last_item,
2431                            uint16_t ether_type, struct rte_flow_error *error)
2432 {
2433         int ret;
2434         struct mlx5_priv *priv = dev->data->dev_private;
2435         const struct rte_flow_item_ipv4 *spec = item->spec;
2436         const struct rte_flow_item_ipv4 *last = item->last;
2437         const struct rte_flow_item_ipv4 *mask = item->mask;
2438         rte_be16_t fragment_offset_spec = 0;
2439         rte_be16_t fragment_offset_last = 0;
2440         struct rte_flow_item_ipv4 nic_ipv4_mask = {
2441                 .hdr = {
2442                         .src_addr = RTE_BE32(0xffffffff),
2443                         .dst_addr = RTE_BE32(0xffffffff),
2444                         .type_of_service = 0xff,
2445                         .fragment_offset = RTE_BE16(0xffff),
2446                         .next_proto_id = 0xff,
2447                         .time_to_live = 0xff,
2448                 },
2449         };
2450
2451         if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2452                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2453                 bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
2454                                priv->config.hca_attr.inner_ipv4_ihl;
2455                 if (!ihl_cap)
2456                         return rte_flow_error_set(error, ENOTSUP,
2457                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2458                                                   item,
2459                                                   "IPV4 ihl offload not supported");
2460                 nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2461         }
2462         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2463                                            ether_type, &nic_ipv4_mask,
2464                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2465         if (ret < 0)
2466                 return ret;
2467         if (spec && mask)
2468                 fragment_offset_spec = spec->hdr.fragment_offset &
2469                                        mask->hdr.fragment_offset;
2470         if (!fragment_offset_spec)
2471                 return 0;
2472         /*
2473          * spec and mask are valid, enforce using full mask to make sure the
2474          * complete value is used correctly.
2475          */
2476         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2477                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2478                 return rte_flow_error_set(error, EINVAL,
2479                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2480                                           item, "must use full mask for"
2481                                           " fragment_offset");
2482         /*
2483          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2484          * indicating this is the first fragment of a fragmented packet.
2485          * This is not yet supported in MLX5; return an appropriate error message.
2486          */
2487         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2488                 return rte_flow_error_set(error, ENOTSUP,
2489                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2490                                           "match on first fragment not "
2491                                           "supported");
2492         if (fragment_offset_spec && !last)
2493                 return rte_flow_error_set(error, ENOTSUP,
2494                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2495                                           "specified value not supported");
2496         /* spec and last are valid, validate the specified range. */
2497         fragment_offset_last = last->hdr.fragment_offset &
2498                                mask->hdr.fragment_offset;
2499         /*
2500          * Match on fragment_offset spec 0x2001 and last 0x3fff
2501          * means MF is 1 and frag-offset is > 0.
2502          * This packet is the second fragment and onward, excluding the last.
2503          * This is not yet supported in MLX5; return an appropriate
2504          * error message.
2505          */
2506         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2507             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2508                 return rte_flow_error_set(error, ENOTSUP,
2509                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2510                                           last, "match on following "
2511                                           "fragments not supported");
2512         /*
2513          * Match on fragment_offset spec 0x0001 and last 0x1fff
2514          * means MF is 0 and frag-offset is > 0.
2515          * This packet is the last fragment of a fragmented packet.
2516          * This is not yet supported in MLX5; return an appropriate
2517          * error message.
2518          */
2519         if (fragment_offset_spec == RTE_BE16(1) &&
2520             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2521                 return rte_flow_error_set(error, ENOTSUP,
2522                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2523                                           last, "match on last "
2524                                           "fragment not supported");
2525         /*
2526          * Match on fragment_offset spec 0x0001 and last 0x3fff
2527          * means MF and/or frag-offset is not 0.
2528          * This is a fragmented packet.
2529          * Other range values are invalid and rejected.
2530          */
2531         if (!(fragment_offset_spec == RTE_BE16(1) &&
2532               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2533                 return rte_flow_error_set(error, ENOTSUP,
2534                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2535                                           "specified range not supported");
2536         return 0;
2537 }
2538
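/*
 * Editor's illustrative sketch (compiled out; the guard macro below is for
 * documentation only and is never defined). Per flow_dv_validate_item_ipv4()
 * above, the only accepted fragment_offset range match is spec 0x0001 with
 * last 0x3fff under a full 0x3fff mask, which matches any fragmented IPv4
 * packet (MF set and/or fragment offset non-zero).
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_ipv4 doc_ipv4_frag_spec = {
	.hdr = { .fragment_offset = RTE_BE16(1) },
};
static const struct rte_flow_item_ipv4 doc_ipv4_frag_last = {
	.hdr = { .fragment_offset = RTE_BE16(0x3fff) },
};
static const struct rte_flow_item_ipv4 doc_ipv4_frag_mask = {
	.hdr = { .fragment_offset = RTE_BE16(0x3fff) },
};
static const struct rte_flow_item doc_ipv4_frag_item = {
	.type = RTE_FLOW_ITEM_TYPE_IPV4,
	.spec = &doc_ipv4_frag_spec,
	.last = &doc_ipv4_frag_last,
	.mask = &doc_ipv4_frag_mask,
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
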
2539 /**
2540  * Validate IPV6 fragment extension item.
2541  *
2542  * @param[in] item
2543  *   Item specification.
2544  * @param[in] item_flags
2545  *   Bit-fields that hold the items detected until now.
2546  * @param[out] error
2547  *   Pointer to error structure.
2548  *
2549  * @return
2550  *   0 on success, a negative errno value otherwise and rte_errno is set.
2551  */
2552 static int
2553 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2554                                     uint64_t item_flags,
2555                                     struct rte_flow_error *error)
2556 {
2557         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2558         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2559         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2560         rte_be16_t frag_data_spec = 0;
2561         rte_be16_t frag_data_last = 0;
2562         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2563         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2564                                       MLX5_FLOW_LAYER_OUTER_L4;
2565         int ret = 0;
2566         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2567                 .hdr = {
2568                         .next_header = 0xff,
2569                         .frag_data = RTE_BE16(0xffff),
2570                 },
2571         };
2572
2573         if (item_flags & l4m)
2574                 return rte_flow_error_set(error, EINVAL,
2575                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2576                                           "ipv6 fragment extension item cannot "
2577                                           "follow L4 item.");
2578         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2579             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2580                 return rte_flow_error_set(error, EINVAL,
2581                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2582                                           "ipv6 fragment extension item must "
2583                                           "follow ipv6 item");
2584         if (spec && mask)
2585                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2586         if (!frag_data_spec)
2587                 return 0;
2588         /*
2589          * spec and mask are valid, enforce using full mask to make sure the
2590          * complete value is used correctly.
2591          */
2592         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2593                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2594                 return rte_flow_error_set(error, EINVAL,
2595                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2596                                           item, "must use full mask for"
2597                                           " frag_data");
2598         /*
2599          * Match on frag_data 0x0001 means the M flag is 1 and frag-offset is 0.
2600          * This is the first fragment of a fragmented packet.
2601          */
2602         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2603                 return rte_flow_error_set(error, ENOTSUP,
2604                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2605                                           "match on first fragment not "
2606                                           "supported");
2607         if (frag_data_spec && !last)
2608                 return rte_flow_error_set(error, EINVAL,
2609                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2610                                           "specified value not supported");
2611         ret = mlx5_flow_item_acceptable
2612                                 (item, (const uint8_t *)mask,
2613                                  (const uint8_t *)&nic_mask,
2614                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2615                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2616         if (ret)
2617                 return ret;
2618         /* spec and last are valid, validate the specified range. */
2619         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2620         /*
2621          * Match on frag_data spec 0x0009 and last 0xfff9
2622          * means M is 1 and frag-offset is > 0.
2623          * This packet is the second fragment and onward, excluding the last.
2624          * This is not yet supported in MLX5; return an appropriate
2625          * error message.
2626          */
2627         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2628                                        RTE_IPV6_EHDR_MF_MASK) &&
2629             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2630                 return rte_flow_error_set(error, ENOTSUP,
2631                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2632                                           last, "match on following "
2633                                           "fragments not supported");
2634         /*
2635          * Match on frag_data spec 0x0008 and last 0xfff8
2636          * means M is 0 and frag-offset is > 0.
2637          * This packet is the last fragment of a fragmented packet.
2638          * This is not yet supported in MLX5; return an appropriate
2639          * error message.
2640          */
2641         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2642             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2643                 return rte_flow_error_set(error, ENOTSUP,
2644                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2645                                           last, "match on last "
2646                                           "fragment not supported");
2647         /* Other range values are invalid and rejected. */
2648         return rte_flow_error_set(error, EINVAL,
2649                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2650                                   "specified range not supported");
2651 }
2652
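/*
 * Editor's illustrative sketch (compiled out, documentation-only guard):
 * with frag_data left unmatched, the validator above accepts the item, so
 * the mere presence of the fragment extension header can be matched to
 * catch any IPv6 fragment. The item must follow an IPv6 item.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item doc_ipv6_any_frag_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
	/* No spec/mask: match header presence only. */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
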
2653 /**
2654  * Validate ASO CT item.
2655  *
2656  * @param[in] dev
2657  *   Pointer to the rte_eth_dev structure.
2658  * @param[in] item
2659  *   Item specification.
2660  * @param[in] item_flags
2661  *   Pointer to bit-fields that hold the items detected until now.
2662  * @param[out] error
2663  *   Pointer to error structure.
2664  *
2665  * @return
2666  *   0 on success, a negative errno value otherwise and rte_errno is set.
2667  */
2668 static int
2669 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2670                              const struct rte_flow_item *item,
2671                              uint64_t *item_flags,
2672                              struct rte_flow_error *error)
2673 {
2674         const struct rte_flow_item_conntrack *spec = item->spec;
2675         const struct rte_flow_item_conntrack *mask = item->mask;
2676         RTE_SET_USED(dev);
2677         uint32_t flags;
2678
2679         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2680                 return rte_flow_error_set(error, EINVAL,
2681                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2682                                           "Only one CT is supported");
2683         if (!mask)
2684                 mask = &rte_flow_item_conntrack_mask;
2685         flags = spec ? (spec->flags & mask->flags) : 0; /* Tolerate NULL spec. */
2686         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2687             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2688              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2689              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2690                 return rte_flow_error_set(error, EINVAL,
2691                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2692                                           "Conflicting status bits");
2693         /* State change also needs to be considered. */
2694         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2695         return 0;
2696 }
2697
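/*
 * Editor's illustrative sketch (compiled out, documentation-only guard): a
 * CONNTRACK item matching packets classified as valid by the CT engine.
 * Combining VALID with any of the INVALID/BAD/DISABLED bits would be
 * rejected above as conflicting status bits.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_conntrack doc_ct_spec = {
	.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
};
static const struct rte_flow_item doc_ct_item = {
	.type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
	.spec = &doc_ct_spec, /* Default mask is used when .mask is NULL. */
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
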
2698 /**
2699  * Validate the pop VLAN action.
2700  *
2701  * @param[in] dev
2702  *   Pointer to the rte_eth_dev structure.
2703  * @param[in] action_flags
2704  *   Holds the actions detected until now.
2705  * @param[in] action
2706  *   Pointer to the pop vlan action.
2707  * @param[in] item_flags
2708  *   The items found in this flow rule.
2709  * @param[in] attr
2710  *   Pointer to flow attributes.
2711  * @param[out] error
2712  *   Pointer to error structure.
2713  *
2714  * @return
2715  *   0 on success, a negative errno value otherwise and rte_errno is set.
2716  */
2717 static int
2718 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2719                                  uint64_t action_flags,
2720                                  const struct rte_flow_action *action,
2721                                  uint64_t item_flags,
2722                                  const struct rte_flow_attr *attr,
2723                                  struct rte_flow_error *error)
2724 {
2725         const struct mlx5_priv *priv = dev->data->dev_private;
2726         struct mlx5_dev_ctx_shared *sh = priv->sh;
2727         bool direction_error = false;
2728
2729         if (!priv->sh->pop_vlan_action)
2730                 return rte_flow_error_set(error, ENOTSUP,
2731                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2732                                           NULL,
2733                                           "pop vlan action is not supported");
2734         /* Pop VLAN on egress is not supported, except in CX6 FDB mode. */
2735         if (attr->transfer) {
2736                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2737                 bool is_cx5 = sh->steering_format_version ==
2738                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2739
2740                 if (fdb_tx && is_cx5)
2741                         direction_error = true;
2742         } else if (attr->egress) {
2743                 direction_error = true;
2744         }
2745         if (direction_error)
2746                 return rte_flow_error_set(error, ENOTSUP,
2747                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2748                                           NULL,
2749                                           "pop vlan action not supported for egress");
2750         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2751                 return rte_flow_error_set(error, ENOTSUP,
2752                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2753                                           "no support for multiple VLAN "
2754                                           "actions");
2755         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2756         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2757             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2758                 return rte_flow_error_set(error, ENOTSUP,
2759                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2760                                           NULL,
2761                                           "cannot pop vlan after decap without "
2762                                           "match on inner vlan in the flow");
2763         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2764         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2765             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2766                 return rte_flow_error_set(error, ENOTSUP,
2767                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2768                                           NULL,
2769                                           "cannot pop vlan without a "
2770                                           "match on (outer) vlan in the flow");
2771         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2772                 return rte_flow_error_set(error, EINVAL,
2773                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2774                                           "wrong action order, port_id should "
2775                                           "be after pop VLAN action");
2776         if (!attr->transfer && priv->representor)
2777                 return rte_flow_error_set(error, ENOTSUP,
2778                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2779                                           "pop vlan action for VF representor "
2780                                           "not supported on NIC table");
2781         return 0;
2782 }
2783
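/*
 * Editor's illustrative sketch (compiled out, documentation-only guard):
 * OF_POP_VLAN placed before the fate action; without a preceding decap the
 * rule's pattern must match an (outer) VLAN, as enforced above.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_queue doc_pop_vlan_queue = { .index = 0 };
static const struct rte_flow_action doc_pop_vlan_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &doc_pop_vlan_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
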
2784 /**
2785  * Get VLAN default info from the VLAN match info in the pattern.
2786  *
2787  * @param[in] items
2788  *   The list of item specifications.
2789  * @param[out] vlan
2790  *   Pointer to the VLAN info structure to fill.
2794  */
2795 static void
2796 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2797                                   struct rte_vlan_hdr *vlan)
2798 {
2799         const struct rte_flow_item_vlan nic_mask = {
2800                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2801                                 MLX5DV_FLOW_VLAN_VID_MASK),
2802                 .inner_type = RTE_BE16(0xffff),
2803         };
2804
2805         if (items == NULL)
2806                 return;
2807         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2808                 int type = items->type;
2809
2810                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2811                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2812                         break;
2813         }
2814         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2815                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2816                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2817
2818                 /* If VLAN item in pattern doesn't contain data, return here. */
2819                 if (!vlan_v)
2820                         return;
2821                 if (!vlan_m)
2822                         vlan_m = &nic_mask;
2823                 /* Only full match values are accepted. */
2824                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2825                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2826                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2827                         vlan->vlan_tci |=
2828                                 rte_be_to_cpu_16(vlan_v->tci &
2829                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2830                 }
2831                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2832                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2833                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2834                         vlan->vlan_tci |=
2835                                 rte_be_to_cpu_16(vlan_v->tci &
2836                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2837                 }
2838                 if (vlan_m->inner_type == nic_mask.inner_type)
2839                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2840                                                            vlan_m->inner_type);
2841         }
2842 }
2843
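/*
 * Editor's illustrative sketch (compiled out, documentation-only guard):
 * how a host-order TCI decomposes under the PCP/VID masks used above,
 * e.g. TCI 0xa005 carries PCP 5 and VID 5.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline void
doc_split_tci(uint16_t tci, uint8_t *pcp, uint16_t *vid)
{
	*pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
	       MLX5DV_FLOW_VLAN_PCP_SHIFT;
	*vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */
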
2844 /**
2845  * Validate the push VLAN action.
2846  *
2847  * @param[in] dev
2848  *   Pointer to the rte_eth_dev structure.
2849  * @param[in] action_flags
2850  *   Holds the actions detected until now.
2851  * @param[in] vlan_m
2852  *   VLAN item mask from the flow pattern, or NULL if not present.
2853  * @param[in] action
2854  *   Pointer to the action structure.
2855  * @param[in] attr
2856  *   Pointer to flow attributes
2857  * @param[out] error
2858  *   Pointer to error structure.
2859  *
2860  * @return
2861  *   0 on success, a negative errno value otherwise and rte_errno is set.
2862  */
2863 static int
2864 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2865                                   uint64_t action_flags,
2866                                   const struct rte_flow_item_vlan *vlan_m,
2867                                   const struct rte_flow_action *action,
2868                                   const struct rte_flow_attr *attr,
2869                                   struct rte_flow_error *error)
2870 {
2871         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2872         const struct mlx5_priv *priv = dev->data->dev_private;
2873         struct mlx5_dev_ctx_shared *sh = priv->sh;
2874         bool direction_error = false;
2875
2876         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2877             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2878                 return rte_flow_error_set(error, EINVAL,
2879                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2880                                           "invalid vlan ethertype");
2881         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2882                 return rte_flow_error_set(error, EINVAL,
2883                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2884                                           "wrong action order, port_id should "
2885                                           "be after push VLAN");
2886         /* Push VLAN on ingress is not supported, except in CX6 FDB mode. */
2887         if (attr->transfer) {
2888                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2889                 bool is_cx5 = sh->steering_format_version ==
2890                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2891
2892                 if (!fdb_tx && is_cx5)
2893                         direction_error = true;
2894         } else if (attr->ingress) {
2895                 direction_error = true;
2896         }
2897         if (direction_error)
2898                 return rte_flow_error_set(error, ENOTSUP,
2899                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2900                                           NULL,
2901                                           "push vlan action not supported for ingress");
2902         if (!attr->transfer && priv->representor)
2903                 return rte_flow_error_set(error, ENOTSUP,
2904                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2905                                           "push vlan action for VF representor "
2906                                           "not supported on NIC table");
2907         if (vlan_m &&
2908             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2909             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2910                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2911             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2912             !(mlx5_flow_find_action
2913                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2914                 return rte_flow_error_set(error, EINVAL,
2915                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2916                                           "not full match mask on VLAN PCP and "
2917                                           "there is no of_set_vlan_pcp action, "
2918                                           "push VLAN action cannot figure out "
2919                                           "PCP value");
2920         if (vlan_m &&
2921             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2922             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2923                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2924             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2925             !(mlx5_flow_find_action
2926                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2927                 return rte_flow_error_set(error, EINVAL,
2928                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2929                                           "not full match mask on VLAN VID and "
2930                                           "there is no of_set_vlan_vid action, "
2931                                           "push VLAN action cannot figure out "
2932                                           "VID value");
2934         return 0;
2935 }
2936
2937 /**
2938  * Validate the set VLAN PCP.
2939  *
2940  * @param[in] action_flags
2941  *   Holds the actions detected until now.
2942  * @param[in] actions
2943  *   Pointer to the list of actions remaining in the flow rule.
2944  * @param[out] error
2945  *   Pointer to error structure.
2946  *
2947  * @return
2948  *   0 on success, a negative errno value otherwise and rte_errno is set.
2949  */
2950 static int
2951 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2952                                      const struct rte_flow_action actions[],
2953                                      struct rte_flow_error *error)
2954 {
2955         const struct rte_flow_action *action = actions;
2956         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2957
2958         if (conf->vlan_pcp > 7)
2959                 return rte_flow_error_set(error, EINVAL,
2960                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2961                                           "VLAN PCP value is too big");
2962         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2963                 return rte_flow_error_set(error, ENOTSUP,
2964                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2965                                           "set VLAN PCP action must follow "
2966                                           "the push VLAN action");
2967         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2968                 return rte_flow_error_set(error, ENOTSUP,
2969                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2970                                           "Multiple VLAN PCP modification are "
2971                                           "not supported");
2972         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2973                 return rte_flow_error_set(error, EINVAL,
2974                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2975                                           "wrong action order, port_id should "
2976                                           "be after set VLAN PCP");
2977         return 0;
2978 }
2979
2980 /**
2981  * Validate the set VLAN VID.
2982  *
2983  * @param[in] item_flags
2984  *   Holds the items detected in this rule.
2985  * @param[in] action_flags
2986  *   Holds the actions detected until now.
2987  * @param[in] actions
2988  *   Pointer to the list of actions remaining in the flow rule.
2989  * @param[out] error
2990  *   Pointer to error structure.
2991  *
2992  * @return
2993  *   0 on success, a negative errno value otherwise and rte_errno is set.
2994  */
2995 static int
2996 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2997                                      uint64_t action_flags,
2998                                      const struct rte_flow_action actions[],
2999                                      struct rte_flow_error *error)
3000 {
3001         const struct rte_flow_action *action = actions;
3002         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
3003
3004         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3005                 return rte_flow_error_set(error, EINVAL,
3006                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3007                                           "VLAN VID value is too big");
3008         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3009             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3010                 return rte_flow_error_set(error, ENOTSUP,
3011                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3012                                           "set VLAN VID action must follow push"
3013                                           " VLAN action or match on VLAN item");
3014         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3015                 return rte_flow_error_set(error, ENOTSUP,
3016                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3017                                           "Multiple VLAN VID modifications are "
3018                                           "not supported");
3019         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3020                 return rte_flow_error_set(error, EINVAL,
3021                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3022                                           "wrong action order, port_id should "
3023                                           "be after set VLAN VID");
3024         return 0;
3025 }
3026
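/*
 * Editor's illustrative sketch (compiled out, documentation-only guard):
 * pushing a VLAN with an explicit VID and PCP. Per the three validators
 * above, the set VID/PCP actions must follow OF_PUSH_VLAN, and any PORT_ID
 * action must come after all of them.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_of_push_vlan doc_push_vlan = {
	.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
static const struct rte_flow_action_of_set_vlan_vid doc_vlan_vid = {
	.vlan_vid = RTE_BE16(100),
};
static const struct rte_flow_action_of_set_vlan_pcp doc_vlan_pcp = {
	.vlan_pcp = 3,
};
static const struct rte_flow_action doc_push_vlan_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &doc_push_vlan },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &doc_vlan_vid },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &doc_vlan_pcp },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
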
3027 /**
3028  * Validate the FLAG action.
3029  *
3030  * @param[in] dev
3031  *   Pointer to the rte_eth_dev structure.
3032  * @param[in] action_flags
3033  *   Holds the actions detected until now.
3034  * @param[in] attr
3035  *   Pointer to flow attributes
3036  * @param[out] error
3037  *   Pointer to error structure.
3038  *
3039  * @return
3040  *   0 on success, a negative errno value otherwise and rte_errno is set.
3041  */
3042 static int
3043 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3044                              uint64_t action_flags,
3045                              const struct rte_flow_attr *attr,
3046                              struct rte_flow_error *error)
3047 {
3048         struct mlx5_priv *priv = dev->data->dev_private;
3049         struct mlx5_dev_config *config = &priv->config;
3050         int ret;
3051
3052         /* Fall back if no extended metadata register support. */
3053         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3054                 return mlx5_flow_validate_action_flag(action_flags, attr,
3055                                                       error);
3056         /* Extensive metadata mode requires registers. */
3057         if (!mlx5_flow_ext_mreg_supported(dev))
3058                 return rte_flow_error_set(error, ENOTSUP,
3059                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3060                                           "no metadata registers "
3061                                           "to support flag action");
3062         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3063                 return rte_flow_error_set(error, ENOTSUP,
3064                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3065                                           "extended metadata register"
3066                                           " isn't available");
3067         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3068         if (ret < 0)
3069                 return ret;
3070         MLX5_ASSERT(ret > 0);
3071         if (action_flags & MLX5_FLOW_ACTION_MARK)
3072                 return rte_flow_error_set(error, EINVAL,
3073                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3074                                           "can't mark and flag in same flow");
3075         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3076                 return rte_flow_error_set(error, EINVAL,
3077                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3078                                           "can't have 2 flag"
3079                                           " actions in same flow");
3080         return 0;
3081 }
3082
3083 /**
3084  * Validate MARK action.
3085  *
3086  * @param[in] dev
3087  *   Pointer to the rte_eth_dev structure.
3088  * @param[in] action
3089  *   Pointer to action.
3090  * @param[in] action_flags
3091  *   Holds the actions detected until now.
3092  * @param[in] attr
3093  *   Pointer to flow attributes
3094  * @param[out] error
3095  *   Pointer to error structure.
3096  *
3097  * @return
3098  *   0 on success, a negative errno value otherwise and rte_errno is set.
3099  */
3100 static int
3101 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3102                              const struct rte_flow_action *action,
3103                              uint64_t action_flags,
3104                              const struct rte_flow_attr *attr,
3105                              struct rte_flow_error *error)
3106 {
3107         struct mlx5_priv *priv = dev->data->dev_private;
3108         struct mlx5_dev_config *config = &priv->config;
3109         const struct rte_flow_action_mark *mark = action->conf;
3110         int ret;
3111
3112         if (is_tunnel_offload_active(dev))
3113                 return rte_flow_error_set(error, ENOTSUP,
3114                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3115                                           "no mark action "
3116                                           "if tunnel offload active");
3117         /* Fall back if no extended metadata register support. */
3118         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3119                 return mlx5_flow_validate_action_mark(action, action_flags,
3120                                                       attr, error);
3121         /* Extensive metadata mode requires registers. */
3122         if (!mlx5_flow_ext_mreg_supported(dev))
3123                 return rte_flow_error_set(error, ENOTSUP,
3124                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3125                                           "no metadata registers "
3126                                           "to support mark action");
3127         if (!priv->sh->dv_mark_mask)
3128                 return rte_flow_error_set(error, ENOTSUP,
3129                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3130                                           "extended metadata register"
3131                                           " isn't available");
3132         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3133         if (ret < 0)
3134                 return ret;
3135         MLX5_ASSERT(ret > 0);
3136         if (!mark)
3137                 return rte_flow_error_set(error, EINVAL,
3138                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3139                                           "configuration cannot be null");
3140         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3141                 return rte_flow_error_set(error, EINVAL,
3142                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3143                                           &mark->id,
3144                                           "mark id exceeds the limit");
3145         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3146                 return rte_flow_error_set(error, EINVAL,
3147                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3148                                           "can't flag and mark in same flow");
3149         if (action_flags & MLX5_FLOW_ACTION_MARK)
3150                 return rte_flow_error_set(error, EINVAL,
3151                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3152                                           "can't have 2 mark actions in same"
3153                                           " flow");
3154         return 0;
3155 }
3156
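/*
 * Editor's illustrative sketch (compiled out, documentation-only guard): a
 * MARK action with a small id. Ids at or above MLX5_FLOW_MARK_MAX masked by
 * the usable register width are rejected above, as is combining MARK with
 * FLAG in the same flow.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_mark doc_mark = { .id = 42 };
static const struct rte_flow_action doc_mark_action = {
	.type = RTE_FLOW_ACTION_TYPE_MARK,
	.conf = &doc_mark,
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
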
3157 /**
3158  * Validate SET_META action.
3159  *
3160  * @param[in] dev
3161  *   Pointer to the rte_eth_dev structure.
3162  * @param[in] action
3163  *   Pointer to the action structure.
3164  * @param[in] action_flags
3165  *   Holds the actions detected until now.
3166  * @param[in] attr
3167  *   Pointer to flow attributes
3168  * @param[out] error
3169  *   Pointer to error structure.
3170  *
3171  * @return
3172  *   0 on success, a negative errno value otherwise and rte_errno is set.
3173  */
3174 static int
3175 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3176                                  const struct rte_flow_action *action,
3177                                  uint64_t action_flags __rte_unused,
3178                                  const struct rte_flow_attr *attr,
3179                                  struct rte_flow_error *error)
3180 {
3181         struct mlx5_priv *priv = dev->data->dev_private;
3182         struct mlx5_dev_config *config = &priv->config;
3183         const struct rte_flow_action_set_meta *conf;
3184         uint32_t nic_mask = UINT32_MAX;
3185         int reg;
3186
3187         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3188             !mlx5_flow_ext_mreg_supported(dev))
3189                 return rte_flow_error_set(error, ENOTSUP,
3190                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3191                                           "extended metadata register"
3192                                           " isn't supported");
3193         reg = flow_dv_get_metadata_reg(dev, attr, error);
3194         if (reg < 0)
3195                 return reg;
3196         if (reg == REG_NON)
3197                 return rte_flow_error_set(error, ENOTSUP,
3198                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3199                                           "unavailable extended metadata register");
3200         if (reg != REG_A && reg != REG_B)
3201                 nic_mask = priv->sh->dv_meta_mask;
3205         if (!(action->conf))
3206                 return rte_flow_error_set(error, EINVAL,
3207                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3208                                           "configuration cannot be null");
3209         conf = (const struct rte_flow_action_set_meta *)action->conf;
3210         if (!conf->mask)
3211                 return rte_flow_error_set(error, EINVAL,
3212                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3213                                           "zero mask doesn't have any effect");
3214         if (conf->mask & ~nic_mask)
3215                 return rte_flow_error_set(error, EINVAL,
3216                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3217                                           "metadata must be within reg C0");
3218         return 0;
3219 }
3220
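/*
 * Editor's illustrative sketch (compiled out, documentation-only guard):
 * SET_META with an explicit mask. A zero mask is rejected above, and when
 * the metadata is carried in a C register the mask must also fit within
 * priv->sh->dv_meta_mask.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_set_meta doc_set_meta = {
	.data = 0x1234,
	.mask = 0xffff,
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
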
3221 /**
3222  * Validate SET_TAG action.
3223  *
3224  * @param[in] dev
3225  *   Pointer to the rte_eth_dev structure.
3226  * @param[in] action
3227  *   Pointer to the action structure.
3228  * @param[in] action_flags
3229  *   Holds the actions detected until now.
3230  * @param[in] attr
3231  *   Pointer to flow attributes
3232  * @param[out] error
3233  *   Pointer to error structure.
3234  *
3235  * @return
3236  *   0 on success, a negative errno value otherwise and rte_errno is set.
3237  */
3238 static int
3239 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3240                                 const struct rte_flow_action *action,
3241                                 uint64_t action_flags,
3242                                 const struct rte_flow_attr *attr,
3243                                 struct rte_flow_error *error)
3244 {
3245         const struct rte_flow_action_set_tag *conf;
3246         const uint64_t terminal_action_flags =
3247                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3248                 MLX5_FLOW_ACTION_RSS;
3249         int ret;
3250
3251         if (!mlx5_flow_ext_mreg_supported(dev))
3252                 return rte_flow_error_set(error, ENOTSUP,
3253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3254                                           "extensive metadata register"
3255                                           " isn't supported");
3256         if (!(action->conf))
3257                 return rte_flow_error_set(error, EINVAL,
3258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3259                                           "configuration cannot be null");
3260         conf = (const struct rte_flow_action_set_tag *)action->conf;
3261         if (!conf->mask)
3262                 return rte_flow_error_set(error, EINVAL,
3263                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3264                                           "zero mask doesn't have any effect");
3265         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3266         if (ret < 0)
3267                 return ret;
3268         if (!attr->transfer && attr->ingress &&
3269             (action_flags & terminal_action_flags))
3270                 return rte_flow_error_set(error, EINVAL,
3271                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3272                                           "set_tag has no effect"
3273                                           " with terminal actions");
3274         return 0;
3275 }
3276
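/*
 * Editor's illustrative sketch (compiled out, documentation-only guard):
 * SET_TAG writing to application tag register index 0; the validator above
 * requires extensive metadata register support and a non-zero mask.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_set_tag doc_set_tag = {
	.data = 0xcafe,
	.mask = 0xffff,
	.index = 0,
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
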
3277 /**
3278  * Validate count action.
3279  *
3280  * @param[in] dev
3281  *   Pointer to rte_eth_dev structure.
3282  * @param[in] shared
3283  *   Indicator if action is shared.
3284  * @param[in] action_flags
3285  *   Holds the actions detected until now.
3286  * @param[out] error
3287  *   Pointer to error structure.
3288  *
3289  * @return
3290  *   0 on success, a negative errno value otherwise and rte_errno is set.
3291  */
3292 static int
3293 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3294                               uint64_t action_flags,
3295                               struct rte_flow_error *error)
3296 {
3297         struct mlx5_priv *priv = dev->data->dev_private;
3298
3299         if (!priv->sh->devx)
3300                 goto notsup_err;
3301         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3302                 return rte_flow_error_set(error, EINVAL,
3303                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3304                                           "duplicate count actions set");
3305         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3306             !priv->sh->flow_hit_aso_en)
3307                 return rte_flow_error_set(error, EINVAL,
3308                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3309                                           "old age and shared count combination is not supported");
3310 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3311         return 0;
3312 #endif
3313 notsup_err:
3314         return rte_flow_error_set
3315                       (error, ENOTSUP,
3316                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3317                        NULL,
3318                        "count action not supported");
3319 }
3320
3321 /**
3322  * Validate the L2 encap action.
3323  *
3324  * @param[in] dev
3325  *   Pointer to the rte_eth_dev structure.
3326  * @param[in] action_flags
3327  *   Holds the actions detected until now.
3328  * @param[in] action
3329  *   Pointer to the action structure.
3330  * @param[in] attr
3331  *   Pointer to flow attributes.
3332  * @param[out] error
3333  *   Pointer to error structure.
3334  *
3335  * @return
3336  *   0 on success, a negative errno value otherwise and rte_errno is set.
3337  */
3338 static int
3339 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3340                                  uint64_t action_flags,
3341                                  const struct rte_flow_action *action,
3342                                  const struct rte_flow_attr *attr,
3343                                  struct rte_flow_error *error)
3344 {
3345         const struct mlx5_priv *priv = dev->data->dev_private;
3346
3347         if (!(action->conf))
3348                 return rte_flow_error_set(error, EINVAL,
3349                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3350                                           "configuration cannot be null");
3351         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3352                 return rte_flow_error_set(error, EINVAL,
3353                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3354                                           "can only have a single encap action "
3355                                           "in a flow");
3356         if (!attr->transfer && priv->representor)
3357                 return rte_flow_error_set(error, ENOTSUP,
3358                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3359                                           "encap action for VF representor "
3360                                           "not supported on NIC table");
3361         return 0;
3362 }
3363
3364 /**
3365  * Validate a decap action.
3366  *
3367  * @param[in] dev
3368  *   Pointer to the rte_eth_dev structure.
3369  * @param[in] action_flags
3370  *   Holds the actions detected until now.
3371  * @param[in] action
3372  *   Pointer to the action structure.
3373  * @param[in] item_flags
3374  *   Holds the items detected.
3375  * @param[in] attr
3376  *   Pointer to flow attributes
3377  * @param[out] error
3378  *   Pointer to error structure.
3379  *
3380  * @return
3381  *   0 on success, a negative errno value otherwise and rte_errno is set.
3382  */
3383 static int
3384 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3385                               uint64_t action_flags,
3386                               const struct rte_flow_action *action,
3387                               const uint64_t item_flags,
3388                               const struct rte_flow_attr *attr,
3389                               struct rte_flow_error *error)
3390 {
3391         const struct mlx5_priv *priv = dev->data->dev_private;
3392
3393         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3394             !priv->config.decap_en)
3395                 return rte_flow_error_set(error, ENOTSUP,
3396                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3397                                           "decap is not enabled");
3398         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3399                 return rte_flow_error_set(error, ENOTSUP,
3400                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3401                                           action_flags &
3402                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3403                                           "have a single decap action" : "decap "
3404                                           "after encap is not supported");
3405         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3406                 return rte_flow_error_set(error, EINVAL,
3407                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3408                                           "can't have decap action after"
3409                                           " modify action");
3410         if (attr->egress)
3411                 return rte_flow_error_set(error, ENOTSUP,
3412                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3413                                           NULL,
3414                                           "decap action not supported for "
3415                                           "egress");
3416         if (!attr->transfer && priv->representor)
3417                 return rte_flow_error_set(error, ENOTSUP,
3418                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3419                                           "decap action for VF representor "
3420                                           "not supported on NIC table");
3421         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3422             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3423                 return rte_flow_error_set(error, ENOTSUP,
3424                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3425                                 "VXLAN item should be present for VXLAN decap");
3426         return 0;
3427 }
3428
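/*
 * Editor's illustrative sketch (compiled out, documentation-only guard):
 * VXLAN decap is only accepted by flow_dv_validate_action_decap() above
 * when the pattern itself matches on a VXLAN item.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item doc_vxlan_decap_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action doc_vxlan_decap_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
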
3429 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3430
3431 /**
3432  * Validate the raw encap and decap actions.
3433  *
3434  * @param[in] dev
3435  *   Pointer to the rte_eth_dev structure.
3436  * @param[in] decap
3437  *   Pointer to the decap action.
3438  * @param[in] encap
3439  *   Pointer to the encap action.
3440  * @param[in] attr
3441  *   Pointer to flow attributes
3442  * @param[in, out] action_flags
3443  *   Holds the actions detected until now.
3444  * @param[out] actions_n
3445  *   Pointer to the number-of-actions counter.
3446  * @param[in] action
3447  *   Pointer to the action structure.
3448  * @param[in] item_flags
3449  *   Holds the items detected.
3450  * @param[out] error
3451  *   Pointer to error structure.
3452  *
3453  * @return
3454  *   0 on success, a negative errno value otherwise and rte_errno is set.
3455  */
3456 static int
3457 flow_dv_validate_action_raw_encap_decap
3458         (struct rte_eth_dev *dev,
3459          const struct rte_flow_action_raw_decap *decap,
3460          const struct rte_flow_action_raw_encap *encap,
3461          const struct rte_flow_attr *attr, uint64_t *action_flags,
3462          int *actions_n, const struct rte_flow_action *action,
3463          uint64_t item_flags, struct rte_flow_error *error)
3464 {
3465         const struct mlx5_priv *priv = dev->data->dev_private;
3466         int ret;
3467
3468         if (encap && (!encap->size || !encap->data))
3469                 return rte_flow_error_set(error, EINVAL,
3470                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3471                                           "raw encap data cannot be empty");
3472         if (decap && encap) {
3473                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3474                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3475                         /* L3 encap. */
3476                         decap = NULL;
3477                 else if (encap->size <=
3478                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3479                            decap->size >
3480                            MLX5_ENCAPSULATION_DECISION_SIZE)
3481                         /* L3 decap. */
3482                         encap = NULL;
3483                 else if (encap->size >
3484                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3485                            decap->size >
3486                            MLX5_ENCAPSULATION_DECISION_SIZE)
3487                         /* 2 L2 actions: encap and decap. */
3488                         ;
3489                 else
3490                         return rte_flow_error_set(error,
3491                                 ENOTSUP,
3492                                 RTE_FLOW_ERROR_TYPE_ACTION,
3493                                 NULL, "unsupported combination: raw "
3494                                 "decap and raw encap sizes are "
3495                                 "both too small");
3496         }
3497         if (decap) {
3498                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3499                                                     item_flags, attr, error);
3500                 if (ret < 0)
3501                         return ret;
3502                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3503                 ++(*actions_n);
3504         }
3505         if (encap) {
3506                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3507                         return rte_flow_error_set(error, ENOTSUP,
3508                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3509                                                   NULL,
3510                                                   "small raw encap size");
3511                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3512                         return rte_flow_error_set(error, EINVAL,
3513                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3514                                                   NULL,
3515                                                   "more than one encap action");
3516                 if (!attr->transfer && priv->representor)
3517                         return rte_flow_error_set
3518                                         (error, ENOTSUP,
3519                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3520                                          "encap action for VF representor "
3521                                          "not supported on NIC table");
3522                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3523                 ++(*actions_n);
3524         }
3525         return 0;
3526 }
3527
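/*
 * Editor's illustrative sketch (compiled out, documentation-only guard):
 * sizes are chosen relative to MLX5_ENCAPSULATION_DECISION_SIZE so that the
 * classification above treats the pair as an L3 encap: a small raw decap
 * (here a bare Ethernet header) combined with a raw encap large enough to
 * hold a full tunnel header. The buffers are placeholders.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static uint8_t doc_l2_hdr[RTE_ETHER_HDR_LEN];
static uint8_t doc_tunnel_hdr[MLX5_ENCAPSULATION_DECISION_SIZE + 16];
static const struct rte_flow_action_raw_decap doc_raw_decap = {
	.data = doc_l2_hdr,
	.size = sizeof(doc_l2_hdr),
};
static const struct rte_flow_action_raw_encap doc_raw_encap = {
	.data = doc_tunnel_hdr,
	.size = sizeof(doc_tunnel_hdr),
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
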
3528 /**
3529  * Validate the ASO CT action.
3530  *
3531  * @param[in] dev
3532  *   Pointer to the rte_eth_dev structure.
3533  * @param[in] action_flags
3534  *   Holds the actions detected until now.
3535  * @param[in] item_flags
3536  *   The items found in this flow rule.
3537  * @param[in] attr
3538  *   Pointer to flow attributes.
3539  * @param[out] error
3540  *   Pointer to error structure.
3541  *
3542  * @return
3543  *   0 on success, a negative errno value otherwise and rte_errno is set.
3544  */
3545 static int
3546 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3547                                uint64_t action_flags,
3548                                uint64_t item_flags,
3549                                const struct rte_flow_attr *attr,
3550                                struct rte_flow_error *error)
3551 {
3552         RTE_SET_USED(dev);
3553
3554         if (attr->group == 0 && !attr->transfer)
3555                 return rte_flow_error_set(error, ENOTSUP,
3556                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3557                                           NULL,
3558                                           "Only non-root tables are supported");
3559         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3560                 return rte_flow_error_set(error, ENOTSUP,
3561                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3562                                           "CT cannot follow a fate action");
3563         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3564             (action_flags & MLX5_FLOW_ACTION_AGE))
3565                 return rte_flow_error_set(error, EINVAL,
3566                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3567                                           "Only one ASO action is supported");
3568         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3569                 return rte_flow_error_set(error, EINVAL,
3570                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3571                                           "Encap cannot exist before CT");
3572         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3573                 return rte_flow_error_set(error, EINVAL,
3574                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3575                                           "Not an outer TCP packet");
3576         return 0;
3577 }
3578
3579 int
3580 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3581                              struct mlx5_list_entry *entry, void *cb_ctx)
3582 {
3583         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3584         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3585         struct mlx5_flow_dv_encap_decap_resource *resource;
3586
3587         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3588                                 entry);
3589         if (resource->reformat_type == ctx_resource->reformat_type &&
3590             resource->ft_type == ctx_resource->ft_type &&
3591             resource->flags == ctx_resource->flags &&
3592             resource->size == ctx_resource->size &&
3593             !memcmp((const void *)resource->buf,
3594                     (const void *)ctx_resource->buf,
3595                     resource->size))
3596                 return 0;
3597         return -1;
3598 }
3599
3600 struct mlx5_list_entry *
3601 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3602 {
3603         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3604         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3605         struct mlx5dv_dr_domain *domain;
3606         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3607         struct mlx5_flow_dv_encap_decap_resource *resource;
3608         uint32_t idx;
3609         int ret;
3610
3611         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3612                 domain = sh->fdb_domain;
3613         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3614                 domain = sh->rx_domain;
3615         else
3616                 domain = sh->tx_domain;
3617         /* Register new encap/decap resource. */
3618         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3619         if (!resource) {
3620                 rte_flow_error_set(ctx->error, ENOMEM,
3621                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3622                                    "cannot allocate resource memory");
3623                 return NULL;
3624         }
3625         *resource = *ctx_resource;
3626         resource->idx = idx;
3627         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3628                                                               domain, resource,
3629                                                              &resource->action);
3630         if (ret) {
3631                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3632                 rte_flow_error_set(ctx->error, ENOMEM,
3633                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3634                                    NULL, "cannot create action");
3635                 return NULL;
3636         }
3637
3638         return &resource->entry;
3639 }
3640
3641 struct mlx5_list_entry *
3642 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3643                              void *cb_ctx)
3644 {
3645         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3646         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3647         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3648         uint32_t idx;
3649
3650         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3651                                            &idx);
3652         if (!cache_resource) {
3653                 rte_flow_error_set(ctx->error, ENOMEM,
3654                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3655                                    "cannot allocate resource memory");
3656                 return NULL;
3657         }
3658         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3659         cache_resource->idx = idx;
3660         return &cache_resource->entry;
3661 }
3662
3663 void
3664 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3665 {
3666         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3667         struct mlx5_flow_dv_encap_decap_resource *res =
3668                                        container_of(entry, typeof(*res), entry);
3669
3670         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3671 }
3672
3673 /**
3674  * Find existing encap/decap resource or create and register a new one.
3675  *
3676  * @param[in, out] dev
3677  *   Pointer to rte_eth_dev structure.
3678  * @param[in, out] resource
3679  *   Pointer to encap/decap resource.
3680  * @param[in, out] dev_flow
3681  *   Pointer to the dev_flow.
3682  * @param[out] error
3683  *   Pointer to error structure.
3684  *
3685  * @return
3686  *   0 on success, otherwise -errno and errno is set.
3687  */
3688 static int
3689 flow_dv_encap_decap_resource_register
3690                         (struct rte_eth_dev *dev,
3691                          struct mlx5_flow_dv_encap_decap_resource *resource,
3692                          struct mlx5_flow *dev_flow,
3693                          struct rte_flow_error *error)
3694 {
3695         struct mlx5_priv *priv = dev->data->dev_private;
3696         struct mlx5_dev_ctx_shared *sh = priv->sh;
3697         struct mlx5_list_entry *entry;
3698         union {
3699                 struct {
3700                         uint32_t ft_type:8;
3701                         uint32_t refmt_type:8;
3702                         /*
3703                          * Header reformat actions can be shared between
3704                          * non-root tables. One bit indicates whether the
3705                          * table is non-root.
3706                          */
3707                         uint32_t is_root:1;
3708                         uint32_t reserve:15;
3709                 };
3710                 uint32_t v32;
3711         } encap_decap_key = {
3712                 {
3713                         .ft_type = resource->ft_type,
3714                         .refmt_type = resource->reformat_type,
3715                         .is_root = !!dev_flow->dv.group,
3716                         .reserve = 0,
3717                 }
3718         };
3719         struct mlx5_flow_cb_ctx ctx = {
3720                 .error = error,
3721                 .data = resource,
3722         };
3723         struct mlx5_hlist *encaps_decaps;
3724         uint64_t key64;
3725
3726         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3727                                 "encaps_decaps",
3728                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3729                                 true, true, sh,
3730                                 flow_dv_encap_decap_create_cb,
3731                                 flow_dv_encap_decap_match_cb,
3732                                 flow_dv_encap_decap_remove_cb,
3733                                 flow_dv_encap_decap_clone_cb,
3734                                 flow_dv_encap_decap_clone_free_cb);
3735         if (unlikely(!encaps_decaps))
3736                 return -rte_errno;
3737         resource->flags = dev_flow->dv.group ? 0 : 1;
3738         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3739                                 sizeof(encap_decap_key.v32), 0);
3740         if (resource->reformat_type !=
3741             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3742             resource->size)
3743                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3744         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3745         if (!entry)
3746                 return -rte_errno;
3747         resource = container_of(entry, typeof(*resource), entry);
3748         dev_flow->dv.encap_decap = resource;
3749         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3750         return 0;
3751 }
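/*
 * Illustrative usage sketch (editor's note, not part of the driver):
 * callers such as flow_dv_create_action_l2_encap() below fill a stack
 * resource (raw header bytes in res.buf, length in res.size) and let the
 * registration deduplicate it via the hashed key:
 *
 *     struct mlx5_flow_dv_encap_decap_resource res = {
 *             .reformat_type =
 *                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
 *             .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
 *     };
 *     if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
 *             return -rte_errno;
 *
 * On success dev_flow->dv.encap_decap->action holds the shared DR action.
 */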
3752
3753 /**
3754  * Find existing table jump resource or create and register a new one.
3755  *
3756  * @param[in, out] dev
3757  *   Pointer to rte_eth_dev structure.
3758  * @param[in, out] tbl
3759  *   Pointer to flow table resource.
3760  * @param[in, out] dev_flow
3761  *   Pointer to the dev_flow.
3762  * @param[out] error
3763  *   Pointer to error structure.
3764  *
3765  * @return
3766  *   0 on success, otherwise -errno and errno is set.
3767  */
3768 static int
3769 flow_dv_jump_tbl_resource_register
3770                         (struct rte_eth_dev *dev __rte_unused,
3771                          struct mlx5_flow_tbl_resource *tbl,
3772                          struct mlx5_flow *dev_flow,
3773                          struct rte_flow_error *error __rte_unused)
3774 {
3775         struct mlx5_flow_tbl_data_entry *tbl_data =
3776                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3777
3778         MLX5_ASSERT(tbl);
3779         MLX5_ASSERT(tbl_data->jump.action);
3780         dev_flow->handle->rix_jump = tbl_data->idx;
3781         dev_flow->dv.jump = &tbl_data->jump;
3782         return 0;
3783 }
3784
3785 int
3786 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3787                          struct mlx5_list_entry *entry, void *cb_ctx)
3788 {
3789         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3790         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3791         struct mlx5_flow_dv_port_id_action_resource *res =
3792                                        container_of(entry, typeof(*res), entry);
3793
3794         return ref->port_id != res->port_id;
3795 }
3796
3797 struct mlx5_list_entry *
3798 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3799 {
3800         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3801         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3802         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3803         struct mlx5_flow_dv_port_id_action_resource *resource;
3804         uint32_t idx;
3805         int ret;
3806
3807         /* Register new port id action resource. */
3808         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3809         if (!resource) {
3810                 rte_flow_error_set(ctx->error, ENOMEM,
3811                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3812                                    "cannot allocate port_id action memory");
3813                 return NULL;
3814         }
3815         *resource = *ref;
3816         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3817                                                         ref->port_id,
3818                                                         &resource->action);
3819         if (ret) {
3820                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3821                 rte_flow_error_set(ctx->error, ENOMEM,
3822                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3823                                    "cannot create action");
3824                 return NULL;
3825         }
3826         resource->idx = idx;
3827         return &resource->entry;
3828 }
3829
3830 struct mlx5_list_entry *
3831 flow_dv_port_id_clone_cb(void *tool_ctx,
3832                          struct mlx5_list_entry *entry __rte_unused,
3833                          void *cb_ctx)
3834 {
3835         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3836         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3837         struct mlx5_flow_dv_port_id_action_resource *resource;
3838         uint32_t idx;
3839
3840         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3841         if (!resource) {
3842                 rte_flow_error_set(ctx->error, ENOMEM,
3843                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3844                                    "cannot allocate port_id action memory");
3845                 return NULL;
3846         }
3847         memcpy(resource, entry, sizeof(*resource));
3848         resource->idx = idx;
3849         return &resource->entry;
3850 }
3851
3852 void
3853 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3854 {
3855         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3856         struct mlx5_flow_dv_port_id_action_resource *resource =
3857                                   container_of(entry, typeof(*resource), entry);
3858
3859         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3860 }
3861
3862 /**
3863  * Find existing port ID action resource or create and register a new one.
3864  *
3865  * @param[in, out] dev
3866  *   Pointer to rte_eth_dev structure.
3867  * @param[in, out] ref
3868  *   Pointer to port ID action resource reference.
3869  * @param[in, out] dev_flow
3870  *   Pointer to the dev_flow.
3871  * @param[out] error
3872  *   Pointer to error structure.
3873  *
3874  * @return
3875  *   0 on success, otherwise -errno and errno is set.
3876  */
3877 static int
3878 flow_dv_port_id_action_resource_register
3879                         (struct rte_eth_dev *dev,
3880                          struct mlx5_flow_dv_port_id_action_resource *ref,
3881                          struct mlx5_flow *dev_flow,
3882                          struct rte_flow_error *error)
3883 {
3884         struct mlx5_priv *priv = dev->data->dev_private;
3885         struct mlx5_list_entry *entry;
3886         struct mlx5_flow_dv_port_id_action_resource *resource;
3887         struct mlx5_flow_cb_ctx ctx = {
3888                 .error = error,
3889                 .data = ref,
3890         };
3891
3892         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3893         if (!entry)
3894                 return -rte_errno;
3895         resource = container_of(entry, typeof(*resource), entry);
3896         dev_flow->dv.port_id_action = resource;
3897         dev_flow->handle->rix_port_id_action = resource->idx;
3898         return 0;
3899 }
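/*
 * Editor's note, illustrative only: entries are deduplicated by port_id
 * alone (see flow_dv_port_id_match_cb() above), so two flows directed to
 * the same port share one destination action:
 *
 *     struct mlx5_flow_dv_port_id_action_resource ref = { .port_id = 2 };
 *     flow_dv_port_id_action_resource_register(dev, &ref, dev_flow, error);
 *
 * A second registration with .port_id = 2 reuses the cached entry.
 */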
3900
3901 int
3902 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3903                            struct mlx5_list_entry *entry, void *cb_ctx)
3904 {
3905         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3906         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3907         struct mlx5_flow_dv_push_vlan_action_resource *res =
3908                                        container_of(entry, typeof(*res), entry);
3909
3910         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3911 }
3912
3913 struct mlx5_list_entry *
3914 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3915 {
3916         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3917         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3918         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3919         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3920         struct mlx5dv_dr_domain *domain;
3921         uint32_t idx;
3922         int ret;
3923
3924         /* Register new push VLAN action resource. */
3925         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3926         if (!resource) {
3927                 rte_flow_error_set(ctx->error, ENOMEM,
3928                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3929                                    "cannot allocate push_vlan action memory");
3930                 return NULL;
3931         }
3932         *resource = *ref;
3933         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3934                 domain = sh->fdb_domain;
3935         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3936                 domain = sh->rx_domain;
3937         else
3938                 domain = sh->tx_domain;
3939         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3940                                                         &resource->action);
3941         if (ret) {
3942                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3943                 rte_flow_error_set(ctx->error, ENOMEM,
3944                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3945                                    "cannot create push vlan action");
3946                 return NULL;
3947         }
3948         resource->idx = idx;
3949         return &resource->entry;
3950 }
3951
3952 struct mlx5_list_entry *
3953 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3954                            struct mlx5_list_entry *entry __rte_unused,
3955                            void *cb_ctx)
3956 {
3957         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3958         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3959         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3960         uint32_t idx;
3961
3962         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3963         if (!resource) {
3964                 rte_flow_error_set(ctx->error, ENOMEM,
3965                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3966                                    "cannot allocate push_vlan action memory");
3967                 return NULL;
3968         }
3969         memcpy(resource, entry, sizeof(*resource));
3970         resource->idx = idx;
3971         return &resource->entry;
3972 }
3973
3974 void
3975 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3976 {
3977         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3978         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3979                                   container_of(entry, typeof(*resource), entry);
3980
3981         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3982 }
3983
3984 /**
3985  * Find existing push VLAN resource or create and register a new one.
3986  *
3987  * @param[in, out] dev
3988  *   Pointer to rte_eth_dev structure.
3989  * @param[in, out] ref
3990  *   Pointer to push VLAN action resource reference.
3991  * @param[in, out] dev_flow
3992  *   Pointer to the dev_flow.
3993  * @param[out] error
3994  *   Pointer to error structure.
3995  *
3996  * @return
3997  *   0 on success, otherwise -errno and errno is set.
3998  */
3999 static int
4000 flow_dv_push_vlan_action_resource_register
4001                        (struct rte_eth_dev *dev,
4002                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
4003                         struct mlx5_flow *dev_flow,
4004                         struct rte_flow_error *error)
4005 {
4006         struct mlx5_priv *priv = dev->data->dev_private;
4007         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4008         struct mlx5_list_entry *entry;
4009         struct mlx5_flow_cb_ctx ctx = {
4010                 .error = error,
4011                 .data = ref,
4012         };
4013
4014         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4015         if (!entry)
4016                 return -rte_errno;
4017         resource = container_of(entry, typeof(*resource), entry);
4018
4019         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4020         dev_flow->dv.push_vlan_res = resource;
4021         return 0;
4022 }
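/*
 * Editor's note, illustrative only: push VLAN entries are deduplicated by
 * the (vlan_tag, ft_type) pair (see flow_dv_push_vlan_match_cb() above):
 *
 *     struct mlx5_flow_dv_push_vlan_action_resource ref = {
 *             .vlan_tag = RTE_BE32(0x81000064),
 *             .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
 *     };
 *     flow_dv_push_vlan_action_resource_register(dev, &ref, dev_flow, error);
 *
 * where 0x81000064 assumes TPID 0x8100 and VID 100, matching the layout
 * produced by flow_dv_create_action_push_vlan() below.
 */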
4023
4024 /**
4025  * Get the header length of the given rte_flow_item_type.
4026  *
4027  * @param[in] item_type
4028  *   Tested rte_flow_item_type.
4029  *
4030  * @return
4031  *   Size of the item type's header in bytes, 0 if void or irrelevant.
4032  */
4033 static size_t
4034 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4035 {
4036         size_t retval;
4037
4038         switch (item_type) {
4039         case RTE_FLOW_ITEM_TYPE_ETH:
4040                 retval = sizeof(struct rte_ether_hdr);
4041                 break;
4042         case RTE_FLOW_ITEM_TYPE_VLAN:
4043                 retval = sizeof(struct rte_vlan_hdr);
4044                 break;
4045         case RTE_FLOW_ITEM_TYPE_IPV4:
4046                 retval = sizeof(struct rte_ipv4_hdr);
4047                 break;
4048         case RTE_FLOW_ITEM_TYPE_IPV6:
4049                 retval = sizeof(struct rte_ipv6_hdr);
4050                 break;
4051         case RTE_FLOW_ITEM_TYPE_UDP:
4052                 retval = sizeof(struct rte_udp_hdr);
4053                 break;
4054         case RTE_FLOW_ITEM_TYPE_TCP:
4055                 retval = sizeof(struct rte_tcp_hdr);
4056                 break;
4057         case RTE_FLOW_ITEM_TYPE_VXLAN:
4058         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4059                 retval = sizeof(struct rte_vxlan_hdr);
4060                 break;
4061         case RTE_FLOW_ITEM_TYPE_GRE:
4062         case RTE_FLOW_ITEM_TYPE_NVGRE:
4063                 retval = sizeof(struct rte_gre_hdr);
4064                 break;
4065         case RTE_FLOW_ITEM_TYPE_MPLS:
4066                 retval = sizeof(struct rte_mpls_hdr);
4067                 break;
4068         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4069         default:
4070                 retval = 0;
4071                 break;
4072         }
4073         return retval;
4074 }
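/*
 * Editor's note, illustrative only: flow_dv_convert_encap_data() below uses
 * this helper to walk an item list; e.g. an ETH/IPV4/UDP/VXLAN pattern
 * yields 14 + 20 + 8 + 8 = 50 bytes of encapsulation header.
 */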
4075
4076 #define MLX5_ENCAP_IPV4_VERSION         0x40
4077 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4078 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4079 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4080 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4081 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4082 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
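/*
 * Editor's note: the defaults above are, respectively, IPv4 version 4 in
 * the high nibble of version_ihl, a minimal IHL of 5 words (together 0x45),
 * a TTL of 64, the IPv6 version nibble of vtc_flow, a hop limit of 255,
 * the VXLAN I-flag (VNI valid) and the VXLAN-GPE P-flag (next protocol
 * present).
 */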
4083
4084 /**
4085  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
4086  *
4087  * @param[in] items
4088  *   Pointer to rte_flow_item objects list.
4089  * @param[out] buf
4090  *   Pointer to the output buffer.
4091  * @param[out] size
4092  *   Pointer to the output buffer size.
4093  * @param[out] error
4094  *   Pointer to the error structure.
4095  *
4096  * @return
4097  *   0 on success, a negative errno value otherwise and rte_errno is set.
4098  */
4099 static int
4100 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4101                            size_t *size, struct rte_flow_error *error)
4102 {
4103         struct rte_ether_hdr *eth = NULL;
4104         struct rte_vlan_hdr *vlan = NULL;
4105         struct rte_ipv4_hdr *ipv4 = NULL;
4106         struct rte_ipv6_hdr *ipv6 = NULL;
4107         struct rte_udp_hdr *udp = NULL;
4108         struct rte_vxlan_hdr *vxlan = NULL;
4109         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4110         struct rte_gre_hdr *gre = NULL;
4111         size_t len;
4112         size_t temp_size = 0;
4113
4114         if (!items)
4115                 return rte_flow_error_set(error, EINVAL,
4116                                           RTE_FLOW_ERROR_TYPE_ACTION,
4117                                           NULL, "invalid empty data");
4118         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4119                 len = flow_dv_get_item_hdr_len(items->type);
4120                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4121                         return rte_flow_error_set(error, EINVAL,
4122                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4123                                                   (void *)items->type,
4124                                                   "items total size is too big"
4125                                                   " for encap action");
4126                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4127                 switch (items->type) {
4128                 case RTE_FLOW_ITEM_TYPE_ETH:
4129                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4130                         break;
4131                 case RTE_FLOW_ITEM_TYPE_VLAN:
4132                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4133                         if (!eth)
4134                                 return rte_flow_error_set(error, EINVAL,
4135                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4136                                                 (void *)items->type,
4137                                                 "eth header not found");
4138                         if (!eth->ether_type)
4139                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4140                         break;
4141                 case RTE_FLOW_ITEM_TYPE_IPV4:
4142                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4143                         if (!vlan && !eth)
4144                                 return rte_flow_error_set(error, EINVAL,
4145                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4146                                                 (void *)items->type,
4147                                                 "neither eth nor vlan"
4148                                                 " header found");
4149                         if (vlan && !vlan->eth_proto)
4150                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4151                         else if (eth && !eth->ether_type)
4152                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4153                         if (!ipv4->version_ihl)
4154                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4155                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4156                         if (!ipv4->time_to_live)
4157                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4158                         break;
4159                 case RTE_FLOW_ITEM_TYPE_IPV6:
4160                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4161                         if (!vlan && !eth)
4162                                 return rte_flow_error_set(error, EINVAL,
4163                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4164                                                 (void *)items->type,
4165                                                 "neither eth nor vlan"
4166                                                 " header found");
4167                         if (vlan && !vlan->eth_proto)
4168                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4169                         else if (eth && !eth->ether_type)
4170                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4171                         if (!ipv6->vtc_flow)
4172                                 ipv6->vtc_flow =
4173                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4174                         if (!ipv6->hop_limits)
4175                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4176                         break;
4177                 case RTE_FLOW_ITEM_TYPE_UDP:
4178                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4179                         if (!ipv4 && !ipv6)
4180                                 return rte_flow_error_set(error, EINVAL,
4181                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4182                                                 (void *)items->type,
4183                                                 "ip header not found");
4184                         if (ipv4 && !ipv4->next_proto_id)
4185                                 ipv4->next_proto_id = IPPROTO_UDP;
4186                         else if (ipv6 && !ipv6->proto)
4187                                 ipv6->proto = IPPROTO_UDP;
4188                         break;
4189                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4190                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4191                         if (!udp)
4192                                 return rte_flow_error_set(error, EINVAL,
4193                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4194                                                 (void *)items->type,
4195                                                 "udp header not found");
4196                         if (!udp->dst_port)
4197                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4198                         if (!vxlan->vx_flags)
4199                                 vxlan->vx_flags =
4200                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4201                         break;
4202                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4203                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4204                         if (!udp)
4205                                 return rte_flow_error_set(error, EINVAL,
4206                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4207                                                 (void *)items->type,
4208                                                 "udp header not found");
4209                         if (!vxlan_gpe->proto)
4210                                 return rte_flow_error_set(error, EINVAL,
4211                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4212                                                 (void *)items->type,
4213                                                 "next protocol not found");
4214                         if (!udp->dst_port)
4215                                 udp->dst_port =
4216                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4217                         if (!vxlan_gpe->vx_flags)
4218                                 vxlan_gpe->vx_flags =
4219                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4220                         break;
4221                 case RTE_FLOW_ITEM_TYPE_GRE:
4222                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4223                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4224                         if (!gre->proto)
4225                                 return rte_flow_error_set(error, EINVAL,
4226                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4227                                                 (void *)items->type,
4228                                                 "next protocol not found");
4229                         if (!ipv4 && !ipv6)
4230                                 return rte_flow_error_set(error, EINVAL,
4231                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4232                                                 (void *)items->type,
4233                                                 "ip header not found");
4234                         if (ipv4 && !ipv4->next_proto_id)
4235                                 ipv4->next_proto_id = IPPROTO_GRE;
4236                         else if (ipv6 && !ipv6->proto)
4237                                 ipv6->proto = IPPROTO_GRE;
4238                         break;
4239                 case RTE_FLOW_ITEM_TYPE_VOID:
4240                         break;
4241                 default:
4242                         return rte_flow_error_set(error, EINVAL,
4243                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4244                                                   (void *)items->type,
4245                                                   "unsupported item type");
4246                         break;
4247                 }
4248                 temp_size += len;
4249         }
4250         *size = temp_size;
4251         return 0;
4252 }
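/*
 * Editor's note, illustrative only: for a VXLAN_ENCAP action the item list
 * handed to this function typically looks like
 *
 *     struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *
 * and any unset fields (EtherType, IP protocol, UDP destination port,
 * VXLAN flags) are filled with the defaults defined above.
 */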
4253
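/**
 * Zero out the UDP checksum in the outer encapsulation header.
 *
 * The hardware recalculates the IPv4 checksum, so only IPv6 needs handling
 * here: VLAN headers are skipped and, if the payload is UDP over IPv6, its
 * checksum is cleared.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation data, starting with the L2 header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */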
4254 static int
4255 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4256 {
4257         struct rte_ether_hdr *eth = NULL;
4258         struct rte_vlan_hdr *vlan = NULL;
4259         struct rte_ipv6_hdr *ipv6 = NULL;
4260         struct rte_udp_hdr *udp = NULL;
4261         char *next_hdr;
4262         uint16_t proto;
4263
4264         eth = (struct rte_ether_hdr *)data;
4265         next_hdr = (char *)(eth + 1);
4266         proto = rte_be_to_cpu_16(eth->ether_type);
4267
4268         /* VLAN skipping */
4269         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4270                 vlan = (struct rte_vlan_hdr *)next_hdr;
4271                 proto = rte_be_to_cpu_16(vlan->eth_proto);
4272                 next_hdr += sizeof(struct rte_vlan_hdr);
4273         }
4274
4275         /* HW calculates the IPv4 csum, no need to proceed. */
4276         if (proto == RTE_ETHER_TYPE_IPV4)
4277                 return 0;
4278
4279         /* Non IPv4/IPv6 header, not supported. */
4280         if (proto != RTE_ETHER_TYPE_IPV6) {
4281                 return rte_flow_error_set(error, ENOTSUP,
4282                                           RTE_FLOW_ERROR_TYPE_ACTION,
4283                                           NULL, "Cannot offload non IPv4/IPv6");
4284         }
4285
4286         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4287
4288         /* Ignore non-UDP. */
4289         if (ipv6->proto != IPPROTO_UDP)
4290                 return 0;
4291
4292         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4293         udp->dgram_cksum = 0;
4294
4295         return 0;
4296 }
4297
4298 /**
4299  * Convert L2 encap action to DV specification.
4300  *
4301  * @param[in] dev
4302  *   Pointer to rte_eth_dev structure.
4303  * @param[in] action
4304  *   Pointer to action structure.
4305  * @param[in, out] dev_flow
4306  *   Pointer to the mlx5_flow.
4307  * @param[in] transfer
4308  *   Mark if the flow is an E-Switch flow.
4309  * @param[out] error
4310  *   Pointer to the error structure.
4311  *
4312  * @return
4313  *   0 on success, a negative errno value otherwise and rte_errno is set.
4314  */
4315 static int
4316 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4317                                const struct rte_flow_action *action,
4318                                struct mlx5_flow *dev_flow,
4319                                uint8_t transfer,
4320                                struct rte_flow_error *error)
4321 {
4322         const struct rte_flow_item *encap_data;
4323         const struct rte_flow_action_raw_encap *raw_encap_data;
4324         struct mlx5_flow_dv_encap_decap_resource res = {
4325                 .reformat_type =
4326                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4327                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4328                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4329         };
4330
4331         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4332                 raw_encap_data =
4333                         (const struct rte_flow_action_raw_encap *)action->conf;
4334                 res.size = raw_encap_data->size;
4335                 memcpy(res.buf, raw_encap_data->data, res.size);
4336         } else {
4337                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4338                         encap_data =
4339                                 ((const struct rte_flow_action_vxlan_encap *)
4340                                                 action->conf)->definition;
4341                 else
4342                         encap_data =
4343                                 ((const struct rte_flow_action_nvgre_encap *)
4344                                                 action->conf)->definition;
4345                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4346                                                &res.size, error))
4347                         return -rte_errno;
4348         }
4349         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4350                 return -rte_errno;
4351         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4352                 return rte_flow_error_set(error, EINVAL,
4353                                           RTE_FLOW_ERROR_TYPE_ACTION,
4354                                           NULL, "can't create L2 encap action");
4355         return 0;
4356 }
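/*
 * Editor's note, illustrative only: for RTE_FLOW_ACTION_TYPE_RAW_ENCAP the
 * application supplies prebuilt header bytes directly (here assumed to be
 * a 50-byte Ethernet/IPv4/UDP/VXLAN header in hdr):
 *
 *     struct rte_flow_action_raw_encap raw = {
 *             .data = hdr,
 *             .size = sizeof(hdr),
 *     };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
 *             .conf = &raw,
 *     };
 */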
4357
4358 /**
4359  * Convert L2 decap action to DV specification.
4360  *
4361  * @param[in] dev
4362  *   Pointer to rte_eth_dev structure.
4363  * @param[in, out] dev_flow
4364  *   Pointer to the mlx5_flow.
4365  * @param[in] transfer
4366  *   Mark if the flow is an E-Switch flow.
4367  * @param[out] error
4368  *   Pointer to the error structure.
4369  *
4370  * @return
4371  *   0 on success, a negative errno value otherwise and rte_errno is set.
4372  */
4373 static int
4374 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4375                                struct mlx5_flow *dev_flow,
4376                                uint8_t transfer,
4377                                struct rte_flow_error *error)
4378 {
4379         struct mlx5_flow_dv_encap_decap_resource res = {
4380                 .size = 0,
4381                 .reformat_type =
4382                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4383                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4384                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4385         };
4386
4387         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4388                 return rte_flow_error_set(error, EINVAL,
4389                                           RTE_FLOW_ERROR_TYPE_ACTION,
4390                                           NULL, "can't create L2 decap action");
4391         return 0;
4392 }
4393
4394 /**
4395  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4396  *
4397  * @param[in] dev
4398  *   Pointer to rte_eth_dev structure.
4399  * @param[in] action
4400  *   Pointer to action structure.
4401  * @param[in, out] dev_flow
4402  *   Pointer to the mlx5_flow.
4403  * @param[in] attr
4404  *   Pointer to the flow attributes.
4405  * @param[out] error
4406  *   Pointer to the error structure.
4407  *
4408  * @return
4409  *   0 on success, a negative errno value otherwise and rte_errno is set.
4410  */
4411 static int
4412 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4413                                 const struct rte_flow_action *action,
4414                                 struct mlx5_flow *dev_flow,
4415                                 const struct rte_flow_attr *attr,
4416                                 struct rte_flow_error *error)
4417 {
4418         const struct rte_flow_action_raw_encap *encap_data;
4419         struct mlx5_flow_dv_encap_decap_resource res;
4420
4421         memset(&res, 0, sizeof(res));
4422         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4423         res.size = encap_data->size;
4424         memcpy(res.buf, encap_data->data, res.size);
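        /*
         * Editor's note: data shorter than a full L2 + L3 header is assumed
         * to be only the new L2 header for an L3 decap (L3_TUNNEL_TO_L2);
         * anything longer is treated as a full L3 tunnel encapsulation.
         */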
4425         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4426                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4427                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4428         if (attr->transfer)
4429                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4430         else
4431                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4432                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4433         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4434                 return rte_flow_error_set(error, EINVAL,
4435                                           RTE_FLOW_ERROR_TYPE_ACTION,
4436                                           NULL, "can't create encap action");
4437         return 0;
4438 }
4439
4440 /**
4441  * Create action push VLAN.
4442  *
4443  * @param[in] dev
4444  *   Pointer to rte_eth_dev structure.
4445  * @param[in] attr
4446  *   Pointer to the flow attributes.
4447  * @param[in] vlan
4448  *   Pointer to the VLAN header to push onto the packet.
4449  * @param[in, out] dev_flow
4450  *   Pointer to the mlx5_flow.
4451  * @param[out] error
4452  *   Pointer to the error structure.
4453  *
4454  * @return
4455  *   0 on success, a negative errno value otherwise and rte_errno is set.
4456  */
4457 static int
4458 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4459                                 const struct rte_flow_attr *attr,
4460                                 const struct rte_vlan_hdr *vlan,
4461                                 struct mlx5_flow *dev_flow,
4462                                 struct rte_flow_error *error)
4463 {
4464         struct mlx5_flow_dv_push_vlan_action_resource res;
4465
4466         memset(&res, 0, sizeof(res));
4467         res.vlan_tag =
4468                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4469                                  vlan->vlan_tci);
4470         if (attr->transfer)
4471                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4472         else
4473                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4474                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4475         return flow_dv_push_vlan_action_resource_register
4476                                             (dev, &res, dev_flow, error);
4477 }
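/*
 * Editor's note, illustrative only: assuming eth_proto and vlan_tci arrive
 * in CPU order, TPID 0x8100 with PCP 0 and VID 100 (TCI 0x0064) yields
 * res.vlan_tag == rte_cpu_to_be_32(0x81000064).
 */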
4478
4479 /**
4480  * Validate the modify-header actions.
4481  *
4482  * @param[in] action_flags
4483  *   Holds the actions detected until now.
4484  * @param[in] action
4485  *   Pointer to the modify action.
4486  * @param[out] error
4487  *   Pointer to error structure.
4488  *
4489  * @return
4490  *   0 on success, a negative errno value otherwise and rte_errno is set.
4491  */
4492 static int
4493 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4494                                    const struct rte_flow_action *action,
4495                                    struct rte_flow_error *error)
4496 {
4497         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4498                 return rte_flow_error_set(error, EINVAL,
4499                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4500                                           NULL, "action configuration not set");
4501         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4502                 return rte_flow_error_set(error, EINVAL,
4503                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4504                                           "can't have encap action before"
4505                                           " modify action");
4506         return 0;
4507 }
4508
4509 /**
4510  * Validate the modify-header MAC address actions.
4511  *
4512  * @param[in] action_flags
4513  *   Holds the actions detected until now.
4514  * @param[in] action
4515  *   Pointer to the modify action.
4516  * @param[in] item_flags
4517  *   Holds the items detected.
4518  * @param[out] error
4519  *   Pointer to error structure.
4520  *
4521  * @return
4522  *   0 on success, a negative errno value otherwise and rte_errno is set.
4523  */
4524 static int
4525 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4526                                    const struct rte_flow_action *action,
4527                                    const uint64_t item_flags,
4528                                    struct rte_flow_error *error)
4529 {
4530         int ret = 0;
4531
4532         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4533         if (!ret) {
4534                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4535                         return rte_flow_error_set(error, EINVAL,
4536                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4537                                                   NULL,
4538                                                   "no L2 item in pattern");
4539         }
4540         return ret;
4541 }
4542
4543 /**
4544  * Validate the modify-header IPv4 address actions.
4545  *
4546  * @param[in] action_flags
4547  *   Holds the actions detected until now.
4548  * @param[in] action
4549  *   Pointer to the modify action.
4550  * @param[in] item_flags
4551  *   Holds the items detected.
4552  * @param[out] error
4553  *   Pointer to error structure.
4554  *
4555  * @return
4556  *   0 on success, a negative errno value otherwise and rte_errno is set.
4557  */
4558 static int
4559 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4560                                     const struct rte_flow_action *action,
4561                                     const uint64_t item_flags,
4562                                     struct rte_flow_error *error)
4563 {
4564         int ret = 0;
4565         uint64_t layer;
4566
4567         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4568         if (!ret) {
4569                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4570                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4571                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4572                 if (!(item_flags & layer))
4573                         return rte_flow_error_set(error, EINVAL,
4574                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4575                                                   NULL,
4576                                                   "no ipv4 item in pattern");
4577         }
4578         return ret;
4579 }
4580
4581 /**
4582  * Validate the modify-header IPv6 address actions.
4583  *
4584  * @param[in] action_flags
4585  *   Holds the actions detected until now.
4586  * @param[in] action
4587  *   Pointer to the modify action.
4588  * @param[in] item_flags
4589  *   Holds the items detected.
4590  * @param[out] error
4591  *   Pointer to error structure.
4592  *
4593  * @return
4594  *   0 on success, a negative errno value otherwise and rte_errno is set.
4595  */
4596 static int
4597 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4598                                     const struct rte_flow_action *action,
4599                                     const uint64_t item_flags,
4600                                     struct rte_flow_error *error)
4601 {
4602         int ret = 0;
4603         uint64_t layer;
4604
4605         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4606         if (!ret) {
4607                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4608                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4609                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4610                 if (!(item_flags & layer))
4611                         return rte_flow_error_set(error, EINVAL,
4612                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4613                                                   NULL,
4614                                                   "no ipv6 item in pattern");
4615         }
4616         return ret;
4617 }
4618
4619 /**
4620  * Validate the modify-header TP actions.
4621  *
4622  * @param[in] action_flags
4623  *   Holds the actions detected until now.
4624  * @param[in] action
4625  *   Pointer to the modify action.
4626  * @param[in] item_flags
4627  *   Holds the items detected.
4628  * @param[out] error
4629  *   Pointer to error structure.
4630  *
4631  * @return
4632  *   0 on success, a negative errno value otherwise and rte_errno is set.
4633  */
4634 static int
4635 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4636                                   const struct rte_flow_action *action,
4637                                   const uint64_t item_flags,
4638                                   struct rte_flow_error *error)
4639 {
4640         int ret = 0;
4641         uint64_t layer;
4642
4643         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4644         if (!ret) {
4645                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4646                                  MLX5_FLOW_LAYER_INNER_L4 :
4647                                  MLX5_FLOW_LAYER_OUTER_L4;
4648                 if (!(item_flags & layer))
4649                         return rte_flow_error_set(error, EINVAL,
4650                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4651                                                   NULL, "no transport layer "
4652                                                   "in pattern");
4653         }
4654         return ret;
4655 }
4656
4657 /**
4658  * Validate the modify-header actions of increment/decrement
4659  * TCP Sequence-number.
4660  *
4661  * @param[in] action_flags
4662  *   Holds the actions detected until now.
4663  * @param[in] action
4664  *   Pointer to the modify action.
4665  * @param[in] item_flags
4666  *   Holds the items detected.
4667  * @param[out] error
4668  *   Pointer to error structure.
4669  *
4670  * @return
4671  *   0 on success, a negative errno value otherwise and rte_errno is set.
4672  */
4673 static int
4674 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4675                                        const struct rte_flow_action *action,
4676                                        const uint64_t item_flags,
4677                                        struct rte_flow_error *error)
4678 {
4679         int ret = 0;
4680         uint64_t layer;
4681
4682         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4683         if (!ret) {
4684                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4685                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4686                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4687                 if (!(item_flags & layer))
4688                         return rte_flow_error_set(error, EINVAL,
4689                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4690                                                   NULL, "no TCP item in"
4691                                                   " pattern");
4692                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4693                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4694                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4695                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4696                         return rte_flow_error_set(error, EINVAL,
4697                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4698                                                   NULL,
4699                                                   "cannot decrease and increase"
4700                                                   " TCP sequence number"
4701                                                   " at the same time");
4702         }
4703         return ret;
4704 }
4705
4706 /**
4707  * Validate the modify-header actions of increment/decrement
4708  * TCP Acknowledgment number.
4709  *
4710  * @param[in] action_flags
4711  *   Holds the actions detected until now.
4712  * @param[in] action
4713  *   Pointer to the modify action.
4714  * @param[in] item_flags
4715  *   Holds the items detected.
4716  * @param[out] error
4717  *   Pointer to error structure.
4718  *
4719  * @return
4720  *   0 on success, a negative errno value otherwise and rte_errno is set.
4721  */
4722 static int
4723 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4724                                        const struct rte_flow_action *action,
4725                                        const uint64_t item_flags,
4726                                        struct rte_flow_error *error)
4727 {
4728         int ret = 0;
4729         uint64_t layer;
4730
4731         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4732         if (!ret) {
4733                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4734                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4735                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4736                 if (!(item_flags & layer))
4737                         return rte_flow_error_set(error, EINVAL,
4738                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4739                                                   NULL, "no TCP item in"
4740                                                   " pattern");
4741                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4742                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4743                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4744                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4745                         return rte_flow_error_set(error, EINVAL,
4746                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4747                                                   NULL,
4748                                                   "cannot decrease and increase"
4749                                                   " TCP acknowledgment number"
4750                                                   " at the same time");
4751         }
4752         return ret;
4753 }
4754
4755 /**
4756  * Validate the modify-header TTL actions.
4757  *
4758  * @param[in] action_flags
4759  *   Holds the actions detected until now.
4760  * @param[in] action
4761  *   Pointer to the modify action.
4762  * @param[in] item_flags
4763  *   Holds the items detected.
4764  * @param[out] error
4765  *   Pointer to error structure.
4766  *
4767  * @return
4768  *   0 on success, a negative errno value otherwise and rte_errno is set.
4769  */
4770 static int
4771 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4772                                    const struct rte_flow_action *action,
4773                                    const uint64_t item_flags,
4774                                    struct rte_flow_error *error)
4775 {
4776         int ret = 0;
4777         uint64_t layer;
4778
4779         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4780         if (!ret) {
4781                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4782                                  MLX5_FLOW_LAYER_INNER_L3 :
4783                                  MLX5_FLOW_LAYER_OUTER_L3;
4784                 if (!(item_flags & layer))
4785                         return rte_flow_error_set(error, EINVAL,
4786                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4787                                                   NULL,
4788                                                   "no IP protocol in pattern");
4789         }
4790         return ret;
4791 }
4792
4793 /**
4794  * Validate the generic modify field actions.
 *
4795  * @param[in] dev
4796  *   Pointer to the rte_eth_dev structure.
4797  * @param[in] action_flags
4798  *   Holds the actions detected until now.
4799  * @param[in] action
4800  *   Pointer to the modify action.
4801  * @param[in] attr
4802  *   Pointer to the flow attributes.
4803  * @param[out] error
4804  *   Pointer to error structure.
4805  *
4806  * @return
4807  *   Number of header fields to modify (0 or more) on success,
4808  *   a negative errno value otherwise and rte_errno is set.
4809  */
4810 static int
4811 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4812                                    const uint64_t action_flags,
4813                                    const struct rte_flow_action *action,
4814                                    const struct rte_flow_attr *attr,
4815                                    struct rte_flow_error *error)
4816 {
4817         int ret = 0;
4818         struct mlx5_priv *priv = dev->data->dev_private;
4819         struct mlx5_dev_config *config = &priv->config;
4820         const struct rte_flow_action_modify_field *action_modify_field =
4821                 action->conf;
4822         uint32_t dst_width = mlx5_flow_item_field_width(dev,
4823                                 action_modify_field->dst.field,
4824                                 -1, attr, error);
4825         uint32_t src_width = mlx5_flow_item_field_width(dev,
4826                                 action_modify_field->src.field,
4827                                 dst_width, attr, error);
4828
4829         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4830         if (ret)
4831                 return ret;
4832
4833         if (action_modify_field->width == 0)
4834                 return rte_flow_error_set(error, EINVAL,
4835                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4836                                 "no bits are requested to be modified");
4837         else if (action_modify_field->width > dst_width ||
4838                  action_modify_field->width > src_width)
4839                 return rte_flow_error_set(error, EINVAL,
4840                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4841                                 "cannot modify more bits than"
4842                                 " the width of a field");
4843         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4844             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4845                 if ((action_modify_field->dst.offset +
4846                      action_modify_field->width > dst_width) ||
4847                     (action_modify_field->dst.offset % 32))
4848                         return rte_flow_error_set(error, EINVAL,
4849                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4850                                         "destination offset is too big"
4851                                         " or not aligned to 4 bytes");
4852                 if (action_modify_field->dst.level &&
4853                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4854                         return rte_flow_error_set(error, ENOTSUP,
4855                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4856                                         "inner header fields modification"
4857                                         " is not supported");
4858         }
4859         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4860             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4861                 if (!attr->transfer && !attr->group)
4862                         return rte_flow_error_set(error, ENOTSUP,
4863                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4864                                         "modify field action is not"
4865                                         " supported for group 0");
4866                 if ((action_modify_field->src.offset +
4867                      action_modify_field->width > src_width) ||
4868                     (action_modify_field->src.offset % 32))
4869                         return rte_flow_error_set(error, EINVAL,
4870                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4871                                         "source offset is too big"
4872                                         " or not aligned to 4 bytes");
4873                 if (action_modify_field->src.level &&
4874                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4875                         return rte_flow_error_set(error, ENOTSUP,
4876                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4877                                         "inner header fields modification"
4878                                         " is not supported");
4879         }
4880         if ((action_modify_field->dst.field ==
4881              action_modify_field->src.field) &&
4882             (action_modify_field->dst.level ==
4883              action_modify_field->src.level))
4884                 return rte_flow_error_set(error, EINVAL,
4885                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4886                                 "source and destination fields"
4887                                 " cannot be the same");
4888         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4889             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4890             action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4891                 return rte_flow_error_set(error, EINVAL,
4892                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4893                                 "mark, immediate value or a pointer to it"
4894                                 " cannot be used as a destination");
4895         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4896             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4897                 return rte_flow_error_set(error, ENOTSUP,
4898                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4899                                 "modification of an arbitrary"
4900                                 " place in a packet is not supported");
4901         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4902             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4903                 return rte_flow_error_set(error, ENOTSUP,
4904                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4905                                 "modification of the 802.1Q Tag"
4906                                 " Identifier is not supported");
4907         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4908             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4909                 return rte_flow_error_set(error, ENOTSUP,
4910                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4911                                 "modification of the VXLAN Network"
4912                                 " Identifier is not supported");
4913         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4914             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4915                 return rte_flow_error_set(error, ENOTSUP,
4916                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4917                                 "modification of the GENEVE Network"
4918                                 " Identifier is not supported");
4919         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4920             action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4921                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4922                     !mlx5_flow_ext_mreg_supported(dev))
4923                         return rte_flow_error_set(error, ENOTSUP,
4924                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4925                                         "cannot modify mark in legacy mode"
4926                                         " or without extensive registers");
4927         if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4928             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4929                 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
4930                     !mlx5_flow_ext_mreg_supported(dev))
4931                         return rte_flow_error_set(error, ENOTSUP,
4932                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4933                                         "cannot modify meta without"
4934                                         " extensive registers support");
4935                 ret = flow_dv_get_metadata_reg(dev, attr, error);
4936                 if (ret < 0 || ret == REG_NON)
4937                         return rte_flow_error_set(error, ENOTSUP,
4938                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4939                                         "cannot modify meta without"
4940                                         " extensive registers available");
4941         }
4942         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4943                 return rte_flow_error_set(error, ENOTSUP,
4944                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4945                                 "add and sub operations"
4946                                 " are not supported");
4947         return (action_modify_field->width / 32) +
4948                !!(action_modify_field->width % 32);
4949 }
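/*
 * The positive return value above is the number of 32-bit modify-header
 * commands needed for the requested width: one command per started
 * 32-bit word. A minimal sketch of the same computation (the helper
 * name is illustrative only):
 *
 *   static inline uint32_t
 *   modify_field_cmds_num(uint32_t width)
 *   {
 *           return width / 32 + !!(width % 32); // e.g. width 48 -> 2
 *   }
 */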
4950
4951 /**
4952  * Validate jump action.
4953  *
4954  * @param[in] action
4955  *   Pointer to the jump action.
4956  * @param[in] action_flags
4957  *   Holds the actions detected until now.
4958  * @param[in] attributes
4959  *   Pointer to flow attributes.
4960  * @param[in] external
4961  *   Action belongs to a flow rule created by a request external to the PMD.
4962  * @param[out] error
4963  *   Pointer to error structure.
4964  *
4965  * @return
4966  *   0 on success, a negative errno value otherwise and rte_errno is set.
4967  */
4968 static int
4969 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4970                              const struct mlx5_flow_tunnel *tunnel,
4971                              const struct rte_flow_action *action,
4972                              uint64_t action_flags,
4973                              const struct rte_flow_attr *attributes,
4974                              bool external, struct rte_flow_error *error)
4975 {
4976         uint32_t target_group, table = 0;
4977         int ret = 0;
4978         struct flow_grp_info grp_info = {
4979                 .external = !!external,
4980                 .transfer = !!attributes->transfer,
4981                 .fdb_def_rule = 1,
4982                 .std_tbl_fix = 0
4983         };
4984         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4985                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4986                 return rte_flow_error_set(error, EINVAL,
4987                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4988                                           "can't have 2 fate actions in"
4989                                           " the same flow");
4990         if (!action->conf)
4991                 return rte_flow_error_set(error, EINVAL,
4992                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4993                                           NULL, "action configuration not set");
4994         target_group =
4995                 ((const struct rte_flow_action_jump *)action->conf)->group;
4996         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4997                                        &grp_info, error);
4998         if (ret)
4999                 return ret;
5000         if (attributes->group == target_group &&
5001             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
5002                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
5003                 return rte_flow_error_set(error, EINVAL,
5004                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5005                                           "target group must be other than"
5006                                           " the current flow group");
5007         if (table == 0)
5008                 return rte_flow_error_set(error, EINVAL,
5009                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5010                                           NULL, "root table shouldn't be the destination");
5011         return 0;
5012 }
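/*
 * A minimal usage sketch for the JUMP action validated above, assuming
 * group 1 exists and differs from the rule's own group:
 *
 *   struct rte_flow_action_jump jump_conf = { .group = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump_conf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 * The target group must not resolve to the root table.
 */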
5013
5014 /**
5015  * Validate action PORT_ID / REPRESENTED_PORT.
5016  *
5017  * @param[in] dev
5018  *   Pointer to rte_eth_dev structure.
5019  * @param[in] action_flags
5020  *   Bit-fields that hold the actions detected until now.
5021  * @param[in] action
5022  *   PORT_ID / REPRESENTED_PORT action structure.
5023  * @param[in] attr
5024  *   Attributes of flow that includes this action.
5025  * @param[out] error
5026  *   Pointer to error structure.
5027  *
5028  * @return
5029  *   0 on success, a negative errno value otherwise and rte_errno is set.
5030  */
5031 static int
5032 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5033                                 uint64_t action_flags,
5034                                 const struct rte_flow_action *action,
5035                                 const struct rte_flow_attr *attr,
5036                                 struct rte_flow_error *error)
5037 {
5038         const struct rte_flow_action_port_id *port_id;
5039         const struct rte_flow_action_ethdev *ethdev;
5040         struct mlx5_priv *act_priv;
5041         struct mlx5_priv *dev_priv;
5042         uint16_t port;
5043
5044         if (!attr->transfer)
5045                 return rte_flow_error_set(error, ENOTSUP,
5046                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5047                                           NULL,
5048                                           "port action is valid in transfer"
5049                                           " mode only");
5050         if (!action || !action->conf)
5051                 return rte_flow_error_set(error, ENOTSUP,
5052                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5053                                           NULL,
5054                                           "port action parameters must be"
5055                                           " specified");
5056         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5057                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5058                 return rte_flow_error_set(error, EINVAL,
5059                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5060                                           "can have only one fate action in"
5061                                           " a flow");
5062         dev_priv = mlx5_dev_to_eswitch_info(dev);
5063         if (!dev_priv)
5064                 return rte_flow_error_set(error, rte_errno,
5065                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5066                                           NULL,
5067                                           "failed to obtain E-Switch info");
5068         switch (action->type) {
5069         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5070                 port_id = action->conf;
5071                 port = port_id->original ? dev->data->port_id : port_id->id;
5072                 break;
5073         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5074                 ethdev = action->conf;
5075                 port = ethdev->port_id;
5076                 break;
5077         default:
5078                 MLX5_ASSERT(false);
5079                 return rte_flow_error_set
5080                                 (error, EINVAL,
5081                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5082                                  "unknown E-Switch action");
5083         }
5084         act_priv = mlx5_port_to_eswitch_info(port, false);
5085         if (!act_priv)
5086                 return rte_flow_error_set
5087                                 (error, rte_errno,
5088                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5089                                  "failed to obtain E-Switch port id for port");
5090         if (act_priv->domain_id != dev_priv->domain_id)
5091                 return rte_flow_error_set
5092                                 (error, EINVAL,
5093                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5094                                  "port does not belong to"
5095                                  " E-Switch being configured");
5096         return 0;
5097 }
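/*
 * The two accepted fate actions differ only in how the destination port
 * is expressed; a hedged sketch with illustrative port numbers:
 *
 *   struct rte_flow_action_port_id pid = { .id = 1, .original = 0 };
 *   struct rte_flow_action_ethdev edev = { .port_id = 1 };
 *
 *   PORT_ID:          { RTE_FLOW_ACTION_TYPE_PORT_ID, &pid }
 *   REPRESENTED_PORT: { RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, &edev }
 *
 * Either way, the destination must share the E-Switch domain of the
 * device being configured.
 */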
5098
5099 /**
5100  * Get the maximum number of modify header actions.
5101  *
5102  * @param dev
5103  *   Pointer to rte_eth_dev structure.
5104  * @param root
5105  *   Whether action is on root table.
5106  *
5107  * @return
5108  *   Max number of modify header actions the device can support.
5109  */
5110 static inline unsigned int
5111 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5112                               bool root)
5113 {
5114         /*
5115          * There's no way to directly query the max capacity from FW.
5116          * The maximal value on the root table is assumed to be supported.
5117          */
5118         if (!root)
5119                 return MLX5_MAX_MODIFY_NUM;
5120         else
5121                 return MLX5_ROOT_TBL_MODIFY_NUM;
5122 }
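/*
 * For example, an action on a non-root table may carry up to
 * MLX5_MAX_MODIFY_NUM commands, while on the root table only
 * MLX5_ROOT_TBL_MODIFY_NUM commands are assumed to be accepted:
 *
 *   unsigned int max = flow_dv_modify_hdr_action_max(dev, root);
 */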
5123
5124 /**
5125  * Validate the meter action.
5126  *
5127  * @param[in] dev
5128  *   Pointer to rte_eth_dev structure.
5129  * @param[in] action_flags
5130  *   Bit-fields that hold the actions detected until now.
5131  * @param[in] item_flags
5132  *   Holds the items detected.
5133  * @param[in] action
5134  *   Pointer to the meter action.
5135  * @param[in] attr
5136  *   Attributes of flow that includes this action.
5137  * @param[in] port_id_item
5138  *   Pointer to item indicating port id.
5139  * @param[out] error
5140  *   Pointer to error structure.
5141  *
5142  * @return
5143  *   0 on success, a negative errno value otherwise and rte_errno is set.
5144  */
5145 static int
5146 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5147                                 uint64_t action_flags, uint64_t item_flags,
5148                                 const struct rte_flow_action *action,
5149                                 const struct rte_flow_attr *attr,
5150                                 const struct rte_flow_item *port_id_item,
5151                                 bool *def_policy,
5152                                 struct rte_flow_error *error)
5153 {
5154         struct mlx5_priv *priv = dev->data->dev_private;
5155         const struct rte_flow_action_meter *am = action->conf;
5156         struct mlx5_flow_meter_info *fm;
5157         struct mlx5_flow_meter_policy *mtr_policy;
5158         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5159
5160         if (!am)
5161                 return rte_flow_error_set(error, EINVAL,
5162                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5163                                           "meter action conf is NULL");
5164
5165         if (action_flags & MLX5_FLOW_ACTION_METER)
5166                 return rte_flow_error_set(error, ENOTSUP,
5167                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5168                                           "meter chaining not supported");
5169         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5170                 return rte_flow_error_set(error, ENOTSUP,
5171                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5172                                           "meter with jump not supported");
5173         if (!priv->mtr_en)
5174                 return rte_flow_error_set(error, ENOTSUP,
5175                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5176                                           NULL,
5177                                           "meter action not supported");
5178         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5179         if (!fm)
5180                 return rte_flow_error_set(error, EINVAL,
5181                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5182                                           "Meter not found");
5183         /* ASO meter can always be shared by different domains. */
5184         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5185             !(fm->transfer == attr->transfer ||
5186               (!fm->ingress && !attr->ingress && attr->egress) ||
5187               (!fm->egress && !attr->egress && attr->ingress)))
5188                 return rte_flow_error_set(error, EINVAL,
5189                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5190                         "Flow attribute domains are either invalid "
5191                         "or conflict with the current "
5192                         "meter attributes");
5193         if (fm->def_policy) {
5194                 if (!((attr->transfer &&
5195                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5196                         (attr->egress &&
5197                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5198                         (attr->ingress &&
5199                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5200                         return rte_flow_error_set(error, EINVAL,
5201                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5202                                           "Flow attribute domains "
5203                                           "conflict with the current "
5204                                           "meter domain attributes");
5205                 *def_policy = true;
5206         } else {
5207                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5208                                                 fm->policy_id, NULL);
5209                 if (!mtr_policy)
5210                         return rte_flow_error_set(error, EINVAL,
5211                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5212                                           "Invalid policy id for meter");
5213                 if (!((attr->transfer && mtr_policy->transfer) ||
5214                         (attr->egress && mtr_policy->egress) ||
5215                         (attr->ingress && mtr_policy->ingress)))
5216                         return rte_flow_error_set(error, EINVAL,
5217                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5218                                           "Flow attribute domains "
5219                                           "conflict with the current "
5220                                           "meter domain attributes");
5221                 if (attr->transfer && mtr_policy->dev) {
5222                         /*
5223                          * When the policy has a port_id fate action,
5224                          * the flow should have the same src port as the policy.
5225                          */
5226                         struct mlx5_priv *policy_port_priv =
5227                                         mtr_policy->dev->data->dev_private;
5228                         int32_t flow_src_port = priv->representor_id;
5229
5230                         if (port_id_item) {
5231                                 const struct rte_flow_item_port_id *spec =
5232                                                         port_id_item->spec;
5233                                 struct mlx5_priv *port_priv =
5234                                         mlx5_port_to_eswitch_info(spec->id,
5235                                                                   false);
5236                                 if (!port_priv)
5237                                         return rte_flow_error_set(error,
5238                                                 rte_errno,
5239                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5240                                                 spec,
5241                                                 "Failed to get port info.");
5242                                 flow_src_port = port_priv->representor_id;
5243                         }
5244                         if (flow_src_port != policy_port_priv->representor_id)
5245                                 return rte_flow_error_set(error,
5246                                                 rte_errno,
5247                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5248                                                 NULL,
5249                                                 "Flow and meter policy "
5250                                                 "have different src port.");
5251                 } else if (mtr_policy->is_rss) {
5252                         struct mlx5_flow_meter_policy *fp;
5253                         struct mlx5_meter_policy_action_container *acg;
5254                         struct mlx5_meter_policy_action_container *acy;
5255                         const struct rte_flow_action *rss_act;
5256                         int ret;
5257
5258                         fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
5259                                                                 mtr_policy);
5260                         if (fp == NULL)
5261                                 return rte_flow_error_set(error, EINVAL,
5262                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5263                                                   "Unable to get the final "
5264                                                   "policy in the hierarchy");
5265                         acg = &fp->act_cnt[RTE_COLOR_GREEN];
5266                         acy = &fp->act_cnt[RTE_COLOR_YELLOW];
5267                         MLX5_ASSERT(acg->fate_action ==
5268                                     MLX5_FLOW_FATE_SHARED_RSS ||
5269                                     acy->fate_action ==
5270                                     MLX5_FLOW_FATE_SHARED_RSS);
5271                         if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
5272                                 rss_act = acg->rss;
5273                         else
5274                                 rss_act = acy->rss;
5275                         ret = mlx5_flow_validate_action_rss(rss_act,
5276                                         action_flags, dev, attr,
5277                                         item_flags, error);
5278                         if (ret)
5279                                 return ret;
5280                 }
5281                 *def_policy = false;
5282         }
5283         return 0;
5284 }
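/*
 * A minimal METER action sketch, assuming meter 1 was created in
 * advance with rte_mtr_create() and its policy domains match the flow
 * attributes:
 *
 *   struct rte_flow_action_meter meter_conf = { .mtr_id = 1 };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_METER,
 *           .conf = &meter_conf,
 *   };
 */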
5285
5286 /**
5287  * Validate the age action.
5288  *
5289  * @param[in] action_flags
5290  *   Holds the actions detected until now.
5291  * @param[in] action
5292  *   Pointer to the age action.
5293  * @param[in] dev
5294  *   Pointer to the Ethernet device structure.
5295  * @param[out] error
5296  *   Pointer to error structure.
5297  *
5298  * @return
5299  *   0 on success, a negative errno value otherwise and rte_errno is set.
5300  */
5301 static int
5302 flow_dv_validate_action_age(uint64_t action_flags,
5303                             const struct rte_flow_action *action,
5304                             struct rte_eth_dev *dev,
5305                             struct rte_flow_error *error)
5306 {
5307         struct mlx5_priv *priv = dev->data->dev_private;
5308         const struct rte_flow_action_age *age = action->conf;
5309
5310         if (!priv->sh->devx || (priv->sh->cmng.counter_fallback &&
5311             !priv->sh->aso_age_mng))
5312                 return rte_flow_error_set(error, ENOTSUP,
5313                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5314                                           NULL,
5315                                           "age action not supported");
5316         if (!(action->conf))
5317                 return rte_flow_error_set(error, EINVAL,
5318                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5319                                           "configuration cannot be NULL");
5320         if (!(age->timeout))
5321                 return rte_flow_error_set(error, EINVAL,
5322                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5323                                           "invalid timeout value 0");
5324         if (action_flags & MLX5_FLOW_ACTION_AGE)
5325                 return rte_flow_error_set(error, EINVAL,
5326                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5327                                           "duplicate age actions set");
5328         return 0;
5329 }
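/*
 * A minimal AGE action sketch; the timeout is expressed in seconds,
 * must be non-zero, and only one AGE action may appear in a flow:
 *
 *   struct rte_flow_action_age age_conf = { .timeout = 10 };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_AGE,
 *           .conf = &age_conf,
 *   };
 */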
5330
5331 /**
5332  * Validate the modify-header IPv4 DSCP actions.
5333  *
5334  * @param[in] action_flags
5335  *   Holds the actions detected until now.
5336  * @param[in] action
5337  *   Pointer to the modify action.
5338  * @param[in] item_flags
5339  *   Holds the items detected.
5340  * @param[out] error
5341  *   Pointer to error structure.
5342  *
5343  * @return
5344  *   0 on success, a negative errno value otherwise and rte_errno is set.
5345  */
5346 static int
5347 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5348                                          const struct rte_flow_action *action,
5349                                          const uint64_t item_flags,
5350                                          struct rte_flow_error *error)
5351 {
5352         int ret = 0;
5353
5354         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5355         if (!ret) {
5356                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5357                         return rte_flow_error_set(error, EINVAL,
5358                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5359                                                   NULL,
5360                                                   "no ipv4 item in pattern");
5361         }
5362         return ret;
5363 }
5364
5365 /**
5366  * Validate the modify-header IPv6 DSCP actions.
5367  *
5368  * @param[in] action_flags
5369  *   Holds the actions detected until now.
5370  * @param[in] action
5371  *   Pointer to the modify action.
5372  * @param[in] item_flags
5373  *   Holds the items detected.
5374  * @param[out] error
5375  *   Pointer to error structure.
5376  *
5377  * @return
5378  *   0 on success, a negative errno value otherwise and rte_errno is set.
5379  */
5380 static int
5381 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5382                                          const struct rte_flow_action *action,
5383                                          const uint64_t item_flags,
5384                                          struct rte_flow_error *error)
5385 {
5386         int ret = 0;
5387
5388         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5389         if (!ret) {
5390                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5391                         return rte_flow_error_set(error, EINVAL,
5392                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5393                                                   NULL,
5394                                                   "no ipv6 item in pattern");
5395         }
5396         return ret;
5397 }
5398
5399 int
5400 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5401                         struct mlx5_list_entry *entry, void *cb_ctx)
5402 {
5403         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5404         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5405         struct mlx5_flow_dv_modify_hdr_resource *resource =
5406                                   container_of(entry, typeof(*resource), entry);
5407         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5408
5409         key_len += ref->actions_num * sizeof(ref->actions[0]);
5410         return ref->actions_num != resource->actions_num ||
5411                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5412 }
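/*
 * The match key deliberately spans everything from 'ft_type' to the
 * last inlined command, so two cache entries compare equal only when
 * the table type, flags and every modification command are identical:
 *
 *   key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type)
 *             + ref->actions_num * sizeof(ref->actions[0]);
 */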
5413
5414 static struct mlx5_indexed_pool *
5415 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5416 {
5417         struct mlx5_indexed_pool *ipool = __atomic_load_n
5418                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5419
5420         if (!ipool) {
5421                 struct mlx5_indexed_pool *expected = NULL;
5422                 struct mlx5_indexed_pool_config cfg =
5423                     (struct mlx5_indexed_pool_config) {
5424                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5425                                                                    (index + 1) *
5426                                            sizeof(struct mlx5_modification_cmd),
5427                        .trunk_size = 64,
5428                        .grow_trunk = 3,
5429                        .grow_shift = 2,
5430                        .need_lock = 1,
5431                        .release_mem_en = !!sh->reclaim_mode,
5432                        .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
5433                        .malloc = mlx5_malloc,
5434                        .free = mlx5_free,
5435                        .type = "mlx5_modify_action_resource",
5436                 };
5437
5438                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5439                 ipool = mlx5_ipool_create(&cfg);
5440                 if (!ipool)
5441                         return NULL;
5442                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5443                                                  &expected, ipool, false,
5444                                                  __ATOMIC_SEQ_CST,
5445                                                  __ATOMIC_SEQ_CST)) {
5446                         mlx5_ipool_destroy(ipool);
5447                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5448                                                 __ATOMIC_SEQ_CST);
5449                 }
5450         }
5451         return ipool;
5452 }
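/*
 * The pool array is populated lazily with a double-checked
 * compare-and-swap: the loser of a creation race destroys its local
 * pool and reloads the winner's. A generic sketch of the idiom (names
 * are illustrative):
 *
 *   obj = __atomic_load_n(&slot, __ATOMIC_SEQ_CST);
 *   if (!obj) {
 *           void *expected = NULL;
 *           obj = create();
 *           if (!__atomic_compare_exchange_n(&slot, &expected, obj,
 *                                            false, __ATOMIC_SEQ_CST,
 *                                            __ATOMIC_SEQ_CST)) {
 *                   destroy(obj);
 *                   obj = __atomic_load_n(&slot, __ATOMIC_SEQ_CST);
 *           }
 *   }
 */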
5453
5454 struct mlx5_list_entry *
5455 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5456 {
5457         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5458         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5459         struct mlx5dv_dr_domain *ns;
5460         struct mlx5_flow_dv_modify_hdr_resource *entry;
5461         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5462         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5463                                                           ref->actions_num - 1);
5464         int ret;
5465         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5466         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5467         uint32_t idx;
5468
5469         if (unlikely(!ipool)) {
5470                 rte_flow_error_set(ctx->error, ENOMEM,
5471                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5472                                    NULL, "cannot allocate modify ipool");
5473                 return NULL;
5474         }
5475         entry = mlx5_ipool_zmalloc(ipool, &idx);
5476         if (!entry) {
5477                 rte_flow_error_set(ctx->error, ENOMEM,
5478                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5479                                    "cannot allocate resource memory");
5480                 return NULL;
5481         }
5482         rte_memcpy(&entry->ft_type,
5483                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5484                    key_len + data_len);
5485         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5486                 ns = sh->fdb_domain;
5487         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5488                 ns = sh->tx_domain;
5489         else
5490                 ns = sh->rx_domain;
5491         ret = mlx5_flow_os_create_flow_action_modify_header
5492                                         (sh->cdev->ctx, ns, entry,
5493                                          data_len, &entry->action);
5494         if (ret) {
5495                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5496                 rte_flow_error_set(ctx->error, ENOMEM,
5497                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5498                                    NULL, "cannot create modification action");
5499                 return NULL;
5500         }
5501         entry->idx = idx;
5502         return &entry->entry;
5503 }
5504
5505 struct mlx5_list_entry *
5506 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5507                         void *cb_ctx)
5508 {
5509         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5510         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5511         struct mlx5_flow_dv_modify_hdr_resource *entry;
5512         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5513         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5514         uint32_t idx;
5515
5516         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5517                                   &idx);
5518         if (!entry) {
5519                 rte_flow_error_set(ctx->error, ENOMEM,
5520                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5521                                    "cannot allocate resource memory");
5522                 return NULL;
5523         }
5524         memcpy(entry, oentry, sizeof(*entry) + data_len);
5525         entry->idx = idx;
5526         return &entry->entry;
5527 }
5528
5529 void
5530 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5531 {
5532         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5533         struct mlx5_flow_dv_modify_hdr_resource *res =
5534                 container_of(entry, typeof(*res), entry);
5535
5536         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5537 }
5538
5539 /**
5540  * Validate the sample action.
5541  *
5542  * @param[in, out] action_flags
5543  *   Holds the actions detected until now.
5544  * @param[in] action
5545  *   Pointer to the sample action.
5546  * @param[in] dev
5547  *   Pointer to the Ethernet device structure.
5548  * @param[in] attr
5549  *   Attributes of flow that includes this action.
5550  * @param[in] item_flags
5551  *   Holds the items detected.
5552  * @param[in] rss
5553  *   Pointer to the RSS action.
5554  * @param[out] sample_rss
5555  *   Pointer to the RSS action in sample action list.
5556  * @param[out] count
5557  *   Pointer to the COUNT action in sample action list.
5558  * @param[out] fdb_mirror_limit
5559  *   Pointer to the FDB mirror limitation flag.
5560  * @param[out] error
5561  *   Pointer to error structure.
5562  *
5563  * @return
5564  *   0 on success, a negative errno value otherwise and rte_errno is set.
5565  */
5566 static int
5567 flow_dv_validate_action_sample(uint64_t *action_flags,
5568                                const struct rte_flow_action *action,
5569                                struct rte_eth_dev *dev,
5570                                const struct rte_flow_attr *attr,
5571                                uint64_t item_flags,
5572                                const struct rte_flow_action_rss *rss,
5573                                const struct rte_flow_action_rss **sample_rss,
5574                                const struct rte_flow_action_count **count,
5575                                int *fdb_mirror_limit,
5576                                struct rte_flow_error *error)
5577 {
5578         struct mlx5_priv *priv = dev->data->dev_private;
5579         struct mlx5_dev_config *dev_conf = &priv->config;
5580         const struct rte_flow_action_sample *sample = action->conf;
5581         const struct rte_flow_action *act;
5582         uint64_t sub_action_flags = 0;
5583         uint16_t queue_index = 0xFFFF;
5584         int actions_n = 0;
5585         int ret;
5586
5587         if (!sample)
5588                 return rte_flow_error_set(error, EINVAL,
5589                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5590                                           "configuration cannot be NULL");
5591         if (sample->ratio == 0)
5592                 return rte_flow_error_set(error, EINVAL,
5593                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5594                                           "ratio value starts from 1");
5595         if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en))
5596                 return rte_flow_error_set(error, ENOTSUP,
5597                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5598                                           NULL,
5599                                           "sample action not supported");
5600         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5601                 return rte_flow_error_set(error, EINVAL,
5602                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5603                                           "Multiple sample actions not "
5604                                           "supported");
5605         if (*action_flags & MLX5_FLOW_ACTION_METER)
5606                 return rte_flow_error_set(error, EINVAL,
5607                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5608                                           "wrong action order, meter should "
5609                                           "be after sample action");
5610         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5611                 return rte_flow_error_set(error, EINVAL,
5612                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5613                                           "wrong action order, jump should "
5614                                           "be after sample action");
5615         if (*action_flags & MLX5_FLOW_ACTION_CT)
5616                 return rte_flow_error_set(error, EINVAL,
5617                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5618                                           "Sample after CT not supported");
5619         act = sample->actions;
5620         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5621                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5622                         return rte_flow_error_set(error, ENOTSUP,
5623                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5624                                                   act, "too many actions");
5625                 switch (act->type) {
5626                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5627                         ret = mlx5_flow_validate_action_queue(act,
5628                                                               sub_action_flags,
5629                                                               dev,
5630                                                               attr, error);
5631                         if (ret < 0)
5632                                 return ret;
5633                         queue_index = ((const struct rte_flow_action_queue *)
5634                                                         (act->conf))->index;
5635                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5636                         ++actions_n;
5637                         break;
5638                 case RTE_FLOW_ACTION_TYPE_RSS:
5639                         *sample_rss = act->conf;
5640                         ret = mlx5_flow_validate_action_rss(act,
5641                                                             sub_action_flags,
5642                                                             dev, attr,
5643                                                             item_flags,
5644                                                             error);
5645                         if (ret < 0)
5646                                 return ret;
5647                         if (rss && *sample_rss &&
5648                             ((*sample_rss)->level != rss->level ||
5649                             (*sample_rss)->types != rss->types))
5650                                 return rte_flow_error_set(error, ENOTSUP,
5651                                         RTE_FLOW_ERROR_TYPE_ACTION,
5652                                         NULL,
5653                                         "Can't use different RSS types "
5654                                         "or levels in the same flow");
5655                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5656                                 queue_index = (*sample_rss)->queue[0];
5657                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5658                         ++actions_n;
5659                         break;
5660                 case RTE_FLOW_ACTION_TYPE_MARK:
5661                         ret = flow_dv_validate_action_mark(dev, act,
5662                                                            sub_action_flags,
5663                                                            attr, error);
5664                         if (ret < 0)
5665                                 return ret;
5666                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5667                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5668                                                 MLX5_FLOW_ACTION_MARK_EXT;
5669                         else
5670                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5671                         ++actions_n;
5672                         break;
5673                 case RTE_FLOW_ACTION_TYPE_COUNT:
5674                         ret = flow_dv_validate_action_count
5675                                 (dev, false, *action_flags | sub_action_flags,
5676                                  error);
5677                         if (ret < 0)
5678                                 return ret;
5679                         *count = act->conf;
5680                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5681                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5682                         ++actions_n;
5683                         break;
5684                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5685                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5686                         ret = flow_dv_validate_action_port_id(dev,
5687                                                               sub_action_flags,
5688                                                               act,
5689                                                               attr,
5690                                                               error);
5691                         if (ret)
5692                                 return ret;
5693                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5694                         ++actions_n;
5695                         break;
5696                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5697                         ret = flow_dv_validate_action_raw_encap_decap
5698                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5699                                  &actions_n, action, item_flags, error);
5700                         if (ret < 0)
5701                                 return ret;
5702                         ++actions_n;
5703                         break;
5704                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5705                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5706                         ret = flow_dv_validate_action_l2_encap(dev,
5707                                                                sub_action_flags,
5708                                                                act, attr,
5709                                                                error);
5710                         if (ret < 0)
5711                                 return ret;
5712                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5713                         ++actions_n;
5714                         break;
5715                 default:
5716                         return rte_flow_error_set(error, ENOTSUP,
5717                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5718                                                   NULL,
5719                                                   "Optional action is not "
5720                                                   "supported");
5721                 }
5722         }
5723         if (attr->ingress && !attr->transfer) {
5724                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5725                                           MLX5_FLOW_ACTION_RSS)))
5726                         return rte_flow_error_set(error, EINVAL,
5727                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5728                                                   NULL,
5729                                                   "Ingress must have a dest "
5730                                                   "QUEUE for Sample");
5731         } else if (attr->egress && !attr->transfer) {
5732                 return rte_flow_error_set(error, ENOTSUP,
5733                                           RTE_FLOW_ERROR_TYPE_ACTION,
5734                                           NULL,
5735                                           "Sample only supports ingress "
5736                                           "or E-Switch");
5737         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5738                 MLX5_ASSERT(attr->transfer);
5739                 if (sample->ratio > 1)
5740                         return rte_flow_error_set(error, ENOTSUP,
5741                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5742                                                   NULL,
5743                                                   "E-Switch doesn't support "
5744                                                   "any optional action "
5745                                                   "for sampling");
5746                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5747                         return rte_flow_error_set(error, ENOTSUP,
5748                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5749                                                   NULL,
5750                                                   "unsupported action QUEUE");
5751                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5752                         return rte_flow_error_set(error, ENOTSUP,
5753                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5754                                                   NULL,
5755                                                   "unsupported action RSS");
5756                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5757                         return rte_flow_error_set(error, EINVAL,
5758                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5759                                                   NULL,
5760                                                   "E-Switch must have a dest "
5761                                                   "port for mirroring");
5762                 if (!priv->config.hca_attr.reg_c_preserve &&
5763                      priv->representor_id != UINT16_MAX)
5764                         *fdb_mirror_limit = 1;
5765         }
5766         /* Continue validation for Xcap actions. */
5767         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5768             (queue_index == 0xFFFF ||
5769              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5770                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5771                      MLX5_FLOW_XCAP_ACTIONS)
5772                         return rte_flow_error_set(error, ENOTSUP,
5773                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5774                                                   NULL, "encap and decap "
5775                                                   "combination isn't "
5776                                                   "supported");
5777                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5778                                                         MLX5_FLOW_ACTION_ENCAP))
5779                         return rte_flow_error_set(error, ENOTSUP,
5780                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5781                                                   NULL, "encap is not supported"
5782                                                   " for ingress traffic");
5783         }
5784         return 0;
5785 }
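/*
 * A minimal SAMPLE action sketch for the NIC ingress case validated
 * above: sample every packet (ratio 1) to queue 0 while the flow
 * continues to its regular fate:
 *
 *   struct rte_flow_action_queue q = { .index = 0 };
 *   struct rte_flow_action sub_acts[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action_sample sample_conf = {
 *           .ratio = 1,
 *           .actions = sub_acts,
 *   };
 */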
5786
5787 /**
5788  * Find existing modify-header resource or create and register a new one.
5789  *
5790  * @param[in, out] dev
5791  *   Pointer to rte_eth_dev structure.
5792  * @param[in, out] resource
5793  *   Pointer to modify-header resource.
5794  * @param[in, out] dev_flow
5795  *   Pointer to the dev_flow.
5796  * @param[out] error
5797  *   Pointer to error structure.
5798  *
5799  * @return
5800  *   0 on success, a negative errno value otherwise and rte_errno is set.
5801  */
5802 static int
5803 flow_dv_modify_hdr_resource_register
5804                         (struct rte_eth_dev *dev,
5805                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5806                          struct mlx5_flow *dev_flow,
5807                          struct rte_flow_error *error)
5808 {
5809         struct mlx5_priv *priv = dev->data->dev_private;
5810         struct mlx5_dev_ctx_shared *sh = priv->sh;
5811         uint32_t key_len = sizeof(*resource) -
5812                            offsetof(typeof(*resource), ft_type) +
5813                            resource->actions_num * sizeof(resource->actions[0]);
5814         struct mlx5_list_entry *entry;
5815         struct mlx5_flow_cb_ctx ctx = {
5816                 .error = error,
5817                 .data = resource,
5818         };
5819         struct mlx5_hlist *modify_cmds;
5820         uint64_t key64;
5821
5822         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5823                                 "hdr_modify",
5824                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5825                                 true, false, sh,
5826                                 flow_dv_modify_create_cb,
5827                                 flow_dv_modify_match_cb,
5828                                 flow_dv_modify_remove_cb,
5829                                 flow_dv_modify_clone_cb,
5830                                 flow_dv_modify_clone_free_cb);
5831         if (unlikely(!modify_cmds))
5832                 return -rte_errno;
5833         resource->root = !dev_flow->dv.group;
5834         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5835                                                                 resource->root))
5836                 return rte_flow_error_set(error, EOVERFLOW,
5837                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5838                                           "too many modify header items");
5839         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5840         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5841         if (!entry)
5842                 return -rte_errno;
5843         resource = container_of(entry, typeof(*resource), entry);
5844         dev_flow->handle->dvh.modify_hdr = resource;
5845         return 0;
5846 }
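/*
 * The hash key is a raw checksum over exactly the byte range the match
 * callback compares, so equal resources always land in the same hash
 * bucket:
 *
 *   key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
 */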
5847
5848 /**
5849  * Get DV flow counter by index.
5850  *
5851  * @param[in] dev
5852  *   Pointer to the Ethernet device structure.
5853  * @param[in] idx
5854  *   mlx5 flow counter index in the container.
5855  * @param[out] ppool
5856  *   mlx5 flow counter pool in the container.
5857  *
5858  * @return
5859  *   Pointer to the counter, NULL otherwise.
5860  */
5861 static struct mlx5_flow_counter *
5862 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5863                            uint32_t idx,
5864                            struct mlx5_flow_counter_pool **ppool)
5865 {
5866         struct mlx5_priv *priv = dev->data->dev_private;
5867         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5868         struct mlx5_flow_counter_pool *pool;
5869
5870         /* Decrease to original index and clear shared bit. */
5871         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5872         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5873         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5874         MLX5_ASSERT(pool);
5875         if (ppool)
5876                 *ppool = pool;
5877         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5878 }
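/*
 * Counter indices are 1-based and may carry a shared-counter flag at
 * MLX5_CNT_SHARED_OFFSET; a hedged decoding sketch:
 *
 *   idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1); // 0-based, flag cleared
 *   pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL]; // owning pool
 *   cnt = MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
 */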
5879
5880 /**
5881  * Check the devx counter belongs to the pool.
5882  *
5883  * @param[in] pool
5884  *   Pointer to the counter pool.
5885  * @param[in] id
5886  *   The counter devx ID.
5887  *
5888  * @return
5889  *   True if counter belongs to the pool, false otherwise.
5890  */
5891 static bool
5892 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5893 {
5894         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5895                    MLX5_COUNTERS_PER_POOL;
5896
5897         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5898                 return true;
5899         return false;
5900 }
5901
5902 /**
5903  * Get a pool by devx counter ID.
5904  *
5905  * @param[in] cmng
5906  *   Pointer to the counter management.
5907  * @param[in] id
5908  *   The counter devx ID.
5909  *
5910  * @return
5911  *   The counter pool pointer if it exists, NULL otherwise.
5912  */
5913 static struct mlx5_flow_counter_pool *
5914 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5915 {
5916         uint32_t i;
5917         struct mlx5_flow_counter_pool *pool = NULL;
5918
5919         rte_spinlock_lock(&cmng->pool_update_sl);
5920         /* Check last used pool. */
5921         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5922             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5923                 pool = cmng->pools[cmng->last_pool_idx];
5924                 goto out;
5925         }
5926         /* ID out of range means no suitable pool in the container. */
5927         if (id > cmng->max_id || id < cmng->min_id)
5928                 goto out;
5929         /*
5930          * Search the pools from the end of the container, since counter
5931          * IDs mostly increase sequentially, so the last pool is likely
5932          * the needed one.
5933          */
5934         i = cmng->n_valid;
5935         while (i--) {
5936                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5937
5938                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5939                         pool = pool_tmp;
5940                         break;
5941                 }
5942         }
5943 out:
5944         rte_spinlock_unlock(&cmng->pool_update_sl);
5945         return pool;
5946 }
5947
5948 /**
5949  * Resize a counter container.
5950  *
5951  * @param[in] dev
5952  *   Pointer to the Ethernet device structure.
5953  *
5954  * @return
5955  *   0 on success, otherwise negative errno value and rte_errno is set.
5956  */
5957 static int
5958 flow_dv_container_resize(struct rte_eth_dev *dev)
5959 {
5960         struct mlx5_priv *priv = dev->data->dev_private;
5961         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5962         void *old_pools = cmng->pools;
5963         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5964         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5965         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5966
5967         if (!pools) {
5968                 rte_errno = ENOMEM;
5969                 return -ENOMEM;
5970         }
5971         if (old_pools)
5972                 memcpy(pools, old_pools, cmng->n *
5973                                        sizeof(struct mlx5_flow_counter_pool *));
5974         cmng->n = resize;
5975         cmng->pools = pools;
5976         if (old_pools)
5977                 mlx5_free(old_pools);
5978         return 0;
5979 }
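/*
 * Resizing is a plain grow-and-copy: a larger zeroed array is
 * allocated, the live pool pointers are copied over, and the new array
 * is published in cmng->pools. The caller is expected to hold
 * cmng->pool_update_sl across the resize, so lock holders never see a
 * half-copied array.
 */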
5980
5981 /**
5982  * Query a devx flow counter.
5983  *
5984  * @param[in] dev
5985  *   Pointer to the Ethernet device structure.
5986  * @param[in] counter
5987  *   Index to the flow counter.
5988  * @param[out] pkts
5989  *   The statistics value of packets.
5990  * @param[out] bytes
5991  *   The statistics value of bytes.
5992  *
5993  * @return
5994  *   0 on success, otherwise a negative errno value and rte_errno is set.
5995  */
5996 static inline int
5997 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5998                      uint64_t *bytes)
5999 {
6000         struct mlx5_priv *priv = dev->data->dev_private;
6001         struct mlx5_flow_counter_pool *pool = NULL;
6002         struct mlx5_flow_counter *cnt;
6003         int offset;
6004
6005         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6006         MLX5_ASSERT(pool);
6007         if (priv->sh->cmng.counter_fallback)
6008                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
6009                                         0, pkts, bytes, 0, NULL, NULL, 0);
6010         rte_spinlock_lock(&pool->sl);
6011         if (!pool->raw) {
6012                 *pkts = 0;
6013                 *bytes = 0;
6014         } else {
6015                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
6016                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
6017                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
6018         }
6019         rte_spinlock_unlock(&pool->sl);
6020         return 0;
6021 }
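
/*
 * Usage sketch (hypothetical): computing a delta against previously saved
 * reset values, mirroring how flow_dv_counter_alloc() below snapshots
 * hits/bytes as a baseline right after allocation.
 */
static inline int
example_query_delta(struct rte_eth_dev *dev, uint32_t counter,
		    uint64_t base_pkts, uint64_t base_bytes,
		    uint64_t *pkts, uint64_t *bytes)
{
	int ret = _flow_dv_query_count(dev, counter, pkts, bytes);

	if (ret)
		return ret;
	/* Hardware counters only grow, so the subtraction is safe. */
	*pkts -= base_pkts;
	*bytes -= base_bytes;
	return 0;
}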
6022
6023 /**
6024  * Create and initialize a new counter pool.
6025  *
6026  * @param[in] dev
6027  *   Pointer to the Ethernet device structure.
6028  * @param[in] dcs
6029  *   The DevX counter handle.
6030  * @param[in] age
6031  *   Whether the pool is for counters that were allocated for aging.
6034  *
6035  * @return
6036  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
6037  */
6038 static struct mlx5_flow_counter_pool *
6039 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6040                     uint32_t age)
6041 {
6042         struct mlx5_priv *priv = dev->data->dev_private;
6043         struct mlx5_flow_counter_pool *pool;
6044         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6045         bool fallback = priv->sh->cmng.counter_fallback;
6046         uint32_t size = sizeof(*pool);
6047
6048         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6049         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6050         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6051         if (!pool) {
6052                 rte_errno = ENOMEM;
6053                 return NULL;
6054         }
6055         pool->raw = NULL;
6056         pool->is_aged = !!age;
6057         pool->query_gen = 0;
6058         pool->min_dcs = dcs;
6059         rte_spinlock_init(&pool->sl);
6060         rte_spinlock_init(&pool->csl);
6061         TAILQ_INIT(&pool->counters[0]);
6062         TAILQ_INIT(&pool->counters[1]);
6063         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6064         rte_spinlock_lock(&cmng->pool_update_sl);
6065         pool->index = cmng->n_valid;
6066         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6067                 mlx5_free(pool);
6068                 rte_spinlock_unlock(&cmng->pool_update_sl);
6069                 return NULL;
6070         }
6071         cmng->pools[pool->index] = pool;
6072         cmng->n_valid++;
6073         if (unlikely(fallback)) {
6074                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6075
6076                 if (base < cmng->min_id)
6077                         cmng->min_id = base;
6078                 if (base > cmng->max_id)
6079                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6080                 cmng->last_pool_idx = pool->index;
6081         }
6082         rte_spinlock_unlock(&cmng->pool_update_sl);
6083         return pool;
6084 }
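
/*
 * Illustrative sketch: the single allocation above lays out the pool header,
 * MLX5_COUNTERS_PER_POOL counter slots and, for aging pools, the same number
 * of age parameters back to back. Assuming that layout, the i-th counter
 * slot sits at a fixed offset from the pool base (example_* name is
 * hypothetical):
 */
static inline void *
example_cnt_slot(struct mlx5_flow_counter_pool *pool, uint32_t i)
{
	/* MLX5_CNT_SIZE is the per-counter slot size used above. */
	return (uint8_t *)(pool + 1) + i * MLX5_CNT_SIZE;
}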
6085
6086 /**
6087  * Prepare a new counter and/or a new counter pool.
6088  *
6089  * @param[in] dev
6090  *   Pointer to the Ethernet device structure.
6091  * @param[out] cnt_free
6092  *   Where to put the pointer of a new counter.
6093  * @param[in] age
6094  *   Whether the counter being prepared is for aging.
6095  *
6096  * @return
6097  *   The counter pool pointer and @p cnt_free is set on success,
6098  *   NULL otherwise and rte_errno is set.
6099  */
6100 static struct mlx5_flow_counter_pool *
6101 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6102                              struct mlx5_flow_counter **cnt_free,
6103                              uint32_t age)
6104 {
6105         struct mlx5_priv *priv = dev->data->dev_private;
6106         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6107         struct mlx5_flow_counter_pool *pool;
6108         struct mlx5_counters tmp_tq;
6109         struct mlx5_devx_obj *dcs = NULL;
6110         struct mlx5_flow_counter *cnt;
6111         enum mlx5_counter_type cnt_type =
6112                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6113         bool fallback = priv->sh->cmng.counter_fallback;
6114         uint32_t i;
6115
6116         if (fallback) {
6117                 /* bulk_bitmap must be 0 for single counter allocation. */
6118                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6119                 if (!dcs)
6120                         return NULL;
6121                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6122                 if (!pool) {
6123                         pool = flow_dv_pool_create(dev, dcs, age);
6124                         if (!pool) {
6125                                 mlx5_devx_cmd_destroy(dcs);
6126                                 return NULL;
6127                         }
6128                 }
6129                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6130                 cnt = MLX5_POOL_GET_CNT(pool, i);
6131                 cnt->pool = pool;
6132                 cnt->dcs_when_free = dcs;
6133                 *cnt_free = cnt;
6134                 return pool;
6135         }
6136         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6137         if (!dcs) {
6138                 rte_errno = ENODATA;
6139                 return NULL;
6140         }
6141         pool = flow_dv_pool_create(dev, dcs, age);
6142         if (!pool) {
6143                 mlx5_devx_cmd_destroy(dcs);
6144                 return NULL;
6145         }
6146         TAILQ_INIT(&tmp_tq);
6147         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6148                 cnt = MLX5_POOL_GET_CNT(pool, i);
6149                 cnt->pool = pool;
6150                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6151         }
6152         rte_spinlock_lock(&cmng->csl[cnt_type]);
6153         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6154         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6155         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6156         (*cnt_free)->pool = pool;
6157         return pool;
6158 }
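
/*
 * Usage sketch (hypothetical): a caller does not care whether the fallback
 * path (one devx object per counter) or the bulk path was taken; it only
 * consumes the returned pool and *cnt_free, e.g. to build the public
 * counter index:
 */
static inline uint32_t
example_prepare_and_index(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_flow_counter_pool *pool =
		flow_dv_counter_pool_prepare(dev, &cnt, age);

	if (!pool)
		return 0; /* rte_errno is already set. */
	return MLX5_MAKE_CNT_IDX(pool->index, MLX5_CNT_ARRAY_IDX(pool, cnt));
}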
6159
6160 /**
6161  * Allocate a flow counter.
6162  *
6163  * @param[in] dev
6164  *   Pointer to the Ethernet device structure.
6165  * @param[in] age
6166  *   Whether the counter was allocated for aging.
6167  *
6168  * @return
6169  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6170  */
6171 static uint32_t
6172 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6173 {
6174         struct mlx5_priv *priv = dev->data->dev_private;
6175         struct mlx5_flow_counter_pool *pool = NULL;
6176         struct mlx5_flow_counter *cnt_free = NULL;
6177         bool fallback = priv->sh->cmng.counter_fallback;
6178         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6179         enum mlx5_counter_type cnt_type =
6180                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6181         uint32_t cnt_idx;
6182
6183         if (!priv->sh->devx) {
6184                 rte_errno = ENOTSUP;
6185                 return 0;
6186         }
6187         /* Get free counters from container. */
6188         rte_spinlock_lock(&cmng->csl[cnt_type]);
6189         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6190         if (cnt_free)
6191                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6192         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6193         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6194                 goto err;
6195         pool = cnt_free->pool;
6196         if (fallback)
6197                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6198         /* Create a DV counter action only on first-time usage. */
6199         if (!cnt_free->action) {
6200                 uint16_t offset;
6201                 struct mlx5_devx_obj *dcs;
6202                 int ret;
6203
6204                 if (!fallback) {
6205                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6206                         dcs = pool->min_dcs;
6207                 } else {
6208                         offset = 0;
6209                         dcs = cnt_free->dcs_when_free;
6210                 }
6211                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6212                                                             &cnt_free->action);
6213                 if (ret) {
6214                         rte_errno = errno;
6215                         goto err;
6216                 }
6217         }
6218         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6219                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6220         /* Update the counter reset values. */
6221         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6222                                  &cnt_free->bytes))
6223                 goto err;
6224         if (!fallback && !priv->sh->cmng.query_thread_on)
6225                 /* Start the asynchronous batch query by the host thread. */
6226                 mlx5_set_query_alarm(priv->sh);
6227         /*
6228          * When the count action isn't shared (by ID), the shared_info
6229          * field is used for the indirect action API's refcnt.
6230          * When the counter action is shared neither by ID nor by the
6231          * indirect action API, the refcnt must be 1.
6232          */
6233         cnt_free->shared_info.refcnt = 1;
6234         return cnt_idx;
6235 err:
6236         if (cnt_free) {
6237                 cnt_free->pool = pool;
6238                 if (fallback)
6239                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6240                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6241                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6242                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6243         }
6244         return 0;
6245 }
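
/*
 * Illustrative sketch: judging from the decode in
 * flow_dv_counter_idx_get_age() below, the index returned above is biased
 * by one so that 0 can mean "no counter". A hypothetical decode helper
 * would be:
 */
static inline void
example_cnt_idx_split(uint32_t cnt_idx, uint32_t *pool_idx,
		      uint32_t *offset)
{
	*pool_idx = (cnt_idx - 1) / MLX5_COUNTERS_PER_POOL;
	*offset = (cnt_idx - 1) % MLX5_COUNTERS_PER_POOL;
}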
6246
6247 /**
6248  * Get age param from counter index.
6249  *
6250  * @param[in] dev
6251  *   Pointer to the Ethernet device structure.
6252  * @param[in] counter
6253  *   Index to the counter handle.
6254  *
6255  * @return
6256  *   The aging parameter specified for the counter index.
6257  */
6258 static struct mlx5_age_param*
6259 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6260                                 uint32_t counter)
6261 {
6262         struct mlx5_flow_counter *cnt;
6263         struct mlx5_flow_counter_pool *pool = NULL;
6264
6265         flow_dv_counter_get_by_idx(dev, counter, &pool);
6266         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6267         cnt = MLX5_POOL_GET_CNT(pool, counter);
6268         return MLX5_CNT_TO_AGE(cnt);
6269 }
6270
6271 /**
6272  * Remove a flow counter from aged counter list.
6273  *
6274  * @param[in] dev
6275  *   Pointer to the Ethernet device structure.
6276  * @param[in] counter
6277  *   Index to the counter handle.
6278  * @param[in] cnt
6279  *   Pointer to the counter handle.
6280  */
6281 static void
6282 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6283                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6284 {
6285         struct mlx5_age_info *age_info;
6286         struct mlx5_age_param *age_param;
6287         struct mlx5_priv *priv = dev->data->dev_private;
6288         uint16_t expected = AGE_CANDIDATE;
6289
6290         age_info = GET_PORT_AGE_INFO(priv);
6291         age_param = flow_dv_counter_idx_get_age(dev, counter);
6292         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6293                                          AGE_FREE, false, __ATOMIC_RELAXED,
6294                                          __ATOMIC_RELAXED)) {
6295                 /*
6296                  * We need the lock even if the age timeout expired,
6297                  * since the counter may still be in use.
6298                  */
6299                 rte_spinlock_lock(&age_info->aged_sl);
6300                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6301                 rte_spinlock_unlock(&age_info->aged_sl);
6302                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6303         }
6304 }
6305
6306 /**
6307  * Release a flow counter.
6308  *
6309  * @param[in] dev
6310  *   Pointer to the Ethernet device structure.
6311  * @param[in] counter
6312  *   Index to the counter handler.
6313  */
6314 static void
6315 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6316 {
6317         struct mlx5_priv *priv = dev->data->dev_private;
6318         struct mlx5_flow_counter_pool *pool = NULL;
6319         struct mlx5_flow_counter *cnt;
6320         enum mlx5_counter_type cnt_type;
6321
6322         if (!counter)
6323                 return;
6324         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6325         MLX5_ASSERT(pool);
6326         if (pool->is_aged) {
6327                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6328         } else {
6329                 /*
6330                  * If the counter action is shared by indirect action API,
6331                  * the atomic function reduces its references counter.
6332                  * If after the reduction the action is still referenced, the
6333                  * function returns here and does not release it.
6334                  * When the counter action is not shared by
6335                  * indirect action API, shared info is 1 before the reduction,
6336                  * so this condition is failed and function doesn't return here.
6337                  */
6338                 if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6339                                        __ATOMIC_RELAXED))
6340                         return;
6341         }
6342         cnt->pool = pool;
6343         /*
6344          * Put the counter back to the list to be updated in non-fallback
6345          * mode. Two lists are used alternately: while one is being
6346          * queried, freed counters are added to the other, selected by the
6347          * pool query_gen value. When the query finishes, that list is
6348          * appended to the global container counter list. The lists swap
6349          * when a query starts, so the query callback and this release
6350          * function always operate on different lists.
6351          */
6352         if (!priv->sh->cmng.counter_fallback) {
6353                 rte_spinlock_lock(&pool->csl);
6354                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6355                 rte_spinlock_unlock(&pool->csl);
6356         } else {
6357                 cnt->dcs_when_free = cnt->dcs_when_active;
6358                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6359                                            MLX5_COUNTER_TYPE_ORIGIN;
6360                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6361                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6362                                   cnt, next);
6363                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6364         }
6365 }
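
/*
 * Illustrative sketch: the refcnt handling above implies that every
 * additional indirect-action user must take one reference before reusing
 * the counter, so that only the last release actually frees it (example_*
 * name is hypothetical):
 */
static inline void
example_share_counter(struct mlx5_flow_counter *cnt)
{
	__atomic_fetch_add(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED);
}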
6366
6367 /**
6368  * Resize a meter ID container.
6369  *
6370  * @param[in] dev
6371  *   Pointer to the Ethernet device structure.
6372  *
6373  * @return
6374  *   0 on success, otherwise negative errno value and rte_errno is set.
6375  */
6376 static int
6377 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6378 {
6379         struct mlx5_priv *priv = dev->data->dev_private;
6380         struct mlx5_aso_mtr_pools_mng *pools_mng =
6381                                 &priv->sh->mtrmng->pools_mng;
6382         void *old_pools = pools_mng->pools;
6383         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6384         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6385         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6386
6387         if (!pools) {
6388                 rte_errno = ENOMEM;
6389                 return -ENOMEM;
6390         }
6391         if (!pools_mng->n)
6392                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6393                         mlx5_free(pools);
6394                         return -ENOMEM;
6395                 }
6396         if (old_pools)
6397                 memcpy(pools, old_pools, pools_mng->n *
6398                                        sizeof(struct mlx5_aso_mtr_pool *));
6399         pools_mng->n = resize;
6400         pools_mng->pools = pools;
6401         if (old_pools)
6402                 mlx5_free(old_pools);
6403         return 0;
6404 }
6405
6406 /**
6407  * Create a new ASO meter pool and prepare a free meter.
6408  *
6409  * @param[in] dev
6410  *   Pointer to the Ethernet device structure.
6411  * @param[out] mtr_free
6412  *   Where to put the pointer of a new meter.
6413  *
6414  * @return
6415  *   The meter pool pointer and @p mtr_free is set on success,
6416  *   NULL otherwise and rte_errno is set.
6417  */
6418 static struct mlx5_aso_mtr_pool *
6419 flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6420 {
6421         struct mlx5_priv *priv = dev->data->dev_private;
6422         struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6423         struct mlx5_aso_mtr_pool *pool = NULL;
6424         struct mlx5_devx_obj *dcs = NULL;
6425         uint32_t i;
6426         uint32_t log_obj_size;
6427
6428         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6429         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6430                                                       priv->sh->cdev->pdn,
6431                                                       log_obj_size);
6432         if (!dcs) {
6433                 rte_errno = ENODATA;
6434                 return NULL;
6435         }
6436         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6437         if (!pool) {
6438                 rte_errno = ENOMEM;
6439                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6440                 return NULL;
6441         }
6442         pool->devx_obj = dcs;
6443         rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
6444         pool->index = pools_mng->n_valid;
6445         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6446                 mlx5_free(pool);
6447                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6448                 rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6449                 return NULL;
6450         }
6451         pools_mng->pools[pool->index] = pool;
6452         pools_mng->n_valid++;
6453         rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6454         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6455                 pool->mtrs[i].offset = i;
6456                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6457         }
6458         pool->mtrs[0].offset = 0;
6459         *mtr_free = &pool->mtrs[0];
6460         return pool;
6461 }
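
/*
 * Illustrative sketch: a meter keeps no back pointer to its pool; the pool
 * is recovered from the meter's offset with container_of(), the same
 * pattern flow_dv_mtr_alloc() uses below:
 */
static inline struct mlx5_aso_mtr_pool *
example_mtr_pool_of(struct mlx5_aso_mtr *mtr)
{
	return container_of(mtr, struct mlx5_aso_mtr_pool,
			    mtrs[mtr->offset]);
}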
6462
6463 /**
6464  * Release a flow meter back into its pool.
6465  *
6466  * @param[in] dev
6467  *   Pointer to the Ethernet device structure.
6468  * @param[in] mtr_idx
6469  *   Index to the ASO flow meter.
6470  */
6471 static void
6472 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6473 {
6474         struct mlx5_priv *priv = dev->data->dev_private;
6475         struct mlx5_aso_mtr_pools_mng *pools_mng =
6476                                 &priv->sh->mtrmng->pools_mng;
6477         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6478
6479         MLX5_ASSERT(aso_mtr);
6480         rte_spinlock_lock(&pools_mng->mtrsl);
6481         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6482         aso_mtr->state = ASO_METER_FREE;
6483         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6484         rte_spinlock_unlock(&pools_mng->mtrsl);
6485 }
6486
6487 /**
6488  * Allocate an ASO flow meter.
6489  *
6490  * @param[in] dev
6491  *   Pointer to the Ethernet device structure.
6492  *
6493  * @return
6494  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6495  */
6496 static uint32_t
6497 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6498 {
6499         struct mlx5_priv *priv = dev->data->dev_private;
6500         struct mlx5_aso_mtr *mtr_free = NULL;
6501         struct mlx5_aso_mtr_pools_mng *pools_mng =
6502                                 &priv->sh->mtrmng->pools_mng;
6503         struct mlx5_aso_mtr_pool *pool;
6504         uint32_t mtr_idx = 0;
6505
6506         if (!priv->sh->devx) {
6507                 rte_errno = ENOTSUP;
6508                 return 0;
6509         }
6510         /* Get a free meter from the pools management. */
6511         /* A new pool is created when the free list is empty. */
6512         rte_spinlock_lock(&pools_mng->mtrsl);
6513         mtr_free = LIST_FIRST(&pools_mng->meters);
6514         if (mtr_free)
6515                 LIST_REMOVE(mtr_free, next);
6516         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6517                 rte_spinlock_unlock(&pools_mng->mtrsl);
6518                 return 0;
6519         }
6520         mtr_free->state = ASO_METER_WAIT;
6521         rte_spinlock_unlock(&pools_mng->mtrsl);
6522         pool = container_of(mtr_free,
6523                         struct mlx5_aso_mtr_pool,
6524                         mtrs[mtr_free->offset]);
6525         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6526         if (!mtr_free->fm.meter_action) {
6527 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6528                 struct rte_flow_error error;
6529                 uint8_t reg_id;
6530
6531                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6532                 mtr_free->fm.meter_action =
6533                         mlx5_glue->dv_create_flow_action_aso
6534                                                 (priv->sh->rx_domain,
6535                                                  pool->devx_obj->obj,
6536                                                  mtr_free->offset,
6537                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6538                                                  reg_id - REG_C_0);
6539 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6540                 if (!mtr_free->fm.meter_action) {
6541                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6542                         return 0;
6543                 }
6544         }
6545         return mtr_idx;
6546 }
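
/*
 * Usage sketch (hypothetical): an allocate/release round trip using only
 * helpers that appear in this file:
 */
static inline int
example_mtr_roundtrip(struct rte_eth_dev *dev)
{
	uint32_t idx = flow_dv_mtr_alloc(dev);

	if (!idx)
		return -rte_errno; /* rte_errno set by the allocator. */
	/* ... attach idx to a flow here ... */
	flow_dv_aso_mtr_release_to_pool(dev, idx);
	return 0;
}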
6547
6548 /**
6549  * Verify the @p attributes will be correctly understood by the NIC and
6550  * are valid for the current device configuration.
6551  *
6552  * @param[in] dev
6553  *   Pointer to dev struct.
6554  * @param[in] attributes
6555  *   Pointer to flow attributes.
6556  * @param[in] grp_info
6557  *   Pointer to the group translation info.
6558  * @param[out] error
6559  *   Pointer to error structure.
6560  *
6561  * @return
6562  *   - 0 on success and non-root table.
6563  *   - 1 on success and root table.
6564  *   - a negative errno value otherwise and rte_errno is set.
6565  */
6566 static int
6567 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6568                             const struct mlx5_flow_tunnel *tunnel,
6569                             const struct rte_flow_attr *attributes,
6570                             const struct flow_grp_info *grp_info,
6571                             struct rte_flow_error *error)
6572 {
6573         struct mlx5_priv *priv = dev->data->dev_private;
6574         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6575         int ret = 0;
6576
6577 #ifndef HAVE_MLX5DV_DR
6578         RTE_SET_USED(tunnel);
6579         RTE_SET_USED(grp_info);
6580         if (attributes->group)
6581                 return rte_flow_error_set(error, ENOTSUP,
6582                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6583                                           NULL,
6584                                           "groups are not supported");
6585 #else
6586         uint32_t table = 0;
6587
6588         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6589                                        grp_info, error);
6590         if (ret)
6591                 return ret;
6592         if (!table)
6593                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6594 #endif
6595         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6596             attributes->priority > lowest_priority)
6597                 return rte_flow_error_set(error, ENOTSUP,
6598                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6599                                           NULL,
6600                                           "priority out of range");
6601         if (attributes->transfer) {
6602                 if (!priv->config.dv_esw_en)
6603                         return rte_flow_error_set
6604                                 (error, ENOTSUP,
6605                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6606                                  "E-Switch dr is not supported");
6607                 if (!(priv->representor || priv->master))
6608                         return rte_flow_error_set
6609                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6610                                  NULL, "E-Switch configuration can only be"
6611                                  " done by a master or a representor device");
6612                 if (attributes->egress)
6613                         return rte_flow_error_set
6614                                 (error, ENOTSUP,
6615                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6616                                  "egress is not supported");
6617         }
6618         if (!(attributes->egress ^ attributes->ingress))
6619                 return rte_flow_error_set(error, ENOTSUP,
6620                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6621                                           "must specify exactly one of "
6622                                           "ingress or egress");
6623         return ret;
6624 }
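
/*
 * Usage sketch: the tri-state return is consumed as in flow_dv_validate()
 * below - a negative value is an error, otherwise the value tells whether
 * the rule lands on the root table (example_* name is hypothetical):
 */
static inline int
example_is_root_attr(struct rte_eth_dev *dev,
		     const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	struct flow_grp_info grp_info = { .std_tbl_fix = true };
	int ret = flow_dv_validate_attributes(dev, NULL, attr, &grp_info,
					      error);

	return ret; /* < 0: error, 0: non-root table, 1: root table. */
}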
6625
6626 static int
6627 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6628                         int64_t pattern_flags, uint64_t l3_flags,
6629                         uint64_t l4_flags, uint64_t ip4_flag,
6630                         struct rte_flow_error *error)
6631 {
6632         if (mask->l3_ok && !(pattern_flags & l3_flags))
6633                 return rte_flow_error_set(error, EINVAL,
6634                                           RTE_FLOW_ERROR_TYPE_ITEM,
6635                                           NULL, "missing L3 protocol");
6636
6637         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6638                 return rte_flow_error_set(error, EINVAL,
6639                                           RTE_FLOW_ERROR_TYPE_ITEM,
6640                                           NULL, "missing IPv4 protocol");
6641
6642         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6643                 return rte_flow_error_set(error, EINVAL,
6644                                           RTE_FLOW_ERROR_TYPE_ITEM,
6645                                           NULL, "missing L4 protocol");
6646
6647         return 0;
6648 }
6649
6650 static int
6651 flow_dv_validate_item_integrity_post(const struct
6652                                      rte_flow_item *integrity_items[2],
6653                                      int64_t pattern_flags,
6654                                      struct rte_flow_error *error)
6655 {
6656         const struct rte_flow_item_integrity *mask;
6657         int ret;
6658
6659         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6660                 mask = (typeof(mask))integrity_items[0]->mask;
6661                 ret = validate_integrity_bits(mask, pattern_flags,
6662                                               MLX5_FLOW_LAYER_OUTER_L3,
6663                                               MLX5_FLOW_LAYER_OUTER_L4,
6664                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6665                                               error);
6666                 if (ret)
6667                         return ret;
6668         }
6669         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6670                 mask = (typeof(mask))integrity_items[1]->mask;
6671                 ret = validate_integrity_bits(mask, pattern_flags,
6672                                               MLX5_FLOW_LAYER_INNER_L3,
6673                                               MLX5_FLOW_LAYER_INNER_L4,
6674                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6675                                               error);
6676                 if (ret)
6677                         return ret;
6678         }
6679         return 0;
6680 }
6681
6682 static int
6683 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6684                                 const struct rte_flow_item *integrity_item,
6685                                 uint64_t pattern_flags, uint64_t *last_item,
6686                                 const struct rte_flow_item *integrity_items[2],
6687                                 struct rte_flow_error *error)
6688 {
6689         struct mlx5_priv *priv = dev->data->dev_private;
6690         const struct rte_flow_item_integrity *mask = (typeof(mask))
6691                                                      integrity_item->mask;
6692         const struct rte_flow_item_integrity *spec = (typeof(spec))
6693                                                      integrity_item->spec;
6694
6695         if (!priv->config.hca_attr.pkt_integrity_match)
6696                 return rte_flow_error_set(error, ENOTSUP,
6697                                           RTE_FLOW_ERROR_TYPE_ITEM,
6698                                           integrity_item,
6699                                           "packet integrity item not supported");
6700         if (!spec)
6701                 return rte_flow_error_set(error, ENOTSUP,
6702                                           RTE_FLOW_ERROR_TYPE_ITEM,
6703                                           integrity_item,
6704                                           "no spec for integrity item");
6705         if (!mask)
6706                 mask = &rte_flow_item_integrity_mask;
6707         if (!mlx5_validate_integrity_item(mask))
6708                 return rte_flow_error_set(error, ENOTSUP,
6709                                           RTE_FLOW_ERROR_TYPE_ITEM,
6710                                           integrity_item,
6711                                           "unsupported integrity filter");
6712         if (spec->level > 1) {
6713                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6714                         return rte_flow_error_set
6715                                 (error, ENOTSUP,
6716                                  RTE_FLOW_ERROR_TYPE_ITEM,
6717                                  NULL, "multiple inner integrity items not supported");
6718                 integrity_items[1] = integrity_item;
6719                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6720         } else {
6721                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6722                         return rte_flow_error_set
6723                                 (error, ENOTSUP,
6724                                  RTE_FLOW_ERROR_TYPE_ITEM,
6725                                  NULL, "multiple outer integrity items not supported");
6726                 integrity_items[0] = integrity_item;
6727                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6728         }
6729         return 0;
6730 }
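
/*
 * Illustrative sketch: per the level check above, level > 1 selects the
 * inner-most headers. A pattern fragment requesting inner L3/L4 integrity
 * bits could use a spec like this:
 */
static const struct rte_flow_item_integrity example_integrity_inner
		__rte_unused = {
	.level = 2,	/* > 1: inner headers. */
	.l3_ok = 1,
	.l4_ok = 1,
};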
6731
6732 static int
6733 flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6734                            const struct rte_flow_item *item,
6735                            uint64_t item_flags,
6736                            uint64_t *last_item,
6737                            bool is_inner,
6738                            struct rte_flow_error *error)
6739 {
6740         const struct rte_flow_item_flex *flow_spec = item->spec;
6741         const struct rte_flow_item_flex *flow_mask = item->mask;
6742         struct mlx5_flex_item *flex;
6743
6744         if (!flow_spec)
6745                 return rte_flow_error_set(error, EINVAL,
6746                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6747                                           "flex flow item spec cannot be NULL");
6748         if (!flow_mask)
6749                 return rte_flow_error_set(error, EINVAL,
6750                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6751                                           "flex flow item mask cannot be NULL");
6752         if (item->last)
6753                 return rte_flow_error_set(error, ENOTSUP,
6754                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6755                                           "flex flow item last not supported");
6756         if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6757                 return rte_flow_error_set(error, EINVAL,
6758                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6759                                           "invalid flex flow item handle");
6760         flex = (struct mlx5_flex_item *)flow_spec->handle;
6761         switch (flex->tunnel_mode) {
6762         case FLEX_TUNNEL_MODE_SINGLE:
6763                 if (item_flags &
6764                     (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
6765                         return rte_flow_error_set(error, EINVAL,
6766                                            RTE_FLOW_ERROR_TYPE_ITEM,
6767                                            NULL, "multiple flex items not supported");
6768                 break;
6769         case FLEX_TUNNEL_MODE_OUTER:
6770                 if (is_inner)
6771                         return rte_flow_error_set(error, EINVAL,
6772                                            RTE_FLOW_ERROR_TYPE_ITEM,
6773                                            NULL, "inner flex item was not configured");
6774                 if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
6775                         return rte_flow_error_set(error, ENOTSUP,
6776                                            RTE_FLOW_ERROR_TYPE_ITEM,
6777                                            NULL, "multiple flex items not supported");
6778                 break;
6779         case FLEX_TUNNEL_MODE_INNER:
6780                 if (!is_inner)
6781                         return rte_flow_error_set(error, EINVAL,
6782                                            RTE_FLOW_ERROR_TYPE_ITEM,
6783                                            NULL, "outer flex item was not configured");
6784                 if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
6785                         return rte_flow_error_set(error, EINVAL,
6786                                            RTE_FLOW_ERROR_TYPE_ITEM,
6787                                            NULL, "multiple flex items not supported");
6788                 break;
6789         case FLEX_TUNNEL_MODE_MULTI:
6790                 if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
6791                     (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
6792                         return rte_flow_error_set(error, EINVAL,
6793                                            RTE_FLOW_ERROR_TYPE_ITEM,
6794                                            NULL, "multiple flex items not supported");
6795                 }
6796                 break;
6797         case FLEX_TUNNEL_MODE_TUNNEL:
6798                 if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
6799                         return rte_flow_error_set(error, EINVAL,
6800                                            RTE_FLOW_ERROR_TYPE_ITEM,
6801                                            NULL, "multiple flex tunnel items not supported");
6802                 break;
6803         default:
6804                 return rte_flow_error_set(error, EINVAL,
6805                                    RTE_FLOW_ERROR_TYPE_ITEM,
6806                                    NULL, "invalid flex item configuration");
6807         }
6808         *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6809                      MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6810                      MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6811         return 0;
6812 }
6813
6814 /**
6815  * Internal validation function for validating both actions and items.
6816  *
6817  * @param[in] dev
6818  *   Pointer to the rte_eth_dev structure.
6819  * @param[in] attr
6820  *   Pointer to the flow attributes.
6821  * @param[in] items
6822  *   Pointer to the list of items.
6823  * @param[in] actions
6824  *   Pointer to the list of actions.
6825  * @param[in] external
6826  *   This flow rule is created by a request external to the PMD.
6827  * @param[in] hairpin
6828  *   Number of hairpin TX actions, 0 means classic flow.
6829  * @param[out] error
6830  *   Pointer to the error structure.
6831  *
6832  * @return
6833  *   0 on success, a negative errno value otherwise and rte_errno is set.
6834  */
6835 static int
6836 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6837                  const struct rte_flow_item items[],
6838                  const struct rte_flow_action actions[],
6839                  bool external, int hairpin, struct rte_flow_error *error)
6840 {
6841         int ret;
6842         uint64_t action_flags = 0;
6843         uint64_t item_flags = 0;
6844         uint64_t last_item = 0;
6845         uint8_t next_protocol = 0xff;
6846         uint16_t ether_type = 0;
6847         int actions_n = 0;
6848         uint8_t item_ipv6_proto = 0;
6849         int fdb_mirror_limit = 0;
6850         int modify_after_mirror = 0;
6851         const struct rte_flow_item *geneve_item = NULL;
6852         const struct rte_flow_item *gre_item = NULL;
6853         const struct rte_flow_item *gtp_item = NULL;
6854         const struct rte_flow_action_raw_decap *decap;
6855         const struct rte_flow_action_raw_encap *encap;
6856         const struct rte_flow_action_rss *rss = NULL;
6857         const struct rte_flow_action_rss *sample_rss = NULL;
6858         const struct rte_flow_action_count *sample_count = NULL;
6859         const struct rte_flow_item_tcp nic_tcp_mask = {
6860                 .hdr = {
6861                         .tcp_flags = 0xFF,
6862                         .src_port = RTE_BE16(UINT16_MAX),
6863                         .dst_port = RTE_BE16(UINT16_MAX),
6864                 }
6865         };
6866         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6867                 .hdr = {
6868                         .src_addr =
6869                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6870                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6871                         .dst_addr =
6872                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6873                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6874                         .vtc_flow = RTE_BE32(0xffffffff),
6875                         .proto = 0xff,
6876                         .hop_limits = 0xff,
6877                 },
6878                 .has_frag_ext = 1,
6879         };
6880         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6881                 .hdr = {
6882                         .common = {
6883                                 .u32 =
6884                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6885                                         .type = 0xFF,
6886                                         }).u32),
6887                         },
6888                         .dummy[0] = 0xffffffff,
6889                 },
6890         };
6891         struct mlx5_priv *priv = dev->data->dev_private;
6892         struct mlx5_dev_config *dev_conf = &priv->config;
6893         uint16_t queue_index = 0xFFFF;
6894         const struct rte_flow_item_vlan *vlan_m = NULL;
6895         uint32_t rw_act_num = 0;
6896         uint64_t is_root;
6897         const struct mlx5_flow_tunnel *tunnel;
6898         enum mlx5_tof_rule_type tof_rule_type;
6899         struct flow_grp_info grp_info = {
6900                 .external = !!external,
6901                 .transfer = !!attr->transfer,
6902                 .fdb_def_rule = !!priv->fdb_def_rule,
6903                 .std_tbl_fix = true,
6904         };
6905         const struct rte_eth_hairpin_conf *conf;
6906         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6907         const struct rte_flow_item *port_id_item = NULL;
6908         bool def_policy = false;
6909         uint16_t udp_dport = 0;
6910
6911         if (items == NULL)
6912                 return -1;
6913         tunnel = is_tunnel_offload_active(dev) ?
6914                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6915         if (tunnel) {
6916                 if (!priv->config.dv_flow_en)
6917                         return rte_flow_error_set
6918                                 (error, ENOTSUP,
6919                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6920                                  NULL, "tunnel offload requires DV flow interface");
6921                 if (priv->representor)
6922                         return rte_flow_error_set
6923                                 (error, ENOTSUP,
6924                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6925                                  NULL, "decap not supported for VF representor");
6926                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6927                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6928                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6929                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6930                                         MLX5_FLOW_ACTION_DECAP;
6931                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6932                                         (dev, attr, tunnel, tof_rule_type);
6933         }
6934         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6935         if (ret < 0)
6936                 return ret;
6937         is_root = (uint64_t)ret;
6938         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6939                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6940                 int type = items->type;
6941
6942                 if (!mlx5_flow_os_item_supported(type))
6943                         return rte_flow_error_set(error, ENOTSUP,
6944                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6945                                                   NULL, "item not supported");
6946                 switch (type) {
6947                 case RTE_FLOW_ITEM_TYPE_VOID:
6948                         break;
6949                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6950                         ret = flow_dv_validate_item_port_id
6951                                         (dev, items, attr, item_flags, error);
6952                         if (ret < 0)
6953                                 return ret;
6954                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6955                         port_id_item = items;
6956                         break;
6957                 case RTE_FLOW_ITEM_TYPE_ETH:
6958                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6959                                                           true, error);
6960                         if (ret < 0)
6961                                 return ret;
6962                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6963                                              MLX5_FLOW_LAYER_OUTER_L2;
6964                         if (items->mask != NULL && items->spec != NULL) {
6965                                 ether_type =
6966                                         ((const struct rte_flow_item_eth *)
6967                                          items->spec)->type;
6968                                 ether_type &=
6969                                         ((const struct rte_flow_item_eth *)
6970                                          items->mask)->type;
6971                                 ether_type = rte_be_to_cpu_16(ether_type);
6972                         } else {
6973                                 ether_type = 0;
6974                         }
6975                         break;
6976                 case RTE_FLOW_ITEM_TYPE_VLAN:
6977                         ret = flow_dv_validate_item_vlan(items, item_flags,
6978                                                          dev, error);
6979                         if (ret < 0)
6980                                 return ret;
6981                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6982                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6983                         if (items->mask != NULL && items->spec != NULL) {
6984                                 ether_type =
6985                                         ((const struct rte_flow_item_vlan *)
6986                                          items->spec)->inner_type;
6987                                 ether_type &=
6988                                         ((const struct rte_flow_item_vlan *)
6989                                          items->mask)->inner_type;
6990                                 ether_type = rte_be_to_cpu_16(ether_type);
6991                         } else {
6992                                 ether_type = 0;
6993                         }
6994                         /* Store outer VLAN mask for of_push_vlan action. */
6995                         if (!tunnel)
6996                                 vlan_m = items->mask;
6997                         break;
6998                 case RTE_FLOW_ITEM_TYPE_IPV4:
6999                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7000                                                   &item_flags, &tunnel);
7001                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
7002                                                          last_item, ether_type,
7003                                                          error);
7004                         if (ret < 0)
7005                                 return ret;
7006                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7007                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7008                         if (items->mask != NULL &&
7009                             ((const struct rte_flow_item_ipv4 *)
7010                              items->mask)->hdr.next_proto_id) {
7011                                 next_protocol =
7012                                         ((const struct rte_flow_item_ipv4 *)
7013                                          (items->spec))->hdr.next_proto_id;
7014                                 next_protocol &=
7015                                         ((const struct rte_flow_item_ipv4 *)
7016                                          (items->mask))->hdr.next_proto_id;
7017                         } else {
7018                                 /* Reset for inner layer. */
7019                                 next_protocol = 0xff;
7020                         }
7021                         break;
7022                 case RTE_FLOW_ITEM_TYPE_IPV6:
7023                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7024                                                   &item_flags, &tunnel);
7025                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7026                                                            last_item,
7027                                                            ether_type,
7028                                                            &nic_ipv6_mask,
7029                                                            error);
7030                         if (ret < 0)
7031                                 return ret;
7032                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7033                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7034                         if (items->mask != NULL &&
7035                             ((const struct rte_flow_item_ipv6 *)
7036                              items->mask)->hdr.proto) {
7037                                 item_ipv6_proto =
7038                                         ((const struct rte_flow_item_ipv6 *)
7039                                          items->spec)->hdr.proto;
7040                                 next_protocol =
7041                                         ((const struct rte_flow_item_ipv6 *)
7042                                          items->spec)->hdr.proto;
7043                                 next_protocol &=
7044                                         ((const struct rte_flow_item_ipv6 *)
7045                                          items->mask)->hdr.proto;
7046                         } else {
7047                                 /* Reset for inner layer. */
7048                                 next_protocol = 0xff;
7049                         }
7050                         break;
7051                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7052                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7053                                                                   item_flags,
7054                                                                   error);
7055                         if (ret < 0)
7056                                 return ret;
7057                         last_item = tunnel ?
7058                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7059                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7060                         if (items->mask != NULL &&
7061                             ((const struct rte_flow_item_ipv6_frag_ext *)
7062                              items->mask)->hdr.next_header) {
7063                                 next_protocol =
7064                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7065                                  items->spec)->hdr.next_header;
7066                                 next_protocol &=
7067                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7068                                  items->mask)->hdr.next_header;
7069                         } else {
7070                                 /* Reset for inner layer. */
7071                                 next_protocol = 0xff;
7072                         }
7073                         break;
7074                 case RTE_FLOW_ITEM_TYPE_TCP:
7075                         ret = mlx5_flow_validate_item_tcp
7076                                                 (items, item_flags,
7077                                                  next_protocol,
7078                                                  &nic_tcp_mask,
7079                                                  error);
7080                         if (ret < 0)
7081                                 return ret;
7082                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7083                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7084                         break;
7085                 case RTE_FLOW_ITEM_TYPE_UDP:
7086                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7087                                                           next_protocol,
7088                                                           error);
7089                         const struct rte_flow_item_udp *spec = items->spec;
7090                         const struct rte_flow_item_udp *mask = items->mask;
7091                         if (!mask)
7092                                 mask = &rte_flow_item_udp_mask;
7093                         if (spec != NULL)
7094                                 udp_dport = rte_be_to_cpu_16
7095                                                 (spec->hdr.dst_port &
7096                                                  mask->hdr.dst_port);
7097                         if (ret < 0)
7098                                 return ret;
7099                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7100                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7101                         break;
7102                 case RTE_FLOW_ITEM_TYPE_GRE:
7103                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7104                                                           next_protocol, error);
7105                         if (ret < 0)
7106                                 return ret;
7107                         gre_item = items;
7108                         last_item = MLX5_FLOW_LAYER_GRE;
7109                         break;
7110                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7111                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7112                                                             next_protocol,
7113                                                             error);
7114                         if (ret < 0)
7115                                 return ret;
7116                         last_item = MLX5_FLOW_LAYER_NVGRE;
7117                         break;
7118                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7119                         ret = mlx5_flow_validate_item_gre_key
7120                                 (items, item_flags, gre_item, error);
7121                         if (ret < 0)
7122                                 return ret;
7123                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7124                         break;
7125                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7126                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7127                                                             items, item_flags,
7128                                                             attr, error);
7129                         if (ret < 0)
7130                                 return ret;
7131                         last_item = MLX5_FLOW_LAYER_VXLAN;
7132                         break;
7133                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7134                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7135                                                                 item_flags, dev,
7136                                                                 error);
7137                         if (ret < 0)
7138                                 return ret;
7139                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7140                         break;
7141                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7142                         ret = mlx5_flow_validate_item_geneve(items,
7143                                                              item_flags, dev,
7144                                                              error);
7145                         if (ret < 0)
7146                                 return ret;
7147                         geneve_item = items;
7148                         last_item = MLX5_FLOW_LAYER_GENEVE;
7149                         break;
7150                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7151                         ret = mlx5_flow_validate_item_geneve_opt(items,
7152                                                                  last_item,
7153                                                                  geneve_item,
7154                                                                  dev,
7155                                                                  error);
7156                         if (ret < 0)
7157                                 return ret;
7158                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7159                         break;
7160                 case RTE_FLOW_ITEM_TYPE_MPLS:
7161                         ret = mlx5_flow_validate_item_mpls(dev, items,
7162                                                            item_flags,
7163                                                            last_item, error);
7164                         if (ret < 0)
7165                                 return ret;
7166                         last_item = MLX5_FLOW_LAYER_MPLS;
7167                         break;
7169                 case RTE_FLOW_ITEM_TYPE_MARK:
7170                         ret = flow_dv_validate_item_mark(dev, items, attr,
7171                                                          error);
7172                         if (ret < 0)
7173                                 return ret;
7174                         last_item = MLX5_FLOW_ITEM_MARK;
7175                         break;
7176                 case RTE_FLOW_ITEM_TYPE_META:
7177                         ret = flow_dv_validate_item_meta(dev, items, attr,
7178                                                          error);
7179                         if (ret < 0)
7180                                 return ret;
7181                         last_item = MLX5_FLOW_ITEM_METADATA;
7182                         break;
7183                 case RTE_FLOW_ITEM_TYPE_ICMP:
7184                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7185                                                            next_protocol,
7186                                                            error);
7187                         if (ret < 0)
7188                                 return ret;
7189                         last_item = MLX5_FLOW_LAYER_ICMP;
7190                         break;
7191                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7192                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7193                                                             next_protocol,
7194                                                             error);
7195                         if (ret < 0)
7196                                 return ret;
7197                         item_ipv6_proto = IPPROTO_ICMPV6;
7198                         last_item = MLX5_FLOW_LAYER_ICMP6;
7199                         break;
7200                 case RTE_FLOW_ITEM_TYPE_TAG:
7201                         ret = flow_dv_validate_item_tag(dev, items,
7202                                                         attr, error);
7203                         if (ret < 0)
7204                                 return ret;
7205                         last_item = MLX5_FLOW_ITEM_TAG;
7206                         break;
7207                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7208                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7209                         break;
7210                 case RTE_FLOW_ITEM_TYPE_GTP:
7211                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7212                                                         error);
7213                         if (ret < 0)
7214                                 return ret;
7215                         gtp_item = items;
7216                         last_item = MLX5_FLOW_LAYER_GTP;
7217                         break;
7218                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7219                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7220                                                             gtp_item, attr,
7221                                                             error);
7222                         if (ret < 0)
7223                                 return ret;
7224                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7225                         break;
7226                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7227                         /* Capacity will be checked in the translate stage. */
7228                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7229                                                             last_item,
7230                                                             ether_type,
7231                                                             &nic_ecpri_mask,
7232                                                             error);
7233                         if (ret < 0)
7234                                 return ret;
7235                         last_item = MLX5_FLOW_LAYER_ECPRI;
7236                         break;
7237                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7238                         ret = flow_dv_validate_item_integrity(dev, items,
7239                                                               item_flags,
7240                                                               &last_item,
7241                                                               integrity_items,
7242                                                               error);
7243                         if (ret < 0)
7244                                 return ret;
7245                         break;
7246                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7247                         ret = flow_dv_validate_item_aso_ct(dev, items,
7248                                                            &item_flags, error);
7249                         if (ret < 0)
7250                                 return ret;
7251                         break;
7252                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7253                         /* Tunnel offload item was processed before;
7254                          * list it here as a supported type.
7255                          */
7256                         break;
7257                 case RTE_FLOW_ITEM_TYPE_FLEX:
7258                         ret = flow_dv_validate_item_flex(dev, items, item_flags,
7259                                                          &last_item,
7260                                                          tunnel != 0, error);
7261                         if (ret < 0)
7262                                 return ret;
7263                         break;
7264                 default:
7265                         return rte_flow_error_set(error, ENOTSUP,
7266                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7267                                                   NULL, "item not supported");
7268                 }
7269                 item_flags |= last_item;
7270         }
7271         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7272                 ret = flow_dv_validate_item_integrity_post(integrity_items,
7273                                                            item_flags, error);
7274                 if (ret)
7275                         return ret;
7276         }
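        /*
         * Walk the action list: validate each action against the flags
         * accumulated so far, count HW actions in actions_n, count
         * modify-header register writes in rw_act_num, and flag any
         * modify-header action that follows a SAMPLE mirror in
         * modify_after_mirror.
         */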
7277         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7278                 int type = actions->type;
7279                 bool shared_count = false;
7280
7281                 if (!mlx5_flow_os_action_supported(type))
7282                         return rte_flow_error_set(error, ENOTSUP,
7283                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7284                                                   actions,
7285                                                   "action not supported");
7286                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7287                         return rte_flow_error_set(error, ENOTSUP,
7288                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7289                                                   actions, "too many actions");
7290                 if (action_flags &
7291                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7292                         return rte_flow_error_set(error, ENOTSUP,
7293                                 RTE_FLOW_ERROR_TYPE_ACTION,
7294                                 NULL, "meter action with policy "
7295                                 "must be the last action");
7296                 switch (type) {
7297                 case RTE_FLOW_ACTION_TYPE_VOID:
7298                         break;
7299                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7300                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7301                         ret = flow_dv_validate_action_port_id(dev,
7302                                                               action_flags,
7303                                                               actions,
7304                                                               attr,
7305                                                               error);
7306                         if (ret)
7307                                 return ret;
7308                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7309                         ++actions_n;
7310                         break;
7311                 case RTE_FLOW_ACTION_TYPE_FLAG:
7312                         ret = flow_dv_validate_action_flag(dev, action_flags,
7313                                                            attr, error);
7314                         if (ret < 0)
7315                                 return ret;
7316                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7317                                 /* Count all modify-header actions as one. */
7318                                 if (!(action_flags &
7319                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7320                                         ++actions_n;
7321                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7322                                                 MLX5_FLOW_ACTION_MARK_EXT;
7323                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7324                                         modify_after_mirror = 1;
7326                         } else {
7327                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7328                                 ++actions_n;
7329                         }
7330                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7331                         break;
7332                 case RTE_FLOW_ACTION_TYPE_MARK:
7333                         ret = flow_dv_validate_action_mark(dev, actions,
7334                                                            action_flags,
7335                                                            attr, error);
7336                         if (ret < 0)
7337                                 return ret;
7338                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7339                                 /* Count all modify-header actions as one. */
7340                                 if (!(action_flags &
7341                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7342                                         ++actions_n;
7343                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7344                                                 MLX5_FLOW_ACTION_MARK_EXT;
7345                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7346                                         modify_after_mirror = 1;
7347                         } else {
7348                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7349                                 ++actions_n;
7350                         }
7351                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7352                         break;
7353                 case RTE_FLOW_ACTION_TYPE_SET_META:
7354                         ret = flow_dv_validate_action_set_meta(dev, actions,
7355                                                                action_flags,
7356                                                                attr, error);
7357                         if (ret < 0)
7358                                 return ret;
7359                         /* Count all modify-header actions as one action. */
7360                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7361                                 ++actions_n;
7362                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7363                                 modify_after_mirror = 1;
7364                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7365                         rw_act_num += MLX5_ACT_NUM_SET_META;
7366                         break;
7367                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7368                         ret = flow_dv_validate_action_set_tag(dev, actions,
7369                                                               action_flags,
7370                                                               attr, error);
7371                         if (ret < 0)
7372                                 return ret;
7373                         /* Count all modify-header actions as one action. */
7374                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7375                                 ++actions_n;
7376                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7377                                 modify_after_mirror = 1;
7378                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7379                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7380                         break;
7381                 case RTE_FLOW_ACTION_TYPE_DROP:
7382                         ret = mlx5_flow_validate_action_drop(action_flags,
7383                                                              attr, error);
7384                         if (ret < 0)
7385                                 return ret;
7386                         action_flags |= MLX5_FLOW_ACTION_DROP;
7387                         ++actions_n;
7388                         break;
7389                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7390                         ret = mlx5_flow_validate_action_queue(actions,
7391                                                               action_flags, dev,
7392                                                               attr, error);
7393                         if (ret < 0)
7394                                 return ret;
7395                         queue_index = ((const struct rte_flow_action_queue *)
7396                                                         (actions->conf))->index;
7397                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7398                         ++actions_n;
7399                         break;
7400                 case RTE_FLOW_ACTION_TYPE_RSS:
7401                         rss = actions->conf;
7402                         ret = mlx5_flow_validate_action_rss(actions,
7403                                                             action_flags, dev,
7404                                                             attr, item_flags,
7405                                                             error);
7406                         if (ret < 0)
7407                                 return ret;
7408                         if (rss && sample_rss &&
7409                             (sample_rss->level != rss->level ||
7410                             sample_rss->types != rss->types))
7411                                 return rte_flow_error_set(error, ENOTSUP,
7412                                         RTE_FLOW_ERROR_TYPE_ACTION,
7413                                         NULL,
7414                                         "Can't use different RSS types "
7415                                         "or levels in the same flow");
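                        /*
                         * Remember the first RSS queue: it is used later to
                         * detect hairpin RX queues when validating XCAP and
                         * VLAN actions.
                         */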
7416                         if (rss != NULL && rss->queue_num)
7417                                 queue_index = rss->queue[0];
7418                         action_flags |= MLX5_FLOW_ACTION_RSS;
7419                         ++actions_n;
7420                         break;
7421                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7422                         ret = mlx5_flow_validate_action_default_miss
7423                                                 (action_flags, attr, error);
7425                         if (ret < 0)
7426                                 return ret;
7427                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7428                         ++actions_n;
7429                         break;
7430                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7431                         shared_count = true;
7432                         /* fall-through. */
7433                 case RTE_FLOW_ACTION_TYPE_COUNT:
7434                         ret = flow_dv_validate_action_count(dev, shared_count,
7435                                                             action_flags,
7436                                                             error);
7437                         if (ret < 0)
7438                                 return ret;
7439                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7440                         ++actions_n;
7441                         break;
7442                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7443                         if (flow_dv_validate_action_pop_vlan(dev,
7444                                                              action_flags,
7445                                                              actions,
7446                                                              item_flags, attr,
7447                                                              error))
7448                                 return -rte_errno;
7449                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7450                                 modify_after_mirror = 1;
7451                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7452                         ++actions_n;
7453                         break;
7454                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7455                         ret = flow_dv_validate_action_push_vlan(dev,
7456                                                                 action_flags,
7457                                                                 vlan_m,
7458                                                                 actions, attr,
7459                                                                 error);
7460                         if (ret < 0)
7461                                 return ret;
7462                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7463                                 modify_after_mirror = 1;
7464                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7465                         ++actions_n;
7466                         break;
7467                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7468                         ret = flow_dv_validate_action_set_vlan_pcp
7469                                                 (action_flags, actions, error);
7470                         if (ret < 0)
7471                                 return ret;
7472                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7473                                 modify_after_mirror = 1;
7474                         /* Count PCP with push_vlan command. */
7475                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7476                         break;
7477                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7478                         ret = flow_dv_validate_action_set_vlan_vid
7479                                                 (item_flags, action_flags,
7480                                                  actions, error);
7481                         if (ret < 0)
7482                                 return ret;
7483                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7484                                 modify_after_mirror = 1;
7485                         /* Count VID with push_vlan command. */
7486                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7487                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7488                         break;
7489                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7490                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7491                         ret = flow_dv_validate_action_l2_encap(dev,
7492                                                                action_flags,
7493                                                                actions, attr,
7494                                                                error);
7495                         if (ret < 0)
7496                                 return ret;
7497                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7498                         ++actions_n;
7499                         break;
7500                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7501                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7502                         ret = flow_dv_validate_action_decap(dev, action_flags,
7503                                                             actions, item_flags,
7504                                                             attr, error);
7505                         if (ret < 0)
7506                                 return ret;
7507                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7508                                 modify_after_mirror = 1;
7509                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7510                         ++actions_n;
7511                         break;
7512                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7513                         ret = flow_dv_validate_action_raw_encap_decap
7514                                 (dev, NULL, actions->conf, attr, &action_flags,
7515                                  &actions_n, actions, item_flags, error);
7516                         if (ret < 0)
7517                                 return ret;
7518                         break;
7519                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7520                         decap = actions->conf;
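                        /*
                         * Peek past any VOID actions: a RAW_DECAP directly
                         * followed by RAW_ENCAP is validated as one combined
                         * decap/encap pair.
                         */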
7521                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7522                                 ;
7523                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7524                                 encap = NULL;
7525                                 actions--;
7526                         } else {
7527                                 encap = actions->conf;
7528                         }
7529                         ret = flow_dv_validate_action_raw_encap_decap
7530                                            (dev,
7531                                             decap ? decap : &empty_decap, encap,
7532                                             attr, &action_flags, &actions_n,
7533                                             actions, item_flags, error);
7534                         if (ret < 0)
7535                                 return ret;
7536                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7537                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7538                                 modify_after_mirror = 1;
7539                         break;
7540                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7541                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7542                         ret = flow_dv_validate_action_modify_mac(action_flags,
7543                                                                  actions,
7544                                                                  item_flags,
7545                                                                  error);
7546                         if (ret < 0)
7547                                 return ret;
7548                         /* Count all modify-header actions as one action. */
7549                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7550                                 ++actions_n;
7551                         action_flags |= actions->type ==
7552                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7553                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7554                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7555                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7556                                 modify_after_mirror = 1;
7557                         /*
7558                          * Even though the source and destination MAC
7559                          * addresses overlap in the 4B-aligned header, the
7560                          * convert function handles them separately: 4 SW
7561                          * actions are created, and 2 actions are added
7562                          * each time regardless of how many bytes are set.
7563                          */
7564                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7565                         break;
7566                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7567                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7568                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7569                                                                   actions,
7570                                                                   item_flags,
7571                                                                   error);
7572                         if (ret < 0)
7573                                 return ret;
7574                         /* Count all modify-header actions as one action. */
7575                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7576                                 ++actions_n;
7577                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7578                                 modify_after_mirror = 1;
7579                         action_flags |= actions->type ==
7580                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7581                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7582                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7583                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7584                         break;
7585                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7586                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7587                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7588                                                                   actions,
7589                                                                   item_flags,
7590                                                                   error);
7591                         if (ret < 0)
7592                                 return ret;
7593                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7594                                 return rte_flow_error_set(error, ENOTSUP,
7595                                         RTE_FLOW_ERROR_TYPE_ACTION,
7596                                         actions,
7597                                         "Can't change header "
7598                                         "with ICMPv6 proto");
7599                         /* Count all modify-header actions as one action. */
7600                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7601                                 ++actions_n;
7602                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7603                                 modify_after_mirror = 1;
7604                         action_flags |= actions->type ==
7605                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7606                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7607                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7608                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7609                         break;
7610                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7611                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7612                         ret = flow_dv_validate_action_modify_tp(action_flags,
7613                                                                 actions,
7614                                                                 item_flags,
7615                                                                 error);
7616                         if (ret < 0)
7617                                 return ret;
7618                         /* Count all modify-header actions as one action. */
7619                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7620                                 ++actions_n;
7621                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7622                                 modify_after_mirror = 1;
7623                         action_flags |= actions->type ==
7624                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7625                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7626                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7627                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7628                         break;
7629                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7630                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7631                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7632                                                                  actions,
7633                                                                  item_flags,
7634                                                                  error);
7635                         if (ret < 0)
7636                                 return ret;
7637                         /* Count all modify-header actions as one action. */
7638                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7639                                 ++actions_n;
7640                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7641                                 modify_after_mirror = 1;
7642                         action_flags |= actions->type ==
7643                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7644                                                 MLX5_FLOW_ACTION_SET_TTL :
7645                                                 MLX5_FLOW_ACTION_DEC_TTL;
7646                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7647                         break;
7648                 case RTE_FLOW_ACTION_TYPE_JUMP:
7649                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7650                                                            action_flags,
7651                                                            attr, external,
7652                                                            error);
7653                         if (ret)
7654                                 return ret;
7655                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7656                             fdb_mirror_limit)
7657                                 return rte_flow_error_set(error, EINVAL,
7658                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7659                                                   NULL,
7660                                                   "sample and jump action combination is not supported");
7661                         ++actions_n;
7662                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7663                         break;
7664                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7665                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7666                         ret = flow_dv_validate_action_modify_tcp_seq
7667                                                                 (action_flags,
7668                                                                  actions,
7669                                                                  item_flags,
7670                                                                  error);
7671                         if (ret < 0)
7672                                 return ret;
7673                         /* Count all modify-header actions as one action. */
7674                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7675                                 ++actions_n;
7676                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7677                                 modify_after_mirror = 1;
7678                         action_flags |= actions->type ==
7679                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7680                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7681                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7682                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7683                         break;
7684                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7685                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7686                         ret = flow_dv_validate_action_modify_tcp_ack
7687                                                                 (action_flags,
7688                                                                  actions,
7689                                                                  item_flags,
7690                                                                  error);
7691                         if (ret < 0)
7692                                 return ret;
7693                         /* Count all modify-header actions as one action. */
7694                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7695                                 ++actions_n;
7696                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7697                                 modify_after_mirror = 1;
7698                         action_flags |= actions->type ==
7699                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7700                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7701                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7702                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7703                         break;
7704                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7705                         break;
7706                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7707                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7708                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7709                         break;
7710                 case RTE_FLOW_ACTION_TYPE_METER:
7711                         ret = mlx5_flow_validate_action_meter(dev,
7712                                                               action_flags,
7713                                                               item_flags,
7714                                                               actions, attr,
7715                                                               port_id_item,
7716                                                               &def_policy,
7717                                                               error);
7718                         if (ret < 0)
7719                                 return ret;
7720                         action_flags |= MLX5_FLOW_ACTION_METER;
7721                         if (!def_policy)
7722                                 action_flags |=
7723                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7724                         ++actions_n;
7725                         /* Meter action will add one more TAG action. */
7726                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7727                         break;
7728                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7729                         if (!attr->transfer && !attr->group)
7730                                 return rte_flow_error_set(error, ENOTSUP,
7731                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7732                                         NULL, "Shared ASO age action is "
7733                                         "not supported for group 0");
7734                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7735                                 return rte_flow_error_set
7736                                                   (error, EINVAL,
7737                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7738                                                    NULL,
7739                                                    "duplicate age actions set");
7740                         action_flags |= MLX5_FLOW_ACTION_AGE;
7741                         ++actions_n;
7742                         break;
7743                 case RTE_FLOW_ACTION_TYPE_AGE:
7744                         ret = flow_dv_validate_action_age(action_flags,
7745                                                           actions, dev,
7746                                                           error);
7747                         if (ret < 0)
7748                                 return ret;
7749                         /*
7750                          * Validate the regular AGE action (using counter)
7751                          * mutual exclusion with share counter actions.
7752                          * mutual exclusion with shared counter actions.
7753                         if (!priv->sh->flow_hit_aso_en) {
7754                                 if (shared_count)
7755                                         return rte_flow_error_set
7756                                                 (error, EINVAL,
7757                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7758                                                 NULL,
7759                                                 "old age and shared count combination is not supported");
7760                                 if (sample_count)
7761                                         return rte_flow_error_set
7762                                                 (error, EINVAL,
7763                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7764                                                 NULL,
7765                                                 "old age action and count must be in the same sub flow");
7766                         }
7767                         action_flags |= MLX5_FLOW_ACTION_AGE;
7768                         ++actions_n;
7769                         break;
7770                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7771                         ret = flow_dv_validate_action_modify_ipv4_dscp
7772                                                          (action_flags,
7773                                                           actions,
7774                                                           item_flags,
7775                                                           error);
7776                         if (ret < 0)
7777                                 return ret;
7778                         /* Count all modify-header actions as one action. */
7779                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7780                                 ++actions_n;
7781                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7782                                 modify_after_mirror = 1;
7783                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7784                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7785                         break;
7786                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7787                         ret = flow_dv_validate_action_modify_ipv6_dscp
7788                                                                 (action_flags,
7789                                                                  actions,
7790                                                                  item_flags,
7791                                                                  error);
7792                         if (ret < 0)
7793                                 return ret;
7794                         /* Count all modify-header actions as one action. */
7795                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7796                                 ++actions_n;
7797                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7798                                 modify_after_mirror = 1;
7799                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7800                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7801                         break;
7802                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7803                         ret = flow_dv_validate_action_sample(&action_flags,
7804                                                              actions, dev,
7805                                                              attr, item_flags,
7806                                                              rss, &sample_rss,
7807                                                              &sample_count,
7808                                                              &fdb_mirror_limit,
7809                                                              error);
7810                         if (ret < 0)
7811                                 return ret;
7812                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7813                         ++actions_n;
7814                         break;
7815                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7816                         ret = flow_dv_validate_action_modify_field(dev,
7817                                                                    action_flags,
7818                                                                    actions,
7819                                                                    attr,
7820                                                                    error);
7821                         if (ret < 0)
7822                                 return ret;
7823                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7824                                 modify_after_mirror = 1;
7825                         /* Count all modify-header actions as one action. */
7826                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7827                                 ++actions_n;
7828                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7829                         rw_act_num += ret;
7830                         break;
7831                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7832                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7833                                                              item_flags, attr,
7834                                                              error);
7835                         if (ret < 0)
7836                                 return ret;
7837                         action_flags |= MLX5_FLOW_ACTION_CT;
7838                         break;
7839                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7840                         /* Tunnel offload action was processed before;
7841                          * list it here as a supported type.
7842                          */
7843                         break;
7844                 default:
7845                         return rte_flow_error_set(error, ENOTSUP,
7846                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7847                                                   actions,
7848                                                   "action not supported");
7849                 }
7850         }
7851         /*
7852          * Validate actions in flow rules
7853          * - Explicit decap action is prohibited by the tunnel offload API.
7854          * - Drop action in tunnel steer rule is prohibited by the API.
7855          * - Application cannot use MARK action because its value can mask
7856          *   tunnel default miss notification.
7857          * - JUMP in tunnel match rule has no support in current PMD
7858          *   implementation.
7859          * - TAG & META are reserved for future use.
7860          */
7861         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7862                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7863                                             MLX5_FLOW_ACTION_MARK     |
7864                                             MLX5_FLOW_ACTION_SET_TAG  |
7865                                             MLX5_FLOW_ACTION_SET_META |
7866                                             MLX5_FLOW_ACTION_DROP;
7867
7868                 if (action_flags & bad_actions_mask)
7869                         return rte_flow_error_set
7870                                         (error, EINVAL,
7871                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7872                                         "Invalid RTE action in tunnel "
7873                                         "set decap rule");
7874                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7875                         return rte_flow_error_set
7876                                         (error, EINVAL,
7877                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7878                                         "tunnel set decap rule must terminate "
7879                                         "with JUMP");
7880                 if (!attr->ingress)
7881                         return rte_flow_error_set
7882                                         (error, EINVAL,
7883                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7884                                         "tunnel flows for ingress traffic only");
7885         }
7886         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7887                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7888                                             MLX5_FLOW_ACTION_MARK    |
7889                                             MLX5_FLOW_ACTION_SET_TAG |
7890                                             MLX5_FLOW_ACTION_SET_META;
7891
7892                 if (action_flags & bad_actions_mask)
7893                         return rte_flow_error_set
7894                                         (error, EINVAL,
7895                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7896                                         "Invalid RTE action in tunnel "
7897                                         "match rule");
7898         }
7899         /*
7900          * Validate the drop action mutual exclusion with other actions.
7901          * Drop action is mutually-exclusive with any other action, except for
7902          * Count action.
7903          * Drop action compatibility with tunnel offload was already validated.
7904          */
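        /* The empty branch intentionally skips this check for tunnel offload rules. */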
7905         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7906                             MLX5_FLOW_ACTION_TUNNEL_SET));
7907         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7908             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7909                 return rte_flow_error_set(error, EINVAL,
7910                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7911                                           "Drop action is mutually-exclusive "
7912                                           "with any other action, except for "
7913                                           "Count action");
7914         /* Eswitch has a few restrictions on using items and actions. */
7915         if (attr->transfer) {
7916                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7917                     action_flags & MLX5_FLOW_ACTION_FLAG)
7918                         return rte_flow_error_set(error, ENOTSUP,
7919                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7920                                                   NULL,
7921                                                   "unsupported action FLAG");
7922                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7923                     action_flags & MLX5_FLOW_ACTION_MARK)
7924                         return rte_flow_error_set(error, ENOTSUP,
7925                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7926                                                   NULL,
7927                                                   "unsupported action MARK");
7928                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7929                         return rte_flow_error_set(error, ENOTSUP,
7930                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7931                                                   NULL,
7932                                                   "unsupported action QUEUE");
7933                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7934                         return rte_flow_error_set(error, ENOTSUP,
7935                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7936                                                   NULL,
7937                                                   "unsupported action RSS");
7938                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7939                         return rte_flow_error_set(error, EINVAL,
7940                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7941                                                   actions,
7942                                                   "no fate action is found");
7943         } else {
7944                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7945                         return rte_flow_error_set(error, EINVAL,
7946                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7947                                                   actions,
7948                                                   "no fate action is found");
7949         }
7950         /*
7951          * Continue validation for Xcap and VLAN actions.
7952          * If hairpin is working in explicit TX rule mode, there is no action
7953          * splitting and the validation of a hairpin ingress flow should be
7954          * the same as for other standard flows.
7955          */
7956         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7957                              MLX5_FLOW_VLAN_ACTIONS)) &&
7958             (queue_index == 0xFFFF ||
7959              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7960              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7961              conf->tx_explicit != 0))) {
7962                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7963                     MLX5_FLOW_XCAP_ACTIONS)
7964                         return rte_flow_error_set(error, ENOTSUP,
7965                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7966                                                   NULL, "encap and decap "
7967                                                   "combination isn't supported");
7968                 if (!attr->transfer && attr->ingress) {
7969                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7970                                 return rte_flow_error_set
7971                                                 (error, ENOTSUP,
7972                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7973                                                  NULL, "encap is not supported"
7974                                                  " for ingress traffic");
7975                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7976                                 return rte_flow_error_set
7977                                                 (error, ENOTSUP,
7978                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7979                                                  NULL, "push VLAN action not "
7980                                                  "supported for ingress");
7981                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7982                                         MLX5_FLOW_VLAN_ACTIONS)
7983                                 return rte_flow_error_set
7984                                                 (error, ENOTSUP,
7985                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7986                                                  NULL, "no support for "
7987                                                  "multiple VLAN actions");
7988                 }
7989         }
7990         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7991                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7992                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7993                         attr->ingress)
7994                         return rte_flow_error_set
7995                                 (error, ENOTSUP,
7996                                 RTE_FLOW_ERROR_TYPE_ACTION,
7997                                 NULL, "fate action not supported for "
7998                                 "meter with policy");
7999                 if (attr->egress) {
8000                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
8001                                 return rte_flow_error_set
8002                                         (error, ENOTSUP,
8003                                         RTE_FLOW_ERROR_TYPE_ACTION,
8004                                         NULL, "modify header action in egress "
8005                                         "cannot be done before meter action");
8006                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
8007                                 return rte_flow_error_set
8008                                         (error, ENOTSUP,
8009                                         RTE_FLOW_ERROR_TYPE_ACTION,
8010                                         NULL, "encap action in egress "
8011                                         "cannot be done before meter action");
8012                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8013                                 return rte_flow_error_set
8014                                         (error, ENOTSUP,
8015                                         RTE_FLOW_ERROR_TYPE_ACTION,
8016                                         NULL, "push vlan action in egress "
8017                                         "cannot be done before meter action");
8018                 }
8019         }
8020         /*
8021          * Hairpin flow will add one more TAG action in TX implicit mode.
8022          * In TX explicit mode, there will be no hairpin flow ID.
8023          */
8024         if (hairpin > 0)
8025                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8026         /* Extra metadata enabled: one more TAG action will be added. */
8027         if (dev_conf->dv_flow_en &&
8028             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8029             mlx5_flow_ext_mreg_supported(dev))
8030                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8031         if (rw_act_num >
8032                         flow_dv_modify_hdr_action_max(dev, is_root)) {
8033                 return rte_flow_error_set(error, ENOTSUP,
8034                                           RTE_FLOW_ERROR_TYPE_ACTION,
8035                                           NULL, "too many header modify"
8036                                           " actions to support");
8037         }
8038         /* Eswitch egress mirror and modify flow has a limitation on CX5. */
8039         if (fdb_mirror_limit && modify_after_mirror)
8040                 return rte_flow_error_set(error, EINVAL,
8041                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8042                                 "sample before modify action is not supported");
8043         return 0;
8044 }
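/*
 * Illustration (hypothetical rule, not taken from the code above): with
 * actions = [SET_IPV4_SRC, SET_TP_SRC, COUNT, QUEUE], the two
 * modify-header actions are counted once in actions_n (actions_n = 3),
 * while rw_act_num = MLX5_ACT_NUM_MDF_IPV4 + MLX5_ACT_NUM_MDF_PORT,
 * which is checked against flow_dv_modify_hdr_action_max(dev, is_root).
 */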
8045
8046 /**
8047  * Internal preparation function. Allocates the DV flow size,
8048  * this size is constant.
8049  *
8050  * @param[in] dev
8051  *   Pointer to the rte_eth_dev structure.
8052  * @param[in] attr
8053  *   Pointer to the flow attributes.
8054  * @param[in] items
8055  *   Pointer to the list of items.
8056  * @param[in] actions
8057  *   Pointer to the list of actions.
8058  * @param[out] error
8059  *   Pointer to the error structure.
8060  *
8061  * @return
8062  *   Pointer to mlx5_flow object on success,
8063  *   otherwise NULL and rte_errno is set.
8064  */
8065 static struct mlx5_flow *
8066 flow_dv_prepare(struct rte_eth_dev *dev,
8067                 const struct rte_flow_attr *attr __rte_unused,
8068                 const struct rte_flow_item items[] __rte_unused,
8069                 const struct rte_flow_action actions[] __rte_unused,
8070                 struct rte_flow_error *error)
8071 {
8072         uint32_t handle_idx = 0;
8073         struct mlx5_flow *dev_flow;
8074         struct mlx5_flow_handle *dev_handle;
8075         struct mlx5_priv *priv = dev->data->dev_private;
8076         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8077
8078         MLX5_ASSERT(wks);
8079         wks->skip_matcher_reg = 0;
8080         wks->policy = NULL;
8081         wks->final_policy = NULL;
8082         /* Guard against corrupting memory beyond the flow array. */
8083         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8084                 rte_flow_error_set(error, ENOSPC,
8085                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8086                                    "no free temporary device flow");
8087                 return NULL;
8088         }
8089         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8090                                    &handle_idx);
8091         if (!dev_handle) {
8092                 rte_flow_error_set(error, ENOMEM,
8093                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8094                                    "not enough memory to create flow handle");
8095                 return NULL;
8096         }
8097         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8098         dev_flow = &wks->flows[wks->flow_idx++];
8099         memset(dev_flow, 0, sizeof(*dev_flow));
8100         dev_flow->handle = dev_handle;
8101         dev_flow->handle_idx = handle_idx;
8102         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8103         dev_flow->ingress = attr->ingress;
8104         dev_flow->dv.transfer = attr->transfer;
8105         return dev_flow;
8106 }
8107
8108 #ifdef RTE_LIBRTE_MLX5_DEBUG
8109 /**
8110  * Sanity check for match mask and value. Similar to check_valid_spec() in
8111  * the kernel driver. If an unmasked bit is set in the value, -EINVAL is returned.
8112  *
8113  * @param match_mask
8114  *   pointer to match mask buffer.
8115  * @param match_value
8116  *   pointer to match value buffer.
8117  *
8118  * @return
8119  *   0 if valid, -EINVAL otherwise.
8120  */
8121 static int
8122 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8123 {
8124         uint8_t *m = match_mask;
8125         uint8_t *v = match_value;
8126         unsigned int i;
8127
8128         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8129                 if (v[i] & ~m[i]) {
8130                         DRV_LOG(ERR,
8131                                 "match_value differs from match_criteria"
8132                                 " %p[%u] != %p[%u]",
8133                                 match_value, i, match_mask, i);
8134                         return -EINVAL;
8135                 }
8136         }
8137         return 0;
8138 }
8139 #endif
8140
8141 /**
8142  * Add match of ip_version.
8143  *
8144  * @param[in] group
8145  *   Flow group.
8146  * @param[in] headers_v
8147  *   Values header pointer.
8148  * @param[in] headers_m
8149  *   Masks header pointer.
8150  * @param[in] ip_version
8151  *   The IP version to set.
8152  */
8153 static inline void
8154 flow_dv_set_match_ip_version(uint32_t group,
8155                              void *headers_v,
8156                              void *headers_m,
8157                              uint8_t ip_version)
8158 {
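             /*
              * Group 0 maps to the root table, where a full 0xf mask is
              * used; in other groups the version value itself doubles as
              * the mask.
              */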
8159         if (group == 0)
8160                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8161         else
8162                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8163                          ip_version);
8164         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8165         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8166         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8167 }
8168
8169 /**
8170  * Add Ethernet item to matcher and to the value.
8171  *
8172  * @param[in, out] matcher
8173  *   Flow matcher.
8174  * @param[in, out] key
8175  *   Flow matcher value.
8176  * @param[in] item
8177  *   Flow pattern to translate.
8178  * @param[in] inner
8179  *   Item is inner pattern.
8180  * @param[in] group
      *   The group to insert the rule.
      */
8181 static void
8182 flow_dv_translate_item_eth(void *matcher, void *key,
8183                            const struct rte_flow_item *item, int inner,
8184                            uint32_t group)
8185 {
8186         const struct rte_flow_item_eth *eth_m = item->mask;
8187         const struct rte_flow_item_eth *eth_v = item->spec;
8188         const struct rte_flow_item_eth nic_mask = {
8189                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8190                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8191                 .type = RTE_BE16(0xffff),
8192                 .has_vlan = 0,
8193         };
8194         void *hdrs_m;
8195         void *hdrs_v;
8196         char *l24_v;
8197         unsigned int i;
8198
8199         if (!eth_v)
8200                 return;
8201         if (!eth_m)
8202                 eth_m = &nic_mask;
8203         if (inner) {
8204                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8205                                          inner_headers);
8206                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8207         } else {
8208                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8209                                          outer_headers);
8210                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8211         }
8212         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8213                &eth_m->dst, sizeof(eth_m->dst));
8214         /* The value must be in the range of the mask. */
8215         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8216         for (i = 0; i < sizeof(eth_m->dst); ++i)
8217                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8218         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8219                &eth_m->src, sizeof(eth_m->src));
8220         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8221         /* The value must be in the range of the mask. */
8222         for (i = 0; i < sizeof(eth_m->src); ++i)
8223                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8224         /*
8225          * HW supports match on one Ethertype, the Ethertype following the last
8226          * VLAN tag of the packet (see PRM).
8227          * Set match on ethertype only if ETH header is not followed by VLAN.
8228          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8229          * ethertype, and use ip_version field instead.
8230          * eCPRI over Ether layer will use type value 0xAEFE.
8231          */
8232         if (eth_m->type == 0xFFFF) {
8233                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8234                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8235                 switch (eth_v->type) {
8236                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8237                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8238                         return;
8239                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8240                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8241                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8242                         return;
8243                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8244                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8245                         return;
8246                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8247                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8248                         return;
8249                 default:
8250                         break;
8251                 }
8252         }
8253         if (eth_m->has_vlan) {
8254                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8255                 if (eth_v->has_vlan) {
8256                         /*
8257                          * Here, when also has_more_vlan field in VLAN item is
8258                          * not set, only single-tagged packets will be matched.
8259                          */
8260                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8261                         return;
8262                 }
8263         }
8264         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8265                  rte_be_to_cpu_16(eth_m->type));
8266         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8267         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8268 }
8269
8270 /**
8271  * Add VLAN item to matcher and to the value.
8272  *
8273  * @param[in, out] dev_flow
8274  *   Flow descriptor.
8275  * @param[in, out] matcher
8276  *   Flow matcher.
8277  * @param[in, out] key
8278  *   Flow matcher value.
8279  * @param[in] item
8280  *   Flow pattern to translate.
8281  * @param[in] inner
8282  *   Item is inner pattern.
8283  * @param[in] group
      *   The group to insert the rule.
      */
8284 static void
8285 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8286                             void *matcher, void *key,
8287                             const struct rte_flow_item *item,
8288                             int inner, uint32_t group)
8289 {
8290         const struct rte_flow_item_vlan *vlan_m = item->mask;
8291         const struct rte_flow_item_vlan *vlan_v = item->spec;
8292         void *hdrs_m;
8293         void *hdrs_v;
8294         uint16_t tci_m;
8295         uint16_t tci_v;
8296
8297         if (inner) {
8298                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8299                                          inner_headers);
8300                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8301         } else {
8302                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8303                                          outer_headers);
8304                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8305                 /*
8306                  * This is a workaround: masks are not supported here
8307                  * and have been pre-validated.
8308                  */
8309                 if (vlan_v)
8310                         dev_flow->handle->vf_vlan.tag =
8311                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8312         }
8313         /*
8314          * When VLAN item exists in flow, mark packet as tagged,
8315          * even if TCI is not specified.
8316          */
8317         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8318                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8319                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8320         }
8321         if (!vlan_v)
8322                 return;
8323         if (!vlan_m)
8324                 vlan_m = &rte_flow_item_vlan_mask;
8325         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8326         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
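             /*
              * TCI layout: PCP(3) | DEI(1) | VID(12). The >> 12 and >> 13
              * shifts extract DEI and PCP; MLX5_SET truncates each value
              * to its field width.
              */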
8327         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8328         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8329         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8330         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8331         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8332         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8333         /*
8334          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8335          * ethertype, and use ip_version field instead.
8336          */
8337         if (vlan_m->inner_type == 0xFFFF) {
8338                 switch (vlan_v->inner_type) {
8339                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8340                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8341                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8342                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8343                         return;
8344                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8345                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8346                         return;
8347                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8348                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8349                         return;
8350                 default:
8351                         break;
8352                 }
8353         }
8354         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8355                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8356                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8357                 /* Only one vlan_tag bit can be set. */
8358                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8359                 return;
8360         }
8361         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8362                  rte_be_to_cpu_16(vlan_m->inner_type));
8363         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8364                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8365 }
8366
8367 /**
8368  * Add IPV4 item to matcher and to the value.
8369  *
8370  * @param[in, out] matcher
8371  *   Flow matcher.
8372  * @param[in, out] key
8373  *   Flow matcher value.
8374  * @param[in] item
8375  *   Flow pattern to translate.
8376  * @param[in] inner
8377  *   Item is inner pattern.
8378  * @param[in] group
8379  *   The group to insert the rule.
8380  */
8381 static void
8382 flow_dv_translate_item_ipv4(void *matcher, void *key,
8383                             const struct rte_flow_item *item,
8384                             int inner, uint32_t group)
8385 {
8386         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8387         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8388         const struct rte_flow_item_ipv4 nic_mask = {
8389                 .hdr = {
8390                         .src_addr = RTE_BE32(0xffffffff),
8391                         .dst_addr = RTE_BE32(0xffffffff),
8392                         .type_of_service = 0xff,
8393                         .next_proto_id = 0xff,
8394                         .time_to_live = 0xff,
8395                 },
8396         };
8397         void *headers_m;
8398         void *headers_v;
8399         char *l24_m;
8400         char *l24_v;
8401         uint8_t tos, ihl_m, ihl_v;
8402
8403         if (inner) {
8404                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8405                                          inner_headers);
8406                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8407         } else {
8408                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8409                                          outer_headers);
8410                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8411         }
8412         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8413         if (!ipv4_v)
8414                 return;
8415         if (!ipv4_m)
8416                 ipv4_m = &nic_mask;
8417         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8418                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8419         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8420                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8421         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8422         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8423         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8424                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8425         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8426                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8427         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8428         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8429         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8430         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8431         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
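             /*
              * version_ihl keeps the version in the high nibble and IHL
              * in the low one; only the IHL nibble is matched here.
              */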
8432         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8433         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8434         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8435                  ipv4_m->hdr.type_of_service);
8436         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8437         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8438                  ipv4_m->hdr.type_of_service >> 2);
8439         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8440         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8441                  ipv4_m->hdr.next_proto_id);
8442         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8443                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8444         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8445                  ipv4_m->hdr.time_to_live);
8446         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8447                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8448         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8449                  !!(ipv4_m->hdr.fragment_offset));
8450         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8451                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8452 }
8453
8454 /**
8455  * Add IPV6 item to matcher and to the value.
8456  *
8457  * @param[in, out] matcher
8458  *   Flow matcher.
8459  * @param[in, out] key
8460  *   Flow matcher value.
8461  * @param[in] item
8462  *   Flow pattern to translate.
8463  * @param[in] inner
8464  *   Item is inner pattern.
8465  * @param[in] group
8466  *   The group to insert the rule.
8467  */
8468 static void
8469 flow_dv_translate_item_ipv6(void *matcher, void *key,
8470                             const struct rte_flow_item *item,
8471                             int inner, uint32_t group)
8472 {
8473         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8474         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8475         const struct rte_flow_item_ipv6 nic_mask = {
8476                 .hdr = {
8477                         .src_addr =
8478                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8479                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8480                         .dst_addr =
8481                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8482                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8483                         .vtc_flow = RTE_BE32(0xffffffff),
8484                         .proto = 0xff,
8485                         .hop_limits = 0xff,
8486                 },
8487         };
8488         void *headers_m;
8489         void *headers_v;
8490         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8491         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8492         char *l24_m;
8493         char *l24_v;
8494         uint32_t vtc_m;
8495         uint32_t vtc_v;
8496         int i;
8497         int size;
8498
8499         if (inner) {
8500                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8501                                          inner_headers);
8502                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8503         } else {
8504                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8505                                          outer_headers);
8506                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8507         }
8508         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8509         if (!ipv6_v)
8510                 return;
8511         if (!ipv6_m)
8512                 ipv6_m = &nic_mask;
8513         size = sizeof(ipv6_m->hdr.dst_addr);
8514         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8515                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8516         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8517                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8518         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8519         for (i = 0; i < size; ++i)
8520                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8521         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8522                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8523         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8524                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8525         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8526         for (i = 0; i < size; ++i)
8527                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8528         /* TOS. */
8529         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8530         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
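             /*
              * vtc_flow layout: version(4) | DSCP(6) | ECN(2) | flow
              * label(20). ECN starts at bit 20 and DSCP at bit 22;
              * MLX5_SET truncates each value to its field width.
              */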
8531         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8532         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8533         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8534         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8535         /* Label. */
8536         if (inner) {
8537                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8538                          vtc_m);
8539                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8540                          vtc_v);
8541         } else {
8542                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8543                          vtc_m);
8544                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8545                          vtc_v);
8546         }
8547         /* Protocol. */
8548         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8549                  ipv6_m->hdr.proto);
8550         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8551                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8552         /* Hop limit. */
8553         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8554                  ipv6_m->hdr.hop_limits);
8555         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8556                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8557         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8558                  !!(ipv6_m->has_frag_ext));
8559         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8560                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8561 }
8562
8563 /**
8564  * Add IPV6 fragment extension item to matcher and to the value.
8565  *
8566  * @param[in, out] matcher
8567  *   Flow matcher.
8568  * @param[in, out] key
8569  *   Flow matcher value.
8570  * @param[in] item
8571  *   Flow pattern to translate.
8572  * @param[in] inner
8573  *   Item is inner pattern.
8574  */
8575 static void
8576 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8577                                      const struct rte_flow_item *item,
8578                                      int inner)
8579 {
8580         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8581         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8582         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8583                 .hdr = {
8584                         .next_header = 0xff,
8585                         .frag_data = RTE_BE16(0xffff),
8586                 },
8587         };
8588         void *headers_m;
8589         void *headers_v;
8590
8591         if (inner) {
8592                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8593                                          inner_headers);
8594                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8595         } else {
8596                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8597                                          outer_headers);
8598                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8599         }
8600         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8601         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8602         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8603         if (!ipv6_frag_ext_v)
8604                 return;
8605         if (!ipv6_frag_ext_m)
8606                 ipv6_frag_ext_m = &nic_mask;
8607         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8608                  ipv6_frag_ext_m->hdr.next_header);
8609         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8610                  ipv6_frag_ext_v->hdr.next_header &
8611                  ipv6_frag_ext_m->hdr.next_header);
8612 }
8613
8614 /**
8615  * Add TCP item to matcher and to the value.
8616  *
8617  * @param[in, out] matcher
8618  *   Flow matcher.
8619  * @param[in, out] key
8620  *   Flow matcher value.
8621  * @param[in] item
8622  *   Flow pattern to translate.
8623  * @param[in] inner
8624  *   Item is inner pattern.
8625  */
8626 static void
8627 flow_dv_translate_item_tcp(void *matcher, void *key,
8628                            const struct rte_flow_item *item,
8629                            int inner)
8630 {
8631         const struct rte_flow_item_tcp *tcp_m = item->mask;
8632         const struct rte_flow_item_tcp *tcp_v = item->spec;
8633         void *headers_m;
8634         void *headers_v;
8635
8636         if (inner) {
8637                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8638                                          inner_headers);
8639                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8640         } else {
8641                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8642                                          outer_headers);
8643                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8644         }
8645         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8646         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8647         if (!tcp_v)
8648                 return;
8649         if (!tcp_m)
8650                 tcp_m = &rte_flow_item_tcp_mask;
8651         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8652                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8653         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8654                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8655         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8656                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8657         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8658                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8659         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8660                  tcp_m->hdr.tcp_flags);
8661         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8662                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8663 }
8664
8665 /**
8666  * Add UDP item to matcher and to the value.
8667  *
8668  * @param[in, out] matcher
8669  *   Flow matcher.
8670  * @param[in, out] key
8671  *   Flow matcher value.
8672  * @param[in] item
8673  *   Flow pattern to translate.
8674  * @param[in] inner
8675  *   Item is inner pattern.
8676  */
8677 static void
8678 flow_dv_translate_item_udp(void *matcher, void *key,
8679                            const struct rte_flow_item *item,
8680                            int inner)
8681 {
8682         const struct rte_flow_item_udp *udp_m = item->mask;
8683         const struct rte_flow_item_udp *udp_v = item->spec;
8684         void *headers_m;
8685         void *headers_v;
8686
8687         if (inner) {
8688                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8689                                          inner_headers);
8690                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8691         } else {
8692                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8693                                          outer_headers);
8694                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8695         }
8696         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8697         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8698         if (!udp_v)
8699                 return;
8700         if (!udp_m)
8701                 udp_m = &rte_flow_item_udp_mask;
8702         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8703                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8704         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8705                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8706         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8707                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8708         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8709                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8710 }
8711
8712 /**
8713  * Add GRE optional Key item to matcher and to the value.
8714  *
8715  * @param[in, out] matcher
8716  *   Flow matcher.
8717  * @param[in, out] key
8718  *   Flow matcher value.
8719  * @param[in] item
8720  *   Flow pattern to translate.
8723  */
8724 static void
8725 flow_dv_translate_item_gre_key(void *matcher, void *key,
8726                                    const struct rte_flow_item *item)
8727 {
8728         const rte_be32_t *key_m = item->mask;
8729         const rte_be32_t *key_v = item->spec;
8730         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8731         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8732         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8733
8734         /* GRE K bit must be on and should already be validated */
8735         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8736         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8737         if (!key_v)
8738                 return;
8739         if (!key_m)
8740                 key_m = &gre_key_default_mask;
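             /*
              * The PRM splits the 32-bit GRE key: gre_key_h holds the
              * upper 24 bits, gre_key_l the lower 8.
              */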
8741         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8742                  rte_be_to_cpu_32(*key_m) >> 8);
8743         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8744                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8745         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8746                  rte_be_to_cpu_32(*key_m) & 0xFF);
8747         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8748                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8749 }
8750
8751 /**
8752  * Add GRE item to matcher and to the value.
8753  *
8754  * @param[in, out] matcher
8755  *   Flow matcher.
8756  * @param[in, out] key
8757  *   Flow matcher value.
8758  * @param[in] item
8759  *   Flow pattern to translate.
8760  * @param[in] pattern_flags
8761  *   Accumulated pattern flags.
8762  */
8763 static void
8764 flow_dv_translate_item_gre(void *matcher, void *key,
8765                            const struct rte_flow_item *item,
8766                            uint64_t pattern_flags)
8767 {
8768         static const struct rte_flow_item_gre empty_gre = {0,};
8769         const struct rte_flow_item_gre *gre_m = item->mask;
8770         const struct rte_flow_item_gre *gre_v = item->spec;
8771         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8772         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8773         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8774         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
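             /*
              * Host-endian view of the GRE c_rsvd0_ver word: the C, K and
              * S flag bits (RFC 2890) plus the version, filled from the
              * big-endian header word below.
              */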
8775         struct {
8776                 union {
8777                         __extension__
8778                         struct {
8779                                 uint16_t version:3;
8780                                 uint16_t rsvd0:9;
8781                                 uint16_t s_present:1;
8782                                 uint16_t k_present:1;
8783                                 uint16_t rsvd_bit1:1;
8784                                 uint16_t c_present:1;
8785                         };
8786                         uint16_t value;
8787                 };
8788         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8789         uint16_t protocol_m, protocol_v;
8790
8791         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8792         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8793         if (!gre_v) {
8794                 gre_v = &empty_gre;
8795                 gre_m = &empty_gre;
8796         } else {
8797                 if (!gre_m)
8798                         gre_m = &rte_flow_item_gre_mask;
8799         }
8800         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8801         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8802         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8803                  gre_crks_rsvd0_ver_m.c_present);
8804         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8805                  gre_crks_rsvd0_ver_v.c_present &
8806                  gre_crks_rsvd0_ver_m.c_present);
8807         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8808                  gre_crks_rsvd0_ver_m.k_present);
8809         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8810                  gre_crks_rsvd0_ver_v.k_present &
8811                  gre_crks_rsvd0_ver_m.k_present);
8812         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8813                  gre_crks_rsvd0_ver_m.s_present);
8814         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8815                  gre_crks_rsvd0_ver_v.s_present &
8816                  gre_crks_rsvd0_ver_m.s_present);
8817         protocol_m = rte_be_to_cpu_16(gre_m->protocol);
8818         protocol_v = rte_be_to_cpu_16(gre_v->protocol);
8819         if (!protocol_m) {
8820                 /* Force next protocol to prevent duplication of matchers. */
8821                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
8822                 if (protocol_v)
8823                         protocol_m = 0xFFFF;
8824         }
8825         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
8826         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8827                  protocol_m & protocol_v);
8828 }
8829
8830 /**
8831  * Add NVGRE item to matcher and to the value.
8832  *
8833  * @param[in, out] matcher
8834  *   Flow matcher.
8835  * @param[in, out] key
8836  *   Flow matcher value.
8837  * @param[in] item
8838  *   Flow pattern to translate.
8839  * @param[in] pattern_flags
8840  *   Accumulated pattern flags.
8841  */
8842 static void
8843 flow_dv_translate_item_nvgre(void *matcher, void *key,
8844                              const struct rte_flow_item *item,
8845                              uint64_t pattern_flags)
8846 {
8847         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8848         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8849         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8850         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8851         const char *tni_flow_id_m;
8852         const char *tni_flow_id_v;
8853         char *gre_key_m;
8854         char *gre_key_v;
8855         int size;
8856         int i;
8857
8858         /* For NVGRE, GRE header fields must be set with defined values. */
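             /*
              * c_rsvd0_ver 0x2000 sets only the K (key present) bit that
              * NVGRE mandates; the 0xB000 mask also pins the C and S bits
              * to zero (RFC 7637).
              */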
8859         const struct rte_flow_item_gre gre_spec = {
8860                 .c_rsvd0_ver = RTE_BE16(0x2000),
8861                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8862         };
8863         const struct rte_flow_item_gre gre_mask = {
8864                 .c_rsvd0_ver = RTE_BE16(0xB000),
8865                 .protocol = RTE_BE16(UINT16_MAX),
8866         };
8867         const struct rte_flow_item gre_item = {
8868                 .spec = &gre_spec,
8869                 .mask = &gre_mask,
8870                 .last = NULL,
8871         };
8872         flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
8873         if (!nvgre_v)
8874                 return;
8875         if (!nvgre_m)
8876                 nvgre_m = &rte_flow_item_nvgre_mask;
8877         tni_flow_id_m = (const char *)nvgre_m->tni;
8878         tni_flow_id_v = (const char *)nvgre_v->tni;
8879         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8880         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8881         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8882         memcpy(gre_key_m, tni_flow_id_m, size);
8883         for (i = 0; i < size; ++i)
8884                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8885 }
8886
8887 /**
8888  * Add VXLAN item to matcher and to the value.
8889  *
8890  * @param[in] dev
8891  *   Pointer to the Ethernet device structure.
8892  * @param[in] attr
8893  *   Flow rule attributes.
8894  * @param[in, out] matcher
8895  *   Flow matcher.
8896  * @param[in, out] key
8897  *   Flow matcher value.
8898  * @param[in] item
8899  *   Flow pattern to translate.
8900  * @param[in] inner
8901  *   Item is inner pattern.
8902  */
8903 static void
8904 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8905                              const struct rte_flow_attr *attr,
8906                              void *matcher, void *key,
8907                              const struct rte_flow_item *item,
8908                              int inner)
8909 {
8910         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8911         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8912         void *headers_m;
8913         void *headers_v;
8914         void *misc5_m;
8915         void *misc5_v;
8916         uint32_t *tunnel_header_v;
8917         uint32_t *tunnel_header_m;
8918         uint16_t dport;
8919         struct mlx5_priv *priv = dev->data->dev_private;
8920         const struct rte_flow_item_vxlan nic_mask = {
8921                 .vni = "\xff\xff\xff",
8922                 .rsvd1 = 0xff,
8923         };
8924
8925         if (inner) {
8926                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8927                                          inner_headers);
8928                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8929         } else {
8930                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8931                                          outer_headers);
8932                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8933         }
8934         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8935                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8936         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8937                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8938                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8939         }
8940         dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
8941         if (!vxlan_v)
8942                 return;
8943         if (!vxlan_m) {
8944                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8945                     (attr->group && !priv->sh->misc5_cap))
8946                         vxlan_m = &rte_flow_item_vxlan_mask;
8947                 else
8948                         vxlan_m = &nic_mask;
8949         }
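             /*
              * Prefer misc5 tunnel_header matching, which covers the full
              * VNI plus the reserved byte; fall back to the legacy misc
              * vxlan_vni field on the ConnectX-5 steering format with a
              * non-VXLAN UDP port, or when tunnel_header/misc5 support is
              * missing.
              */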
8950         if ((priv->sh->steering_format_version ==
8951             MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
8952             dport != MLX5_UDP_PORT_VXLAN) ||
8953             (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8954             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8955                 void *misc_m;
8956                 void *misc_v;
8957                 char *vni_m;
8958                 char *vni_v;
8959                 int size;
8960                 int i;
8961                 misc_m = MLX5_ADDR_OF(fte_match_param,
8962                                       matcher, misc_parameters);
8963                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8964                 size = sizeof(vxlan_m->vni);
8965                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8966                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8967                 memcpy(vni_m, vxlan_m->vni, size);
8968                 for (i = 0; i < size; ++i)
8969                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8970                 return;
8971         }
8972         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8973         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8974         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8975                                                    misc5_v,
8976                                                    tunnel_header_1);
8977         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8978                                                    misc5_m,
8979                                                    tunnel_header_1);
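             /*
              * Pack the 24-bit VNI into the low three bytes of
              * tunnel_header_1 and rsvd1 into the top byte, composing the
              * word byte by byte to stay endianness-agnostic.
              */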
8980         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8981                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8982                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8983         if (*tunnel_header_v)
8984                 *tunnel_header_m = vxlan_m->vni[0] |
8985                         vxlan_m->vni[1] << 8 |
8986                         vxlan_m->vni[2] << 16;
8987         else
8988                 *tunnel_header_m = 0x0;
8989         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8990         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8991                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8992 }
8993
8994 /**
8995  * Add VXLAN-GPE item to matcher and to the value.
8996  *
8997  * @param[in, out] matcher
8998  *   Flow matcher.
8999  * @param[in, out] key
9000  *   Flow matcher value.
9001  * @param[in] item
9002  *   Flow pattern to translate.
9003  * @param[in] pattern_flags
9004  *   Accumulated pattern flags.
9005  */
9006
9007 static void
9008 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
9009                                  const struct rte_flow_item *item,
9010                                  const uint64_t pattern_flags)
9011 {
9012         static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
9013         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
9014         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
9015         /* The item was validated to be on the outer side */
9016         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9017         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9018         void *misc_m =
9019                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
9020         void *misc_v =
9021                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9022         char *vni_m =
9023                 MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9024         char *vni_v =
9025                 MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9026         int i, size = sizeof(vxlan_m->vni);
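             /*
              * Default flags 0x0c correspond to the I (VNI valid) and P
              * (next protocol present) bits of the VXLAN-GPE flags byte.
              */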
9027         uint8_t flags_m = 0xff;
9028         uint8_t flags_v = 0xc;
9029         uint8_t m_protocol, v_protocol;
9030
9031         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9032                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9033                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9034                          MLX5_UDP_PORT_VXLAN_GPE);
9035         }
9036         if (!vxlan_v) {
9037                 vxlan_v = &dummy_vxlan_gpe_hdr;
9038                 vxlan_m = &dummy_vxlan_gpe_hdr;
9039         } else {
9040                 if (!vxlan_m)
9041                         vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9042         }
9043         memcpy(vni_m, vxlan_m->vni, size);
9044         for (i = 0; i < size; ++i)
9045                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9046         if (vxlan_m->flags) {
9047                 flags_m = vxlan_m->flags;
9048                 flags_v = vxlan_v->flags;
9049         }
9050         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9051         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9052         m_protocol = vxlan_m->protocol;
9053         v_protocol = vxlan_v->protocol;
9054         if (!m_protocol) {
9055                 /* Force next protocol to ensure parsing of the inner headers. */
9056                 if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
9057                         v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
9058                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
9059                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
9060                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
9061                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
9062                 if (v_protocol)
9063                         m_protocol = 0xFF;
9064         }
9065         MLX5_SET(fte_match_set_misc3, misc_m,
9066                  outer_vxlan_gpe_next_protocol, m_protocol);
9067         MLX5_SET(fte_match_set_misc3, misc_v,
9068                  outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
9069 }
9070
9071 /**
9072  * Add Geneve item to matcher and to the value.
9073  *
9074  * @param[in, out] matcher
9075  *   Flow matcher.
9076  * @param[in, out] key
9077  *   Flow matcher value.
9078  * @param[in] item
9079  *   Flow pattern to translate.
9080  * @param[in] pattern_flags
9081  *   Accumulated pattern flags.
9082  */
9083
9084 static void
9085 flow_dv_translate_item_geneve(void *matcher, void *key,
9086                               const struct rte_flow_item *item,
9087                               uint64_t pattern_flags)
9088 {
9089         static const struct rte_flow_item_geneve empty_geneve = {0,};
9090         const struct rte_flow_item_geneve *geneve_m = item->mask;
9091         const struct rte_flow_item_geneve *geneve_v = item->spec;
9092         /* GENEVE flow item validation allows a single tunnel item only. */
9093         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9094         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9095         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9096         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9097         uint16_t gbhdr_m;
9098         uint16_t gbhdr_v;
9099         char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9100         char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9101         size_t size = sizeof(geneve_m->vni), i;
9102         uint16_t protocol_m, protocol_v;
9103
9104         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9105                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9106                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9107                          MLX5_UDP_PORT_GENEVE);
9108         }
9109         if (!geneve_v) {
9110                 geneve_v = &empty_geneve;
9111                 geneve_m = &empty_geneve;
9112         } else {
9113                 if (!geneve_m)
9114                         geneve_m = &rte_flow_item_geneve_mask;
9115         }
9116         memcpy(vni_m, geneve_m->vni, size);
9117         for (i = 0; i < size; ++i)
9118                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9119         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9120         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
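             /*
              * ver_opt_len_o_c_rsvd0 packs version, opt_len and the O/C
              * bits; the OAMF/OPTLEN macros extract the O flag and the
              * option length respectively.
              */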
9121         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9122                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9123         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9124                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9125         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9126                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9127         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9128                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9129                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9130         protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
9131         protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
9132         if (!protocol_m) {
9133                 /* Force next protocol to prevent duplication of matchers. */
9134                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
9135                 if (protocol_v)
9136                         protocol_m = 0xFFFF;
9137         }
9138         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
9139         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9140                  protocol_m & protocol_v);
9141 }
9142
9143 /**
9144  * Create Geneve TLV option resource.
9145  *
9146  * @param[in, out] dev
9147  *   Pointer to rte_eth_dev structure.
9148  * @param[in] item
9149  *   Flow pattern to translate.
9150  * @param[out] error
9151  *   Pointer to error structure.
9154  *
9155  * @return
9156  *   0 on success, a negative errno value otherwise and rte_errno is set.
9157  */
9158
9159 int
9160 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9161                                              const struct rte_flow_item *item,
9162                                              struct rte_flow_error *error)
9163 {
9164         struct mlx5_priv *priv = dev->data->dev_private;
9165         struct mlx5_dev_ctx_shared *sh = priv->sh;
9166         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9167                         sh->geneve_tlv_option_resource;
9168         struct mlx5_devx_obj *obj;
9169         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9170         int ret = 0;
9171
9172         if (!geneve_opt_v)
9173                 return -1;
9174         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9175         if (geneve_opt_resource != NULL) {
9176                 if (geneve_opt_resource->option_class ==
9177                         geneve_opt_v->option_class &&
9178                         geneve_opt_resource->option_type ==
9179                         geneve_opt_v->option_type &&
9180                         geneve_opt_resource->length ==
9181                         geneve_opt_v->option_len) {
9182                         /* We already have GENEVE TLV option obj allocated. */
9183                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9184                                            __ATOMIC_RELAXED);
9185                 } else {
9186                         ret = rte_flow_error_set(error, ENOMEM,
9187                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9188                                 "Only one GENEVE TLV option supported");
9189                         goto exit;
9190                 }
9191         } else {
9192                 /* Create a GENEVE TLV object and resource. */
9193                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9194                                 geneve_opt_v->option_class,
9195                                 geneve_opt_v->option_type,
9196                                 geneve_opt_v->option_len);
9197                 if (!obj) {
9198                         ret = rte_flow_error_set(error, ENODATA,
9199                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9200                                 "Failed to create GENEVE TLV Devx object");
9201                         goto exit;
9202                 }
9203                 sh->geneve_tlv_option_resource =
9204                                 mlx5_malloc(MLX5_MEM_ZERO,
9205                                                 sizeof(*geneve_opt_resource),
9206                                                 0, SOCKET_ID_ANY);
9207                 if (!sh->geneve_tlv_option_resource) {
9208                         claim_zero(mlx5_devx_cmd_destroy(obj));
9209                         ret = rte_flow_error_set(error, ENOMEM,
9210                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9211                                 "GENEVE TLV object memory allocation failed");
9212                         goto exit;
9213                 }
9214                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9215                 geneve_opt_resource->obj = obj;
9216                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9217                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9218                 geneve_opt_resource->length = geneve_opt_v->option_len;
9219                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9220                                 __ATOMIC_RELAXED);
9221         }
9222 exit:
9223         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9224         return ret;
9225 }
9226
9227 /**
9228  * Add Geneve TLV option item to matcher.
9229  *
9230  * @param[in, out] dev
9231  *   Pointer to rte_eth_dev structure.
9232  * @param[in, out] matcher
9233  *   Flow matcher.
9234  * @param[in, out] key
9235  *   Flow matcher value.
9236  * @param[in] item
9237  *   Flow pattern to translate.
9238  * @param[out] error
9239  *   Pointer to error structure.
9240  *
      * @return
      *   0 on success, a negative value otherwise.
      */
9241 static int
9242 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9243                                   void *key, const struct rte_flow_item *item,
9244                                   struct rte_flow_error *error)
9245 {
9246         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9247         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9248         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9249         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9250         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9251                         misc_parameters_3);
9252         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9253         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9254         int ret = 0;
9255
9256         if (!geneve_opt_v)
9257                 return -1;
9258         if (!geneve_opt_m)
9259                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9260         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9261                                                            error);
9262         if (ret) {
9263                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9264                 return ret;
9265         }
9266         /*
9267          * Set the option length in GENEVE header if not requested.
9268          * The GENEVE TLV option length is expressed by the option length field
9269          * in the GENEVE header.
9270          * If the option length was not requested but the GENEVE TLV option item
9271          * is present we set the option length field implicitly.
9272          */
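             /*
              * The GENEVE header opt_len also counts the 4-byte option
              * header, hence option_len + 1 below.
              */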
9273         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9274                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9275                          MLX5_GENEVE_OPTLEN_MASK);
9276                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9277                          geneve_opt_v->option_len + 1);
9278         }
9279         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9280         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9281         /* Set the data. */
9282         if (geneve_opt_v->data) {
9283                 memcpy(&opt_data_key, geneve_opt_v->data,
9284                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9285                                 sizeof(opt_data_key)));
9286                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9287                                 sizeof(opt_data_key));
9288                 memcpy(&opt_data_mask, geneve_opt_m->data,
9289                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9290                                 sizeof(opt_data_mask)));
9291                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9292                                 sizeof(opt_data_mask));
9293                 MLX5_SET(fte_match_set_misc3, misc3_m,
9294                                 geneve_tlv_option_0_data,
9295                                 rte_be_to_cpu_32(opt_data_mask));
9296                 MLX5_SET(fte_match_set_misc3, misc3_v,
9297                                 geneve_tlv_option_0_data,
9298                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9299         }
9300         return ret;
9301 }
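
/*
 * Illustration (hypothetical helper, not part of the driver): the GENEVE
 * header option length field counts 4-byte words and also covers the TLV
 * option header word, which is why option_len + 1 is programmed above.
 */
static inline uint8_t
example_geneve_hdr_opt_len(uint8_t option_len)
{
        /* One extra 4-byte word accounts for the TLV option header itself. */
        return option_len + 1;
}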
9302
9303 /**
9304  * Add MPLS item to matcher and to the value.
9305  *
9306  * @param[in, out] matcher
9307  *   Flow matcher.
9308  * @param[in, out] key
9309  *   Flow matcher value.
9310  * @param[in] item
9311  *   Flow pattern to translate.
9312  * @param[in] prev_layer
9313  *   The protocol layer indicated in previous item.
9314  * @param[in] inner
9315  *   Item is inner pattern.
9316  */
9317 static void
9318 flow_dv_translate_item_mpls(void *matcher, void *key,
9319                             const struct rte_flow_item *item,
9320                             uint64_t prev_layer,
9321                             int inner)
9322 {
9323         const uint32_t *in_mpls_m = item->mask;
9324         const uint32_t *in_mpls_v = item->spec;
9325         uint32_t *out_mpls_m = NULL;
9326         uint32_t *out_mpls_v = NULL;
9327         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9328         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9329         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9330                                      misc_parameters_2);
9331         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9332         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9333         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9334
9335         switch (prev_layer) {
9336         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9337                 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9338                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
9339                                  0xffff);
9340                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9341                                  MLX5_UDP_PORT_MPLS);
9342                 }
9343                 break;
9344         case MLX5_FLOW_LAYER_GRE:
9345                 /* Fall-through. */
9346         case MLX5_FLOW_LAYER_GRE_KEY:
9347                 if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
9348                         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
9349                                  0xffff);
9350                         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9351                                  RTE_ETHER_TYPE_MPLS);
9352                 }
9353                 break;
9354         default:
9355                 break;
9356         }
9357         if (!in_mpls_v)
9358                 return;
9359         if (!in_mpls_m)
9360                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9361         switch (prev_layer) {
9362         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9363                 out_mpls_m =
9364                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9365                                                  outer_first_mpls_over_udp);
9366                 out_mpls_v =
9367                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9368                                                  outer_first_mpls_over_udp);
9369                 break;
9370         case MLX5_FLOW_LAYER_GRE:
9371                 out_mpls_m =
9372                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9373                                                  outer_first_mpls_over_gre);
9374                 out_mpls_v =
9375                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9376                                                  outer_first_mpls_over_gre);
9377                 break;
9378         default:
9379                 /* Inner MPLS not over GRE is not supported. */
9380                 if (!inner) {
9381                         out_mpls_m =
9382                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9383                                                          misc2_m,
9384                                                          outer_first_mpls);
9385                         out_mpls_v =
9386                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9387                                                          misc2_v,
9388                                                          outer_first_mpls);
9389                 }
9390                 break;
9391         }
9392         if (out_mpls_m && out_mpls_v) {
9393                 *out_mpls_m = *in_mpls_m;
9394                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9395         }
9396 }
9397
9398 /**
9399  * Add metadata register item to matcher
9400  *
9401  * @param[in, out] matcher
9402  *   Flow matcher.
9403  * @param[in, out] key
9404  *   Flow matcher value.
9405  * @param[in] reg_type
9406  *   Type of device metadata register
9407  * @param[in] data
9408  *   Register value
9409  * @param[in] mask
9410  *   Register mask
9411  */
9412 static void
9413 flow_dv_match_meta_reg(void *matcher, void *key,
9414                        enum modify_reg reg_type,
9415                        uint32_t data, uint32_t mask)
9416 {
9417         void *misc2_m =
9418                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9419         void *misc2_v =
9420                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9421         uint32_t temp;
9422
9423         data &= mask;
9424         switch (reg_type) {
9425         case REG_A:
9426                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9427                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9428                 break;
9429         case REG_B:
9430                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9431                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9432                 break;
9433         case REG_C_0:
9434                 /*
9435                  * The metadata register C0 field might be divided into
9436                  * source vport index and META item value, we should set
9437                  * this field according to specified mask, not as whole one.
9438                  */
9439                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9440                 temp |= mask;
9441                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9442                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9443                 temp &= ~mask;
9444                 temp |= data;
9445                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9446                 break;
9447         case REG_C_1:
9448                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9449                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9450                 break;
9451         case REG_C_2:
9452                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9453                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9454                 break;
9455         case REG_C_3:
9456                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9457                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9458                 break;
9459         case REG_C_4:
9460                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9461                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9462                 break;
9463         case REG_C_5:
9464                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9465                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9466                 break;
9467         case REG_C_6:
9468                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9469                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9470                 break;
9471         case REG_C_7:
9472                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9473                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9474                 break;
9475         default:
9476                 MLX5_ASSERT(false);
9477                 break;
9478         }
9479 }
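
/*
 * Illustration (hypothetical helper, not part of the driver): the REG_C_0
 * branch above performs a read-modify-write so that only the bits covered
 * by the mask are replaced; the same rule applied to a plain integer:
 */
static inline uint32_t
example_merge_regc0(uint32_t old, uint32_t data, uint32_t mask)
{
        /* Keep the bits outside the mask, substitute the bits inside it. */
        return (old & ~mask) | (data & mask);
}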
9480
9481 /**
9482  * Add MARK item to matcher
9483  *
9484  * @param[in] dev
9485  *   The device to configure through.
9486  * @param[in, out] matcher
9487  *   Flow matcher.
9488  * @param[in, out] key
9489  *   Flow matcher value.
9490  * @param[in] item
9491  *   Flow pattern to translate.
9492  */
9493 static void
9494 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9495                             void *matcher, void *key,
9496                             const struct rte_flow_item *item)
9497 {
9498         struct mlx5_priv *priv = dev->data->dev_private;
9499         const struct rte_flow_item_mark *mark;
9500         uint32_t value;
9501         uint32_t mask;
9502
9503         mark = item->mask ? (const void *)item->mask :
9504                             &rte_flow_item_mark_mask;
9505         mask = mark->id & priv->sh->dv_mark_mask;
9506         mark = (const void *)item->spec;
9507         MLX5_ASSERT(mark);
9508         value = mark->id & priv->sh->dv_mark_mask & mask;
9509         if (mask) {
9510                 enum modify_reg reg;
9511
9512                 /* Get the metadata register index for the mark. */
9513                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9514                 MLX5_ASSERT(reg > 0);
9515                 if (reg == REG_C_0) {
9516                         struct mlx5_priv *priv = dev->data->dev_private;
9517                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9518                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9519
9520                         mask &= msk_c0;
9521                         mask <<= shl_c0;
9522                         value <<= shl_c0;
9523                 }
9524                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9525         }
9526 }
9527
9528 /**
9529  * Add META item to matcher
9530  *
9531  * @param[in] dev
9532  *   The device to configure through.
9533  * @param[in, out] matcher
9534  *   Flow matcher.
9535  * @param[in, out] key
9536  *   Flow matcher value.
9537  * @param[in] attr
9538  *   Attributes of flow that includes this item.
9539  * @param[in] item
9540  *   Flow pattern to translate.
9541  */
9542 static void
9543 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9544                             void *matcher, void *key,
9545                             const struct rte_flow_attr *attr,
9546                             const struct rte_flow_item *item)
9547 {
9548         const struct rte_flow_item_meta *meta_m;
9549         const struct rte_flow_item_meta *meta_v;
9550
9551         meta_m = (const void *)item->mask;
9552         if (!meta_m)
9553                 meta_m = &rte_flow_item_meta_mask;
9554         meta_v = (const void *)item->spec;
9555         if (meta_v) {
9556                 int reg;
9557                 uint32_t value = meta_v->data;
9558                 uint32_t mask = meta_m->data;
9559
9560                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9561                 if (reg < 0)
9562                         return;
9563                 MLX5_ASSERT(reg != REG_NON);
9564                 if (reg == REG_C_0) {
9565                         struct mlx5_priv *priv = dev->data->dev_private;
9566                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9567                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9568
9569                         mask &= msk_c0;
9570                         mask <<= shl_c0;
9571                         value <<= shl_c0;
9572                 }
9573                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9574         }
9575 }
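
/*
 * Illustration (generic sketch, not the driver's exact arithmetic): placing
 * an item value under a register sub-field mask means shifting it up to the
 * least significant set bit of the mask, which rte_bsf32() locates:
 */
static inline uint32_t
example_place_under_mask(uint32_t value, uint32_t field_mask)
{
        /* Assumes a non-zero mask; e.g. 0x5 under 0x00f0 becomes 0x0050. */
        return (value << rte_bsf32(field_mask)) & field_mask;
}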
9576
9577 /**
9578  * Add vport metadata Reg C0 item to matcher
9579  *
9580  * @param[in, out] matcher
9581  *   Flow matcher.
9582  * @param[in, out] key
9583  *   Flow matcher value.
9584  * @param[in] value
9585  *   Register value to match.
 * @param[in] mask
 *   Register mask.
9586  */
9587 static void
9588 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9589                                   uint32_t value, uint32_t mask)
9590 {
9591         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9592 }
9593
9594 /**
9595  * Add tag item to matcher
9596  *
9597  * @param[in] dev
9598  *   The device to configure through.
9599  * @param[in, out] matcher
9600  *   Flow matcher.
9601  * @param[in, out] key
9602  *   Flow matcher value.
9603  * @param[in] item
9604  *   Flow pattern to translate.
9605  */
9606 static void
9607 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9608                                 void *matcher, void *key,
9609                                 const struct rte_flow_item *item)
9610 {
9611         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9612         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9613         uint32_t mask, value;
9614
9615         MLX5_ASSERT(tag_v);
9616         value = tag_v->data;
9617         mask = tag_m ? tag_m->data : UINT32_MAX;
9618         if (tag_v->id == REG_C_0) {
9619                 struct mlx5_priv *priv = dev->data->dev_private;
9620                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9621                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9622
9623                 mask &= msk_c0;
9624                 mask <<= shl_c0;
9625                 value <<= shl_c0;
9626         }
9627         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9628 }
9629
9630 /**
9631  * Add TAG item to matcher
9632  *
9633  * @param[in] dev
9634  *   The device to configure through.
9635  * @param[in, out] matcher
9636  *   Flow matcher.
9637  * @param[in, out] key
9638  *   Flow matcher value.
9639  * @param[in] item
9640  *   Flow pattern to translate.
9641  */
9642 static void
9643 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9644                            void *matcher, void *key,
9645                            const struct rte_flow_item *item)
9646 {
9647         const struct rte_flow_item_tag *tag_v = item->spec;
9648         const struct rte_flow_item_tag *tag_m = item->mask;
9649         enum modify_reg reg;
9650
9651         MLX5_ASSERT(tag_v);
9652         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9653         /* Get the metadata register index for the tag. */
9654         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9655         MLX5_ASSERT(reg > 0);
9656         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9657 }
9658
9659 /**
9660  * Add source vport match to the specified matcher.
9661  *
9662  * @param[in, out] matcher
9663  *   Flow matcher.
9664  * @param[in, out] key
9665  *   Flow matcher value.
9666  * @param[in] port
9667  *   Source vport value to match
9668  * @param[in] mask
9669  *   Mask
9670  */
9671 static void
9672 flow_dv_translate_item_source_vport(void *matcher, void *key,
9673                                     int16_t port, uint16_t mask)
9674 {
9675         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9676         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9677
9678         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9679         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9680 }
9681
9682 /**
9683  * Translate port-id item to eswitch match on port-id.
9684  *
9685  * @param[in] dev
9686  *   The device to configure through.
9687  * @param[in, out] matcher
9688  *   Flow matcher.
9689  * @param[in, out] key
9690  *   Flow matcher value.
9691  * @param[in] item
9692  *   Flow pattern to translate.
9693  * @param[in] attr
9694  *   Flow attributes.
9695  *
9696  * @return
9697  *   0 on success, a negative errno value otherwise.
9698  */
9699 static int
9700 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9701                                void *key, const struct rte_flow_item *item,
9702                                const struct rte_flow_attr *attr)
9703 {
9704         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9705         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9706         struct mlx5_priv *priv;
9707         uint16_t mask, id;
9708
9709         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9710                 flow_dv_translate_item_source_vport(matcher, key,
9711                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9712                 return 0;
9713         }
9714         mask = pid_m ? pid_m->id : 0xffff;
9715         id = pid_v ? pid_v->id : dev->data->port_id;
9716         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9717         if (!priv)
9718                 return -rte_errno;
9719         /*
9720          * Translate to vport field or to metadata, depending on mode.
9721          * Kernel can use either misc.source_port or half of C0 metadata
9722          * register.
9723          */
9724         if (priv->vport_meta_mask) {
9725                 /*
9726                  * Provide the hint for SW steering library
9727                  * to insert the flow into ingress domain and
9728                  * save the extra vport match.
9729                  */
9730                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9731                     priv->pf_bond < 0 && attr->transfer)
9732                         flow_dv_translate_item_source_vport
9733                                 (matcher, key, priv->vport_id, mask);
9734                 /*
9735                  * We should always set the vport metadata register,
9736                  * otherwise the SW steering library can drop
9737                  * the rule if wire vport metadata value is not zero,
9738                  * it depends on kernel configuration.
9739                  */
9740                 flow_dv_translate_item_meta_vport(matcher, key,
9741                                                   priv->vport_meta_tag,
9742                                                   priv->vport_meta_mask);
9743         } else {
9744                 flow_dv_translate_item_source_vport(matcher, key,
9745                                                     priv->vport_id, mask);
9746         }
9747         return 0;
9748 }
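
/*
 * Illustration (hypothetical wrapper, not part of the driver): matching on
 * the E-Switch manager port reduces to a full-mask source-vport match,
 * exactly as the MLX5_PORT_ESW_MGR branch above does:
 */
static inline void
example_match_esw_manager(struct rte_eth_dev *dev, void *matcher, void *key)
{
        /* 0xffff masks the whole 16-bit source vport field. */
        flow_dv_translate_item_source_vport(matcher, key,
                        flow_dv_get_esw_manager_vport_id(dev), 0xffff);
}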
9749
9750 /**
9751  * Add ICMP6 item to matcher and to the value.
9752  *
9753  * @param[in, out] matcher
9754  *   Flow matcher.
9755  * @param[in, out] key
9756  *   Flow matcher value.
9757  * @param[in] item
9758  *   Flow pattern to translate.
9759  * @param[in] inner
9760  *   Item is inner pattern.
9761  */
9762 static void
9763 flow_dv_translate_item_icmp6(void *matcher, void *key,
9764                               const struct rte_flow_item *item,
9765                               int inner)
9766 {
9767         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9768         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9769         void *headers_m;
9770         void *headers_v;
9771         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9772                                      misc_parameters_3);
9773         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9774         if (inner) {
9775                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9776                                          inner_headers);
9777                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9778         } else {
9779                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9780                                          outer_headers);
9781                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9782         }
9783         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9784         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9785         if (!icmp6_v)
9786                 return;
9787         if (!icmp6_m)
9788                 icmp6_m = &rte_flow_item_icmp6_mask;
9789         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9790         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9791                  icmp6_v->type & icmp6_m->type);
9792         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9793         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9794                  icmp6_v->code & icmp6_m->code);
9795 }
9796
9797 /**
9798  * Add ICMP item to matcher and to the value.
9799  *
9800  * @param[in, out] matcher
9801  *   Flow matcher.
9802  * @param[in, out] key
9803  *   Flow matcher value.
9804  * @param[in] item
9805  *   Flow pattern to translate.
9806  * @param[in] inner
9807  *   Item is inner pattern.
9808  */
9809 static void
9810 flow_dv_translate_item_icmp(void *matcher, void *key,
9811                             const struct rte_flow_item *item,
9812                             int inner)
9813 {
9814         const struct rte_flow_item_icmp *icmp_m = item->mask;
9815         const struct rte_flow_item_icmp *icmp_v = item->spec;
9816         uint32_t icmp_header_data_m = 0;
9817         uint32_t icmp_header_data_v = 0;
9818         void *headers_m;
9819         void *headers_v;
9820         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9821                                      misc_parameters_3);
9822         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9823         if (inner) {
9824                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9825                                          inner_headers);
9826                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9827         } else {
9828                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9829                                          outer_headers);
9830                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9831         }
9832         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9833         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9834         if (!icmp_v)
9835                 return;
9836         if (!icmp_m)
9837                 icmp_m = &rte_flow_item_icmp_mask;
9838         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9839                  icmp_m->hdr.icmp_type);
9840         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9841                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9842         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9843                  icmp_m->hdr.icmp_code);
9844         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9845                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9846         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9847         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9848         if (icmp_header_data_m) {
9849                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9850                 icmp_header_data_v |=
9851                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9852                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9853                          icmp_header_data_m);
9854                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9855                          icmp_header_data_v & icmp_header_data_m);
9856         }
9857 }
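
/*
 * Illustration (hypothetical helper, not part of the driver): the ICMP
 * "header data" dword programmed above carries the identifier in its upper
 * 16 bits and the sequence number in its lower 16 bits, both converted
 * from the big-endian header fields:
 */
static inline uint32_t
example_icmp_header_data(rte_be16_t ident, rte_be16_t seq_nb)
{
        return ((uint32_t)rte_be_to_cpu_16(ident) << 16) |
               rte_be_to_cpu_16(seq_nb);
}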
9858
9859 /**
9860  * Add GTP item to matcher and to the value.
9861  *
9862  * @param[in, out] matcher
9863  *   Flow matcher.
9864  * @param[in, out] key
9865  *   Flow matcher value.
9866  * @param[in] item
9867  *   Flow pattern to translate.
9868  * @param[in] inner
9869  *   Item is inner pattern.
9870  */
9871 static void
9872 flow_dv_translate_item_gtp(void *matcher, void *key,
9873                            const struct rte_flow_item *item, int inner)
9874 {
9875         const struct rte_flow_item_gtp *gtp_m = item->mask;
9876         const struct rte_flow_item_gtp *gtp_v = item->spec;
9877         void *headers_m;
9878         void *headers_v;
9879         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9880                                      misc_parameters_3);
9881         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9882         uint16_t dport = RTE_GTPU_UDP_PORT;
9883
9884         if (inner) {
9885                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9886                                          inner_headers);
9887                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9888         } else {
9889                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9890                                          outer_headers);
9891                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9892         }
9893         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9894                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9895                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9896         }
9897         if (!gtp_v)
9898                 return;
9899         if (!gtp_m)
9900                 gtp_m = &rte_flow_item_gtp_mask;
9901         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9902                  gtp_m->v_pt_rsv_flags);
9903         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9904                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9905         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9906         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9907                  gtp_v->msg_type & gtp_m->msg_type);
9908         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9909                  rte_be_to_cpu_32(gtp_m->teid));
9910         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9911                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9912 }
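
/*
 * Illustration (hypothetical helper, not part of the driver): rte_flow
 * carries the GTP TEID in big-endian form while MLX5_SET() expects a
 * CPU-order value, hence the rte_be_to_cpu_32() conversions above:
 */
static inline uint32_t
example_gtpu_teid_value(rte_be32_t teid, rte_be32_t teid_mask)
{
        /* Apply the mask in big-endian first, then convert once. */
        return rte_be_to_cpu_32(teid & teid_mask);
}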
9913
9914 /**
9915  * Add GTP PSC item to matcher.
9916  *
9917  * @param[in, out] matcher
9918  *   Flow matcher.
9919  * @param[in, out] key
9920  *   Flow matcher value.
9921  * @param[in] item
9922  *   Flow pattern to translate.
 *
 * @return
 *   0 on success.
9923  */
9924 static int
9925 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9926                                const struct rte_flow_item *item)
9927 {
9928         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9929         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9930         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9931                         misc_parameters_3);
9932         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9933         union {
9934                 uint32_t w32;
9935                 struct {
9936                         uint16_t seq_num;
9937                         uint8_t npdu_num;
9938                         uint8_t next_ext_header_type;
9939                 };
9940         } dw_2;
9941         uint8_t gtp_flags;
9942
9943         /* Always set E-flag match to one, regardless of GTP item settings. */
9944         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9945         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9946         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9947         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9948         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9949         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9950         /* Set the next extension header type. */
9951         dw_2.seq_num = 0;
9952         dw_2.npdu_num = 0;
9953         dw_2.next_ext_header_type = 0xff;
9954         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9955                  rte_cpu_to_be_32(dw_2.w32));
9956         dw_2.seq_num = 0;
9957         dw_2.npdu_num = 0;
9958         dw_2.next_ext_header_type = 0x85; /* PDU Session Container. */
9959         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9960                  rte_cpu_to_be_32(dw_2.w32));
9961         if (gtp_psc_v) {
9962                 union {
9963                         uint32_t w32;
9964                         struct {
9965                                 uint8_t len;
9966                                 uint8_t type_flags;
9967                                 uint8_t qfi;
9968                                 uint8_t reserved;
9969                         };
9970                 } dw_0;
9971
9972                 /* Set the extension header PDU type and QoS. */
9973                 if (!gtp_psc_m)
9974                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9975                 dw_0.w32 = 0;
9976                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
9977                 dw_0.qfi = gtp_psc_m->hdr.qfi;
9978                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9979                          rte_cpu_to_be_32(dw_0.w32));
9980                 dw_0.w32 = 0;
9981                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
9982                                                         gtp_psc_m->hdr.type);
9983                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
9984                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9985                          rte_cpu_to_be_32(dw_0.w32));
9986         }
9987         return 0;
9988 }
9989
9990 /**
9991  * Add eCPRI item to matcher and to the value.
9992  *
9993  * @param[in] dev
9994  *   The device to configure through.
9995  * @param[in, out] matcher
9996  *   Flow matcher.
9997  * @param[in, out] key
9998  *   Flow matcher value.
9999  * @param[in] item
10000  *   Flow pattern to translate.
10001  * @param[in] last_item
10002  *   Last item flags.
10003  */
10004 static void
10005 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
10006                              void *key, const struct rte_flow_item *item,
10007                              uint64_t last_item)
10008 {
10009         struct mlx5_priv *priv = dev->data->dev_private;
10010         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
10011         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
10012         struct rte_ecpri_common_hdr common;
10013         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
10014                                      misc_parameters_4);
10015         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
10016         uint32_t *samples;
10017         void *dw_m;
10018         void *dw_v;
10019
10020         /*
10021          * In case of eCPRI over Ethernet, if EtherType is not specified,
10022          * match on eCPRI EtherType implicitly.
10023          */
10024         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
10025                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
10026
10027                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
10028                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10029                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
10030                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
10031                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
10032                         *(uint16_t *)l2m = UINT16_MAX;
10033                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
10034                 }
10035         }
10036         if (!ecpri_v)
10037                 return;
10038         if (!ecpri_m)
10039                 ecpri_m = &rte_flow_item_ecpri_mask;
10040         /*
10041          * At most four DW samples are supported in a single matcher now.
10042          * Two are currently used for eCPRI matching:
10043          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
10044          * 2. ID of a message: one or two bytes, mask 0xffff0000 or
10045          *    0xff000000 if any.
10046          */
10047         if (!ecpri_m->hdr.common.u32)
10048                 return;
10049         samples = priv->sh->ecpri_parser.ids;
10050         /* Need to take the whole DW as the mask to fill the entry. */
10051         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10052                             prog_sample_field_value_0);
10053         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10054                             prog_sample_field_value_0);
10055         /* Already big endian (network order) in the header. */
10056         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10057         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10058         /* Sample#0, used for matching type, offset 0. */
10059         MLX5_SET(fte_match_set_misc4, misc4_m,
10060                  prog_sample_field_id_0, samples[0]);
10061         /* It makes no sense to set the sample ID in the mask field. */
10062         MLX5_SET(fte_match_set_misc4, misc4_v,
10063                  prog_sample_field_id_0, samples[0]);
10064         /*
10065          * Check if the message body part needs to be matched.
10066          * Wildcard rules matching only the type field should be supported.
10067          */
10068         if (ecpri_m->hdr.dummy[0]) {
10069                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10070                 switch (common.type) {
10071                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10072                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10073                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10074                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10075                                             prog_sample_field_value_1);
10076                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10077                                             prog_sample_field_value_1);
10078                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10079                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10080                                             ecpri_m->hdr.dummy[0];
10081                         /* Sample#1, to match message body, offset 4. */
10082                         MLX5_SET(fte_match_set_misc4, misc4_m,
10083                                  prog_sample_field_id_1, samples[1]);
10084                         MLX5_SET(fte_match_set_misc4, misc4_v,
10085                                  prog_sample_field_id_1, samples[1]);
10086                         break;
10087                 default:
10088                         /* Others, do not match any sample ID. */
10089                         break;
10090                 }
10091         }
10092 }
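
/*
 * Illustration (hypothetical mask, not part of the driver): an rte_flow
 * eCPRI item mask that matches on the message type only, i.e. the one-byte
 * 0x00ff0000 pattern in network order described above:
 */
static __rte_unused const struct rte_flow_item_ecpri
example_ecpri_type_only_mask = {
        .hdr = {
                .common = {
                        .u32 = RTE_BE32(0x00ff0000),
                },
        },
};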
10093
10094 /*
10095  * Add connection tracking status item to matcher
10096  *
10097  * @param[in] dev
10098  *   The device to configure through.
10099  * @param[in, out] matcher
10100  *   Flow matcher.
10101  * @param[in, out] key
10102  *   Flow matcher value.
10103  * @param[in] item
10104  *   Flow pattern to translate.
10105  */
10106 static void
10107 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10108                               void *matcher, void *key,
10109                               const struct rte_flow_item *item)
10110 {
10111         uint32_t reg_value = 0;
10112         int reg_id;
10113         /* The 8 LSBs form 0b11000011; the middle 4 bits are reserved. */
10114         uint32_t reg_mask = 0;
10115         const struct rte_flow_item_conntrack *spec = item->spec;
10116         const struct rte_flow_item_conntrack *mask = item->mask;
10117         uint32_t flags;
10118         struct rte_flow_error error;
10119
10120         if (!mask)
10121                 mask = &rte_flow_item_conntrack_mask;
10122         if (!spec || !mask->flags)
10123                 return;
10124         flags = spec->flags & mask->flags;
10125         /* The conflict should be checked in the validation. */
10126         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10127                 reg_value |= MLX5_CT_SYNDROME_VALID;
10128         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10129                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10130         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10131                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10132         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10133                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10134         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10135                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10136         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10137                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10138                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10139                 reg_mask |= 0xc0;
10140         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10141                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10142         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10143                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10144         /* The REG_C_x value could be saved during startup. */
10145         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10146         if (reg_id == REG_NON)
10147                 return;
10148         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10149                                reg_value, reg_mask);
10150 }
10151
10152 static void
10153 flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10154                             const struct rte_flow_item *item,
10155                             struct mlx5_flow *dev_flow, bool is_inner)
10156 {
10157         const struct rte_flow_item_flex *spec =
10158                 (const struct rte_flow_item_flex *)item->spec;
10159         int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10160
10161         MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10162         if (index < 0)
10163                 return;
10164         if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10165                 /* Don't count both inner and outer flex items in one rule. */
10166                 if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10167                         MLX5_ASSERT(false);
10168                 dev_flow->handle->flex_item |= RTE_BIT32(index);
10169         }
10170         mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10171 }
10172
10173 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10174
10175 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10176         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10177                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10178
10179 /**
10180  * Calculate flow matcher enable bitmap.
10181  *
10182  * @param match_criteria
10183  *   Pointer to flow matcher criteria.
10184  *
10185  * @return
10186  *   Bitmap of enabled fields.
10187  */
10188 static uint8_t
10189 flow_dv_matcher_enable(uint32_t *match_criteria)
10190 {
10191         uint8_t match_criteria_enable;
10192
10193         match_criteria_enable =
10194                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10195                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10196         match_criteria_enable |=
10197                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10198                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10199         match_criteria_enable |=
10200                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10201                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10202         match_criteria_enable |=
10203                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10204                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10205         match_criteria_enable |=
10206                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10207                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10208         match_criteria_enable |=
10209                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10210                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10211         match_criteria_enable |=
10212                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10213                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10214         return match_criteria_enable;
10215 }
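
/*
 * Illustration (hypothetical check, not part of the driver): a matcher
 * whose mask touches only the outer headers yields a criteria bitmap with
 * just the outer bit set:
 */
static inline int
example_matcher_uses_outer_only(uint32_t *match_criteria)
{
        return flow_dv_matcher_enable(match_criteria) ==
               (1u << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT);
}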
10216
10217 static void
10218 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10219 {
10220         /*
10221          * Check flow matching criteria first, subtract misc5/4 length if flow
10222          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10223          * misc5/4 are not supported, and matcher creation failure is expected
10224          * w/o subtraction. If misc5 is provided, misc4 must be counted in since
10225          * misc5 is right after misc4.
10226          */
10227         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10228                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10229                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10230                 if (!(match_criteria & (1 <<
10231                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10232                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10233                 }
10234         }
10235 }
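
/*
 * Illustration (hypothetical helper, not part of the driver): computing the
 * matcher buffer size for a given criteria bitmap with the adjustment above
 * applied:
 */
static inline size_t
example_matcher_buf_size(uint8_t match_criteria)
{
        size_t size = MLX5_ST_SZ_BYTES(fte_match_param);

        __flow_dv_adjust_buf_size(&size, match_criteria);
        return size;
}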
10236
10237 static struct mlx5_list_entry *
10238 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10239                          struct mlx5_list_entry *entry, void *cb_ctx)
10240 {
10241         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10242         struct mlx5_flow_dv_matcher *ref = ctx->data;
10243         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10244                                                             typeof(*tbl), tbl);
10245         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10246                                                             sizeof(*resource),
10247                                                             0, SOCKET_ID_ANY);
10248
10249         if (!resource) {
10250                 rte_flow_error_set(ctx->error, ENOMEM,
10251                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10252                                    "cannot create matcher");
10253                 return NULL;
10254         }
10255         memcpy(resource, entry, sizeof(*resource));
10256         resource->tbl = &tbl->tbl;
10257         return &resource->entry;
10258 }
10259
10260 static void
10261 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10262                              struct mlx5_list_entry *entry)
10263 {
10264         mlx5_free(entry);
10265 }
10266
10267 struct mlx5_list_entry *
10268 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10269 {
10270         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10271         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10272         struct rte_eth_dev *dev = ctx->dev;
10273         struct mlx5_flow_tbl_data_entry *tbl_data;
10274         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10275         struct rte_flow_error *error = ctx->error;
10276         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10277         struct mlx5_flow_tbl_resource *tbl;
10278         void *domain;
10279         uint32_t idx = 0;
10280         int ret;
10281
10282         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10283         if (!tbl_data) {
10284                 rte_flow_error_set(error, ENOMEM,
10285                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10286                                    NULL,
10287                                    "cannot allocate flow table data entry");
10288                 return NULL;
10289         }
10290         tbl_data->idx = idx;
10291         tbl_data->tunnel = tt_prm->tunnel;
10292         tbl_data->group_id = tt_prm->group_id;
10293         tbl_data->external = !!tt_prm->external;
10294         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10295         tbl_data->is_egress = !!key.is_egress;
10296         tbl_data->is_transfer = !!key.is_fdb;
10297         tbl_data->dummy = !!key.dummy;
10298         tbl_data->level = key.level;
10299         tbl_data->id = key.id;
10300         tbl = &tbl_data->tbl;
10301         if (key.dummy)
10302                 return &tbl_data->entry;
10303         if (key.is_fdb)
10304                 domain = sh->fdb_domain;
10305         else if (key.is_egress)
10306                 domain = sh->tx_domain;
10307         else
10308                 domain = sh->rx_domain;
10309         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10310         if (ret) {
10311                 rte_flow_error_set(error, ENOMEM,
10312                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10313                                    NULL, "cannot create flow table object");
10314                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10315                 return NULL;
10316         }
10317         if (key.level != 0) {
10318                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10319                                         (tbl->obj, &tbl_data->jump.action);
10320                 if (ret) {
10321                         rte_flow_error_set(error, ENOMEM,
10322                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10323                                            NULL,
10324                                            "cannot create flow jump action");
10325                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10326                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10327                         return NULL;
10328                 }
10329         }
10330         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10331               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10332               key.level, key.id);
10333         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10334                                               flow_dv_matcher_create_cb,
10335                                               flow_dv_matcher_match_cb,
10336                                               flow_dv_matcher_remove_cb,
10337                                               flow_dv_matcher_clone_cb,
10338                                               flow_dv_matcher_clone_free_cb);
10339         if (!tbl_data->matchers) {
10340                 rte_flow_error_set(error, ENOMEM,
10341                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10342                                    NULL,
10343                                    "cannot create tbl matcher list");
10344                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10345                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10346                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10347                 return NULL;
10348         }
10349         return &tbl_data->entry;
10350 }
10351
10352 int
10353 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10354                      void *cb_ctx)
10355 {
10356         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10357         struct mlx5_flow_tbl_data_entry *tbl_data =
10358                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10359                 union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10360
10361         return tbl_data->level != key.level ||
10362                tbl_data->id != key.id ||
10363                tbl_data->dummy != key.dummy ||
10364                tbl_data->is_transfer != !!key.is_fdb ||
10365                tbl_data->is_egress != !!key.is_egress;
10366 }
10367
10368 struct mlx5_list_entry *
10369 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10370                       void *cb_ctx)
10371 {
10372         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10373         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10374         struct mlx5_flow_tbl_data_entry *tbl_data;
10375         struct rte_flow_error *error = ctx->error;
10376         uint32_t idx = 0;
10377
10378         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10379         if (!tbl_data) {
10380                 rte_flow_error_set(error, ENOMEM,
10381                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10382                                    NULL,
10383                                    "cannot allocate flow table data entry");
10384                 return NULL;
10385         }
10386         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10387         tbl_data->idx = idx;
10388         return &tbl_data->entry;
10389 }
10390
10391 void
10392 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10393 {
10394         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10395         struct mlx5_flow_tbl_data_entry *tbl_data =
10396                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10397
10398         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10399 }
10400
10401 /**
10402  * Get a flow table.
10403  *
10404  * @param[in, out] dev
10405  *   Pointer to rte_eth_dev structure.
10406  * @param[in] table_level
10407  *   Table level to use.
10408  * @param[in] egress
10409  *   Direction of the table.
10410  * @param[in] transfer
10411  *   E-Switch or NIC flow.
10412  * @param[in] dummy
10413  *   Dummy entry for dv API.
10414  * @param[in] table_id
10415  *   Table id to use.
10416  * @param[out] error
10417  *   Pointer to error structure.
10418  *
10419  * @return
10420  *   Returns the table resource based on the index, NULL in case of failure.
10421  */
10422 struct mlx5_flow_tbl_resource *
10423 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10424                          uint32_t table_level, uint8_t egress,
10425                          uint8_t transfer,
10426                          bool external,
10427                          const struct mlx5_flow_tunnel *tunnel,
10428                          uint32_t group_id, uint8_t dummy,
10429                          uint32_t table_id,
10430                          struct rte_flow_error *error)
10431 {
10432         struct mlx5_priv *priv = dev->data->dev_private;
10433         union mlx5_flow_tbl_key table_key = {
10434                 {
10435                         .level = table_level,
10436                         .id = table_id,
10437                         .reserved = 0,
10438                         .dummy = !!dummy,
10439                         .is_fdb = !!transfer,
10440                         .is_egress = !!egress,
10441                 }
10442         };
10443         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10444                 .tunnel = tunnel,
10445                 .group_id = group_id,
10446                 .external = external,
10447         };
10448         struct mlx5_flow_cb_ctx ctx = {
10449                 .dev = dev,
10450                 .error = error,
10451                 .data = &table_key.v64,
10452                 .data2 = &tt_prm,
10453         };
10454         struct mlx5_list_entry *entry;
10455         struct mlx5_flow_tbl_data_entry *tbl_data;
10456
10457         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10458         if (!entry) {
10459                 rte_flow_error_set(error, ENOMEM,
10460                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10461                                    "cannot get table");
10462                 return NULL;
10463         }
10464         DRV_LOG(DEBUG, "table_level %u table_id %u "
10465                 "tunnel %u group %u registered.",
10466                 table_level, table_id,
10467                 tunnel ? tunnel->tunnel_id : 0, group_id);
10468         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10469         return &tbl_data->tbl;
10470 }
10471
10472 void
10473 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10474 {
10475         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10476         struct mlx5_flow_tbl_data_entry *tbl_data =
10477                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10478
10479         MLX5_ASSERT(entry && sh);
10480         if (tbl_data->jump.action)
10481                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10482         if (tbl_data->tbl.obj)
10483                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10484         if (tbl_data->tunnel_offload && tbl_data->external) {
10485                 struct mlx5_list_entry *he;
10486                 struct mlx5_hlist *tunnel_grp_hash;
10487                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10488                 union tunnel_tbl_key tunnel_key = {
10489                         .tunnel_id = tbl_data->tunnel ?
10490                                         tbl_data->tunnel->tunnel_id : 0,
10491                         .group = tbl_data->group_id
10492                 };
10493                 uint32_t table_level = tbl_data->level;
10494                 struct mlx5_flow_cb_ctx ctx = {
10495                         .data = (void *)&tunnel_key.val,
10496                 };
10497
10498                 tunnel_grp_hash = tbl_data->tunnel ?
10499                                         tbl_data->tunnel->groups :
10500                                         thub->groups;
10501                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10502                 if (he)
10503                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10504                 DRV_LOG(DEBUG,
10505                         "table_level %u id %u tunnel %u group %u released.",
10506                         table_level,
10507                         tbl_data->id,
10508                         tbl_data->tunnel ?
10509                         tbl_data->tunnel->tunnel_id : 0,
10510                         tbl_data->group_id);
10511         }
10512         mlx5_list_destroy(tbl_data->matchers);
10513         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10514 }
10515
10516 /**
10517  * Release a flow table.
10518  *
10519  * @param[in] sh
10520  *   Pointer to device shared structure.
10521  * @param[in] tbl
10522  *   Table resource to be released.
10523  *
10524  * @return
10525  *   Returns 0 if the table was released, else returns 1.
10526  */
10527 static int
10528 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10529                              struct mlx5_flow_tbl_resource *tbl)
10530 {
10531         struct mlx5_flow_tbl_data_entry *tbl_data =
10532                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10533
10534         if (!tbl)
10535                 return 0;
10536         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10537 }
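
/*
 * Illustration (hypothetical caller, not part of the driver):
 * flow_dv_tbl_resource_get() and flow_dv_tbl_resource_release() form a
 * reference-counted pair, e.g. acquiring NIC-Rx table level 1 and then
 * dropping the reference again:
 */
static inline int
example_tbl_get_put(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_tbl_resource *tbl;

        tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, 0, 0,
                                       error);
        if (!tbl)
                return -rte_errno;
        return flow_dv_tbl_resource_release(priv->sh, tbl);
}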
10538
10539 int
10540 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10541                          struct mlx5_list_entry *entry, void *cb_ctx)
10542 {
10543         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10544         struct mlx5_flow_dv_matcher *ref = ctx->data;
10545         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10546                                                         entry);
10547
10548         return cur->crc != ref->crc ||
10549                cur->priority != ref->priority ||
10550                memcmp((const void *)cur->mask.buf,
10551                       (const void *)ref->mask.buf, ref->mask.size);
10552 }
10553
10554 struct mlx5_list_entry *
10555 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10556 {
10557         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10558         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10559         struct mlx5_flow_dv_matcher *ref = ctx->data;
10560         struct mlx5_flow_dv_matcher *resource;
10561         struct mlx5dv_flow_matcher_attr dv_attr = {
10562                 .type = IBV_FLOW_ATTR_NORMAL,
10563                 .match_mask = (void *)&ref->mask,
10564         };
10565         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10566                                                             typeof(*tbl), tbl);
10567         int ret;
10568
10569         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10570                                SOCKET_ID_ANY);
10571         if (!resource) {
10572                 rte_flow_error_set(ctx->error, ENOMEM,
10573                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10574                                    "cannot create matcher");
10575                 return NULL;
10576         }
10577         *resource = *ref;
10578         dv_attr.match_criteria_enable =
10579                 flow_dv_matcher_enable(resource->mask.buf);
10580         __flow_dv_adjust_buf_size(&ref->mask.size,
10581                                   dv_attr.match_criteria_enable);
10582         dv_attr.priority = ref->priority;
10583         if (tbl->is_egress)
10584                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10585         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10586                                                tbl->tbl.obj,
10587                                                &resource->matcher_object);
10588         if (ret) {
10589                 mlx5_free(resource);
10590                 rte_flow_error_set(ctx->error, ENOMEM,
10591                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10592                                    "cannot create matcher");
10593                 return NULL;
10594         }
10595         return &resource->entry;
10596 }
10597
10598 /**
10599  * Register the flow matcher.
10600  *
10601  * @param[in, out] dev
10602  *   Pointer to rte_eth_dev structure.
10603  * @param[in, out] ref
10604  *   Pointer to the flow matcher reference.
10605  * @param[in, out] key
10606  *   Pointer to the flow table key.
10607  * @param[in, out] dev_flow
10608  *   Pointer to the dev_flow.
10609  * @param[out] error
10610  *   Pointer to the error structure.
10611  *
10612  * @return
10613  *   0 on success, a negative errno value otherwise and rte_errno is set.
10614  */
10615 static int
10616 flow_dv_matcher_register(struct rte_eth_dev *dev,
10617                          struct mlx5_flow_dv_matcher *ref,
10618                          union mlx5_flow_tbl_key *key,
10619                          struct mlx5_flow *dev_flow,
10620                          const struct mlx5_flow_tunnel *tunnel,
10621                          uint32_t group_id,
10622                          struct rte_flow_error *error)
10623 {
10624         struct mlx5_list_entry *entry;
10625         struct mlx5_flow_dv_matcher *resource;
10626         struct mlx5_flow_tbl_resource *tbl;
10627         struct mlx5_flow_tbl_data_entry *tbl_data;
10628         struct mlx5_flow_cb_ctx ctx = {
10629                 .error = error,
10630                 .data = ref,
10631         };
10632         /*
10633          * The tunnel offload API requires this registration for cases when
10634          * a tunnel match rule is inserted before the tunnel set rule.
10635          */
10636         tbl = flow_dv_tbl_resource_get(dev, key->level,
10637                                        key->is_egress, key->is_fdb,
10638                                        dev_flow->external, tunnel,
10639                                        group_id, 0, key->id, error);
10640         if (!tbl)
10641                 return -rte_errno;      /* No need to refill the error info */
10642         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10643         ref->tbl = tbl;
10644         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10645         if (!entry) {
10646                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10647                 return rte_flow_error_set(error, ENOMEM,
10648                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10649                                           "cannot allocate ref memory");
10650         }
10651         resource = container_of(entry, typeof(*resource), entry);
10652         dev_flow->handle->dvh.matcher = resource;
10653         return 0;
10654 }
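
/*
 * Illustrative sketch (not part of the driver): how a matcher reference
 * is typically prepared before the registration above. The match
 * callback dedupes entries on the CRC of the mask buffer, the priority
 * and the raw mask bytes, so the checksum is computed up front once the
 * item translation has filled the mask. The priority value here is a
 * placeholder.
 */
static void __rte_unused
example_prepare_matcher_ref(struct mlx5_flow_dv_matcher *ref)
{
        ref->priority = 0;
        ref->crc = rte_raw_cksum((const void *)ref->mask.buf,
                                 ref->mask.size);
}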
10655
10656 struct mlx5_list_entry *
10657 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10658 {
10659         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10660         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10661         struct mlx5_flow_dv_tag_resource *entry;
10662         uint32_t idx = 0;
10663         int ret;
10664
10665         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10666         if (!entry) {
10667                 rte_flow_error_set(ctx->error, ENOMEM,
10668                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10669                                    "cannot allocate resource memory");
10670                 return NULL;
10671         }
10672         entry->idx = idx;
10673         entry->tag_id = *(uint32_t *)(ctx->data);
10674         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10675                                                   &entry->action);
10676         if (ret) {
10677                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10678                 rte_flow_error_set(ctx->error, ENOMEM,
10679                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10680                                    NULL, "cannot create action");
10681                 return NULL;
10682         }
10683         return &entry->entry;
10684 }
10685
10686 int
10687 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10688                      void *cb_ctx)
10689 {
10690         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10691         struct mlx5_flow_dv_tag_resource *tag =
10692                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10693
10694         return *(uint32_t *)(ctx->data) != tag->tag_id;
10695 }
10696
10697 struct mlx5_list_entry *
10698 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10699                      void *cb_ctx)
10700 {
10701         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10702         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10703         struct mlx5_flow_dv_tag_resource *entry;
10704         uint32_t idx = 0;
10705
10706         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10707         if (!entry) {
10708                 rte_flow_error_set(ctx->error, ENOMEM,
10709                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10710                                    "cannot allocate tag resource memory");
10711                 return NULL;
10712         }
10713         memcpy(entry, oentry, sizeof(*entry));
10714         entry->idx = idx;
10715         return &entry->entry;
10716 }
10717
10718 void
10719 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10720 {
10721         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10722         struct mlx5_flow_dv_tag_resource *tag =
10723                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10724
10725         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10726 }
10727
10728 /**
10729  * Find existing tag resource or create and register a new one.
10730  *
10731  * @param[in, out] dev
10732  *   Pointer to rte_eth_dev structure.
10733  * @param[in, out] tag_be24
10734  *   Tag value converted to big endian, then right-shifted by 8 bits.
10735  * @param[in, out] dev_flow
10736  *   Pointer to the dev_flow.
10737  * @param[out] error
10738  *   Pointer to the error structure.
10739  *
10740  * @return
10741  *   0 on success, a negative errno value otherwise and rte_errno is set.
10742  */
10743 static int
10744 flow_dv_tag_resource_register
10745                         (struct rte_eth_dev *dev,
10746                          uint32_t tag_be24,
10747                          struct mlx5_flow *dev_flow,
10748                          struct rte_flow_error *error)
10749 {
10750         struct mlx5_priv *priv = dev->data->dev_private;
10751         struct mlx5_flow_dv_tag_resource *resource;
10752         struct mlx5_list_entry *entry;
10753         struct mlx5_flow_cb_ctx ctx = {
10754                 .error = error,
10755                 .data = &tag_be24,
10756         };
10757         struct mlx5_hlist *tag_table;
10758
10759         tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10760                                       "tags",
10761                                       MLX5_TAGS_HLIST_ARRAY_SIZE,
10762                                       false, false, priv->sh,
10763                                       flow_dv_tag_create_cb,
10764                                       flow_dv_tag_match_cb,
10765                                       flow_dv_tag_remove_cb,
10766                                       flow_dv_tag_clone_cb,
10767                                       flow_dv_tag_clone_free_cb);
10768         if (unlikely(!tag_table))
10769                 return -rte_errno;
10770         entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10771         if (entry) {
10772                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10773                                         entry);
10774                 dev_flow->handle->dvh.rix_tag = resource->idx;
10775                 dev_flow->dv.tag_resource = resource;
10776                 return 0;
10777         }
10778         return -rte_errno;
10779 }
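
/*
 * Illustrative sketch (not part of the driver): how the tag_be24
 * argument above is typically derived on a little-endian host. The
 * 24-bit tag is converted to big endian and right-shifted by 8 so that
 * only the three significant bytes remain (see mlx5_flow_mark_set()).
 */
static uint32_t __rte_unused
example_mark_to_tag_be24(uint32_t mark_id)
{
        return rte_cpu_to_be_32(mark_id) >> 8;
}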
10780
10781 void
10782 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10783 {
10784         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10785         struct mlx5_flow_dv_tag_resource *tag =
10786                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10787
10788         MLX5_ASSERT(tag && sh && tag->action);
10789         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10790         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10791         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10792 }
10793
10794 /**
10795  * Release the tag.
10796  *
10797  * @param dev
10798  *   Pointer to Ethernet device.
10799  * @param tag_idx
10800  *   Tag index.
10801  *
10802  * @return
10803  *   1 while a reference on it exists, 0 when freed.
10804  */
10805 static int
10806 flow_dv_tag_release(struct rte_eth_dev *dev,
10807                     uint32_t tag_idx)
10808 {
10809         struct mlx5_priv *priv = dev->data->dev_private;
10810         struct mlx5_flow_dv_tag_resource *tag;
10811
10812         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10813         if (!tag)
10814                 return 0;
10815         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10816                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10817         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10818 }
10819
10820 /**
10821  * Translate action PORT_ID / REPRESENTED_PORT to vport.
10822  *
10823  * @param[in] dev
10824  *   Pointer to rte_eth_dev structure.
10825  * @param[in] action
10826  *   Pointer to action PORT_ID / REPRESENTED_PORT.
10827  * @param[out] dst_port_id
10828  *   The target port ID.
10829  * @param[out] error
10830  *   Pointer to the error structure.
10831  *
10832  * @return
10833  *   0 on success, a negative errno value otherwise and rte_errno is set.
10834  */
10835 static int
10836 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10837                                  const struct rte_flow_action *action,
10838                                  uint32_t *dst_port_id,
10839                                  struct rte_flow_error *error)
10840 {
10841         uint32_t port;
10842         struct mlx5_priv *priv;
10843
10844         switch (action->type) {
10845         case RTE_FLOW_ACTION_TYPE_PORT_ID: {
10846                 const struct rte_flow_action_port_id *conf;
10847
10848                 conf = (const struct rte_flow_action_port_id *)action->conf;
10849                 port = conf->original ? dev->data->port_id : conf->id;
10850                 break;
10851         }
10852         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
10853                 const struct rte_flow_action_ethdev *ethdev;
10854
10855                 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
10856                 port = ethdev->port_id;
10857                 break;
10858         }
10859         default:
10860                 MLX5_ASSERT(false);
10861                 return rte_flow_error_set(error, EINVAL,
10862                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
10863                                           "unknown E-Switch action");
10864         }
10865
10866         priv = mlx5_port_to_eswitch_info(port, false);
10867         if (!priv)
10868                 return rte_flow_error_set(error, -rte_errno,
10869                                           RTE_FLOW_ERROR_TYPE_ACTION,
10870                                           NULL,
10871                                           "No eswitch info was found for port");
10872 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10873         /*
10874          * This parameter is transferred to
10875          * mlx5dv_dr_action_create_dest_ib_port().
10876          */
10877         *dst_port_id = priv->dev_port;
10878 #else
10879         /*
10880          * Legacy mode, LAG configurations are not supported.
10881          * This parameter is transferred to
10882          * mlx5dv_dr_action_create_dest_vport().
10883          */
10884         *dst_port_id = priv->vport_id;
10885 #endif
10886         return 0;
10887 }
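
/*
 * Illustrative sketch using the public rte_flow API (not part of the
 * driver): the two action forms accepted by the translation above.
 * "peer_port" is a placeholder ethdev port number.
 */
static void __rte_unused
example_port_actions(uint16_t peer_port,
                     struct rte_flow_action_port_id *port_id_conf,
                     struct rte_flow_action_ethdev *ethdev_conf,
                     struct rte_flow_action act[2])
{
        port_id_conf->original = 0; /* Use .id, not the incoming port. */
        port_id_conf->id = peer_port;
        act[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
                .conf = port_id_conf,
        };
        ethdev_conf->port_id = peer_port;
        act[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
                .conf = ethdev_conf,
        };
}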
10888
10889 /**
10890  * Create a counter with aging configuration.
10891  *
10892  * @param[in] dev
10893  *   Pointer to rte_eth_dev structure.
10894  * @param[in] dev_flow
10895  *   Pointer to the mlx5_flow.
10896  * @param[in] count
10897  *   Pointer to the counter action configuration.
10898  * @param[in] age
10899  *   Pointer to the aging action configuration.
10900  *
10901  * @return
10902  *   Index to flow counter on success, 0 otherwise.
10903  */
10904 static uint32_t
10905 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10906                                 struct mlx5_flow *dev_flow,
10907                                 const struct rte_flow_action_count *count
10908                                         __rte_unused,
10909                                 const struct rte_flow_action_age *age)
10910 {
10911         uint32_t counter;
10912         struct mlx5_age_param *age_param;
10913
10914         counter = flow_dv_counter_alloc(dev, !!age);
10915         if (!counter || age == NULL)
10916                 return counter;
10917         age_param = flow_dv_counter_idx_get_age(dev, counter);
10918         age_param->context = age->context ? age->context :
10919                 (void *)(uintptr_t)(dev_flow->flow_idx);
10920         age_param->timeout = age->timeout;
10921         age_param->port_id = dev->data->port_id;
10922         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10923         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10924         return counter;
10925 }
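
/*
 * Illustrative sketch using the public rte_flow API (not part of the
 * driver): a COUNT action paired with an AGE action, the combination
 * handled above. The timeout is a placeholder; a NULL context defaults
 * to the flow index.
 */
static void __rte_unused
example_count_with_age(struct rte_flow_action_count *cnt_conf,
                       struct rte_flow_action_age *age_conf,
                       struct rte_flow_action act[3])
{
        memset(cnt_conf, 0, sizeof(*cnt_conf));
        memset(age_conf, 0, sizeof(*age_conf));
        age_conf->timeout = 10; /* Seconds without a hit before aging out. */
        act[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
                .conf = cnt_conf,
        };
        act[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_AGE,
                .conf = age_conf,
        };
        act[2] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END,
        };
}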
10926
10927 /**
10928  * Add Tx queue matcher
10929  *
10930  * @param[in] dev
10931  *   Pointer to the dev struct.
10932  * @param[in, out] matcher
10933  *   Flow matcher.
10934  * @param[in, out] key
10935  *   Flow matcher value.
10936  * @param[in] item
10937  *   Flow pattern to translate.
10940  */
10941 static void
10942 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10943                                 void *matcher, void *key,
10944                                 const struct rte_flow_item *item)
10945 {
10946         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10947         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10948         void *misc_m =
10949                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10950         void *misc_v =
10951                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10952         struct mlx5_txq_ctrl *txq;
10953         uint32_t queue, mask;
10954
10955         queue_m = (const void *)item->mask;
10956         queue_v = (const void *)item->spec;
10957         if (!queue_v)
10958                 return;
10959         txq = mlx5_txq_get(dev, queue_v->queue);
10960         if (!txq)
10961                 return;
10962         if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
10963                 queue = txq->obj->sq->id;
10964         else
10965                 queue = txq->obj->sq_obj.sq->id;
10966         mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
10967         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
10968         MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
10969         mlx5_txq_release(dev, queue_v->queue);
10970 }
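
/*
 * Illustrative sketch (not part of the driver): the PMD-internal item
 * consumed by the translation above. The Tx queue index ("3" here is a
 * placeholder) is resolved to the hardware SQ number at translation
 * time; a NULL mask matches the full queue field.
 */
static void __rte_unused
example_tx_queue_item(struct mlx5_rte_flow_item_tx_queue *spec,
                      struct rte_flow_item *item)
{
        spec->queue = 3;
        item->type = (enum rte_flow_item_type)
                     MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE;
        item->spec = spec;
        item->last = NULL;
        item->mask = NULL;
}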
10971
10972 /**
10973  * Set the hash fields according to the @p flow information.
10974  *
10975  * @param[in] dev_flow
10976  *   Pointer to the mlx5_flow.
10977  * @param[in] rss_desc
10978  *   Pointer to the mlx5_flow_rss_desc.
10979  */
10980 static void
10981 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10982                        struct mlx5_flow_rss_desc *rss_desc)
10983 {
10984         uint64_t items = dev_flow->handle->layers;
10985         int rss_inner = 0;
10986         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10987
10988         dev_flow->hash_fields = 0;
10989 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10990         if (rss_desc->level >= 2)
10991                 rss_inner = 1;
10992 #endif
10993         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10994             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10995                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10996                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10997                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10998                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
10999                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
11000                         else
11001                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
11002                 }
11003         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
11004                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
11005                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
11006                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
11007                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
11008                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
11009                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
11010                         else
11011                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
11012                 }
11013         }
11014         if (dev_flow->hash_fields == 0)
11015                 /*
11016                  * There is no match between the RSS types and the
11017                  * L3 protocol (IPv4/IPv6) defined in the flow rule.
11018                  */
11019                 return;
11020         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
11021             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
11022                 if (rss_types & RTE_ETH_RSS_UDP) {
11023                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11024                                 dev_flow->hash_fields |=
11025                                                 IBV_RX_HASH_SRC_PORT_UDP;
11026                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11027                                 dev_flow->hash_fields |=
11028                                                 IBV_RX_HASH_DST_PORT_UDP;
11029                         else
11030                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
11031                 }
11032         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
11033                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
11034                 if (rss_types & RTE_ETH_RSS_TCP) {
11035                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11036                                 dev_flow->hash_fields |=
11037                                                 IBV_RX_HASH_SRC_PORT_TCP;
11038                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11039                                 dev_flow->hash_fields |=
11040                                                 IBV_RX_HASH_DST_PORT_TCP;
11041                         else
11042                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
11043                 }
11044         }
11045         if (rss_inner)
11046                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
11047 }
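
/*
 * Illustrative sketch using the public rte_flow API (not part of the
 * driver): an RSS action configuration exercising the mapping above.
 * With these types, an IPv4/UDP flow hashes on the IPv4 source address
 * plus both UDP ports; a level of 2 or more would select the inner
 * headers of a tunnel instead.
 */
static void __rte_unused
example_rss_conf(struct rte_flow_action_rss *rss,
                 const uint16_t queues[], uint32_t queue_num)
{
        memset(rss, 0, sizeof(*rss));
        rss->level = 1; /* Outer headers. */
        rss->types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY |
                     RTE_ETH_RSS_UDP;
        rss->queue = queues;
        rss->queue_num = queue_num;
        /* A NULL key selects the default RSS key. */
}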
11048
11049 /**
11050  * Prepare an Rx Hash queue.
11051  *
11052  * @param dev
11053  *   Pointer to Ethernet device.
11054  * @param[in] dev_flow
11055  *   Pointer to the mlx5_flow.
11056  * @param[in] rss_desc
11057  *   Pointer to the mlx5_flow_rss_desc.
11058  * @param[out] hrxq_idx
11059  *   Hash Rx queue index.
11060  *
11061  * @return
11062  *   The Verbs/DevX object initialized on success, NULL otherwise and rte_errno is set.
11063  */
11064 static struct mlx5_hrxq *
11065 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
11066                      struct mlx5_flow *dev_flow,
11067                      struct mlx5_flow_rss_desc *rss_desc,
11068                      uint32_t *hrxq_idx)
11069 {
11070         struct mlx5_priv *priv = dev->data->dev_private;
11071         struct mlx5_flow_handle *dh = dev_flow->handle;
11072         struct mlx5_hrxq *hrxq;
11073
11074         MLX5_ASSERT(rss_desc->queue_num);
11075         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
11076         rss_desc->hash_fields = dev_flow->hash_fields;
11077         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
11078         rss_desc->shared_rss = 0;
11079         if (rss_desc->hash_fields == 0)
11080                 rss_desc->queue_num = 1;
11081         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
11082         if (!*hrxq_idx)
11083                 return NULL;
11084         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
11085                               *hrxq_idx);
11086         return hrxq;
11087 }
11088
11089 /**
11090  * Release sample sub action resource.
11091  *
11092  * @param[in, out] dev
11093  *   Pointer to rte_eth_dev structure.
11094  * @param[in] act_res
11095  *   Pointer to sample sub action resource.
11096  */
11097 static void
11098 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
11099                                    struct mlx5_flow_sub_actions_idx *act_res)
11100 {
11101         if (act_res->rix_hrxq) {
11102                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
11103                 act_res->rix_hrxq = 0;
11104         }
11105         if (act_res->rix_encap_decap) {
11106                 flow_dv_encap_decap_resource_release(dev,
11107                                                      act_res->rix_encap_decap);
11108                 act_res->rix_encap_decap = 0;
11109         }
11110         if (act_res->rix_port_id_action) {
11111                 flow_dv_port_id_action_resource_release(dev,
11112                                                 act_res->rix_port_id_action);
11113                 act_res->rix_port_id_action = 0;
11114         }
11115         if (act_res->rix_tag) {
11116                 flow_dv_tag_release(dev, act_res->rix_tag);
11117                 act_res->rix_tag = 0;
11118         }
11119         if (act_res->rix_jump) {
11120                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11121                 act_res->rix_jump = 0;
11122         }
11123 }
11124
11125 int
11126 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11127                         struct mlx5_list_entry *entry, void *cb_ctx)
11128 {
11129         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11130         struct rte_eth_dev *dev = ctx->dev;
11131         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11132         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11133                                                               typeof(*resource),
11134                                                               entry);
11135
11136         if (ctx_resource->ratio == resource->ratio &&
11137             ctx_resource->ft_type == resource->ft_type &&
11138             ctx_resource->ft_id == resource->ft_id &&
11139             ctx_resource->set_action == resource->set_action &&
11140             !memcmp((void *)&ctx_resource->sample_act,
11141                     (void *)&resource->sample_act,
11142                     sizeof(struct mlx5_flow_sub_actions_list))) {
11143                 /*
11144                  * If an existing sample action matches, release the
11145                  * references taken on the prepared sub-actions.
11146                  */
11147                 flow_dv_sample_sub_actions_release(dev,
11148                                                    &ctx_resource->sample_idx);
11149                 return 0;
11150         }
11151         return 1;
11152 }
11153
11154 struct mlx5_list_entry *
11155 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11156 {
11157         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11158         struct rte_eth_dev *dev = ctx->dev;
11159         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11160         void **sample_dv_actions = ctx_resource->sub_actions;
11161         struct mlx5_flow_dv_sample_resource *resource;
11162         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11163         struct mlx5_priv *priv = dev->data->dev_private;
11164         struct mlx5_dev_ctx_shared *sh = priv->sh;
11165         struct mlx5_flow_tbl_resource *tbl;
11166         uint32_t idx = 0;
11167         const uint32_t next_ft_step = 1;
11168         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11169         uint8_t is_egress = 0;
11170         uint8_t is_transfer = 0;
11171         struct rte_flow_error *error = ctx->error;
11172
11173         /* Register new sample resource. */
11174         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11175         if (!resource) {
11176                 rte_flow_error_set(error, ENOMEM,
11177                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11178                                           NULL,
11179                                           "cannot allocate resource memory");
11180                 return NULL;
11181         }
11182         *resource = *ctx_resource;
11183         /* Create normal path table level */
11184         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11185                 is_transfer = 1;
11186         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11187                 is_egress = 1;
11188         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11189                                         is_egress, is_transfer,
11190                                         true, NULL, 0, 0, 0, error);
11191         if (!tbl) {
11192                 rte_flow_error_set(error, ENOMEM,
11193                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11194                                           NULL,
11195                                           "fail to create normal path table "
11196                                           "for sample");
11197                 goto error;
11198         }
11199         resource->normal_path_tbl = tbl;
11200         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11201                 if (!sh->default_miss_action) {
11202                         rte_flow_error_set(error, ENOMEM,
11203                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11204                                                 NULL,
11205                                                 "default miss action was not "
11206                                                 "created");
11207                         goto error;
11208                 }
11209                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11210                                                 sh->default_miss_action;
11211         }
11212         /* Create a DR sample action */
11213         sampler_attr.sample_ratio = resource->ratio;
11214         sampler_attr.default_next_table = tbl->obj;
11215         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11216         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11217                                                         &sample_dv_actions[0];
11218         sampler_attr.action = resource->set_action;
11219         if (mlx5_os_flow_dr_create_flow_action_sampler
11220                         (&sampler_attr, &resource->verbs_action)) {
11221                 rte_flow_error_set(error, ENOMEM,
11222                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11223                                         NULL, "cannot create sample action");
11224                 goto error;
11225         }
11226         resource->idx = idx;
11227         resource->dev = dev;
11228         return &resource->entry;
11229 error:
11230         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11231                 flow_dv_sample_sub_actions_release(dev,
11232                                                    &resource->sample_idx);
11233         if (resource->normal_path_tbl)
11234                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11235                                 resource->normal_path_tbl);
11236         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11237         return NULL;
11238
11239 }
11240
11241 struct mlx5_list_entry *
11242 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11243                          struct mlx5_list_entry *entry __rte_unused,
11244                          void *cb_ctx)
11245 {
11246         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11247         struct rte_eth_dev *dev = ctx->dev;
11248         struct mlx5_flow_dv_sample_resource *resource;
11249         struct mlx5_priv *priv = dev->data->dev_private;
11250         struct mlx5_dev_ctx_shared *sh = priv->sh;
11251         uint32_t idx = 0;
11252
11253         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11254         if (!resource) {
11255                 rte_flow_error_set(ctx->error, ENOMEM,
11256                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11257                                           NULL,
11258                                           "cannot allocate resource memory");
11259                 return NULL;
11260         }
11261         memcpy(resource, entry, sizeof(*resource));
11262         resource->idx = idx;
11263         resource->dev = dev;
11264         return &resource->entry;
11265 }
11266
11267 void
11268 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11269                              struct mlx5_list_entry *entry)
11270 {
11271         struct mlx5_flow_dv_sample_resource *resource =
11272                                   container_of(entry, typeof(*resource), entry);
11273         struct rte_eth_dev *dev = resource->dev;
11274         struct mlx5_priv *priv = dev->data->dev_private;
11275
11276         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11277 }
11278
11279 /**
11280  * Find existing sample resource or create and register a new one.
11281  *
11282  * @param[in, out] dev
11283  *   Pointer to rte_eth_dev structure.
11284  * @param[in] ref
11285  *   Pointer to sample resource reference.
11286  * @param[in, out] dev_flow
11287  *   Pointer to the dev_flow.
11288  * @param[out] error
11289  *   Pointer to the error structure.
11290  *
11291  * @return
11292  *   0 on success, a negative errno value otherwise and rte_errno is set.
11293  */
11294 static int
11295 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11296                          struct mlx5_flow_dv_sample_resource *ref,
11297                          struct mlx5_flow *dev_flow,
11298                          struct rte_flow_error *error)
11299 {
11300         struct mlx5_flow_dv_sample_resource *resource;
11301         struct mlx5_list_entry *entry;
11302         struct mlx5_priv *priv = dev->data->dev_private;
11303         struct mlx5_flow_cb_ctx ctx = {
11304                 .dev = dev,
11305                 .error = error,
11306                 .data = ref,
11307         };
11308
11309         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11310         if (!entry)
11311                 return -rte_errno;
11312         resource = container_of(entry, typeof(*resource), entry);
11313         dev_flow->handle->dvh.rix_sample = resource->idx;
11314         dev_flow->dv.sample_res = resource;
11315         return 0;
11316 }
11317
11318 int
11319 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11320                             struct mlx5_list_entry *entry, void *cb_ctx)
11321 {
11322         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11323         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11324         struct rte_eth_dev *dev = ctx->dev;
11325         struct mlx5_flow_dv_dest_array_resource *resource =
11326                                   container_of(entry, typeof(*resource), entry);
11327         uint32_t idx = 0;
11328
11329         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11330             ctx_resource->ft_type == resource->ft_type &&
11331             !memcmp((void *)resource->sample_act,
11332                     (void *)ctx_resource->sample_act,
11333                    (ctx_resource->num_of_dest *
11334                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11335                 /*
11336                  * If an existing destination array action matches, release
11337                  * the references taken on the prepared sub-actions.
11338                  */
11339                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11340                         flow_dv_sample_sub_actions_release(dev,
11341                                         &ctx_resource->sample_idx[idx]);
11342                 return 0;
11343         }
11344         return 1;
11345 }
11346
11347 struct mlx5_list_entry *
11348 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11349 {
11350         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11351         struct rte_eth_dev *dev = ctx->dev;
11352         struct mlx5_flow_dv_dest_array_resource *resource;
11353         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11354         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11355         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11356         struct mlx5_priv *priv = dev->data->dev_private;
11357         struct mlx5_dev_ctx_shared *sh = priv->sh;
11358         struct mlx5_flow_sub_actions_list *sample_act;
11359         struct mlx5dv_dr_domain *domain;
11360         uint32_t idx = 0, res_idx = 0;
11361         struct rte_flow_error *error = ctx->error;
11362         uint64_t action_flags;
11363         int ret;
11364
11365         /* Register new destination array resource. */
11366         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11367                                             &res_idx);
11368         if (!resource) {
11369                 rte_flow_error_set(error, ENOMEM,
11370                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11371                                           NULL,
11372                                           "cannot allocate resource memory");
11373                 return NULL;
11374         }
11375         *resource = *ctx_resource;
11376         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11377                 domain = sh->fdb_domain;
11378         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11379                 domain = sh->rx_domain;
11380         else
11381                 domain = sh->tx_domain;
11382         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11383                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11384                                  mlx5_malloc(MLX5_MEM_ZERO,
11385                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11386                                  0, SOCKET_ID_ANY);
11387                 if (!dest_attr[idx]) {
11388                         rte_flow_error_set(error, ENOMEM,
11389                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11390                                            NULL,
11391                                            "cannot allocate resource memory");
11392                         goto error;
11393                 }
11394                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11395                 sample_act = &ctx_resource->sample_act[idx];
11396                 action_flags = sample_act->action_flags;
11397                 switch (action_flags) {
11398                 case MLX5_FLOW_ACTION_QUEUE:
11399                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11400                         break;
11401                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11402                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11403                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11404                         dest_attr[idx]->dest_reformat->reformat =
11405                                         sample_act->dr_encap_action;
11406                         dest_attr[idx]->dest_reformat->dest =
11407                                         sample_act->dr_port_id_action;
11408                         break;
11409                 case MLX5_FLOW_ACTION_PORT_ID:
11410                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11411                         break;
11412                 case MLX5_FLOW_ACTION_JUMP:
11413                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11414                         break;
11415                 default:
11416                         rte_flow_error_set(error, EINVAL,
11417                                            RTE_FLOW_ERROR_TYPE_ACTION,
11418                                            NULL,
11419                                            "unsupported actions type");
11420                         goto error;
11421                 }
11422         }
11423         /* create a dest array action */
11424         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11425                                                 (domain,
11426                                                  resource->num_of_dest,
11427                                                  dest_attr,
11428                                                  &resource->action);
11429         if (ret) {
11430                 rte_flow_error_set(error, ENOMEM,
11431                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11432                                    NULL,
11433                                    "cannot create destination array action");
11434                 goto error;
11435         }
11436         resource->idx = res_idx;
11437         resource->dev = dev;
11438         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11439                 mlx5_free(dest_attr[idx]);
11440         return &resource->entry;
11441 error:
11442         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11443                 flow_dv_sample_sub_actions_release(dev,
11444                                                    &resource->sample_idx[idx]);
11445                 if (dest_attr[idx])
11446                         mlx5_free(dest_attr[idx]);
11447         }
11448         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11449         return NULL;
11450 }
11451
11452 struct mlx5_list_entry *
11453 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11454                             struct mlx5_list_entry *entry __rte_unused,
11455                             void *cb_ctx)
11456 {
11457         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11458         struct rte_eth_dev *dev = ctx->dev;
11459         struct mlx5_flow_dv_dest_array_resource *resource;
11460         struct mlx5_priv *priv = dev->data->dev_private;
11461         struct mlx5_dev_ctx_shared *sh = priv->sh;
11462         uint32_t res_idx = 0;
11463         struct rte_flow_error *error = ctx->error;
11464
11465         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11466                                       &res_idx);
11467         if (!resource) {
11468                 rte_flow_error_set(error, ENOMEM,
11469                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11470                                           NULL,
11471                                           "cannot allocate dest-array memory");
11472                 return NULL;
11473         }
11474         memcpy(resource, entry, sizeof(*resource));
11475         resource->idx = res_idx;
11476         resource->dev = dev;
11477         return &resource->entry;
11478 }
11479
11480 void
11481 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11482                                  struct mlx5_list_entry *entry)
11483 {
11484         struct mlx5_flow_dv_dest_array_resource *resource =
11485                         container_of(entry, typeof(*resource), entry);
11486         struct rte_eth_dev *dev = resource->dev;
11487         struct mlx5_priv *priv = dev->data->dev_private;
11488
11489         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11490 }
11491
11492 /**
11493  * Find existing destination array resource or create and register a new one.
11494  *
11495  * @param[in, out] dev
11496  *   Pointer to rte_eth_dev structure.
11497  * @param[in] ref
11498  *   Pointer to destination array resource reference.
11499  * @param[in, out] dev_flow
11500  *   Pointer to the dev_flow.
11501  * @param[out] error
11502  *   Pointer to the error structure.
11503  *
11504  * @return
11505  *   0 on success, a negative errno value otherwise and rte_errno is set.
11506  */
11507 static int
11508 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11509                          struct mlx5_flow_dv_dest_array_resource *ref,
11510                          struct mlx5_flow *dev_flow,
11511                          struct rte_flow_error *error)
11512 {
11513         struct mlx5_flow_dv_dest_array_resource *resource;
11514         struct mlx5_priv *priv = dev->data->dev_private;
11515         struct mlx5_list_entry *entry;
11516         struct mlx5_flow_cb_ctx ctx = {
11517                 .dev = dev,
11518                 .error = error,
11519                 .data = ref,
11520         };
11521
11522         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11523         if (!entry)
11524                 return -rte_errno;
11525         resource = container_of(entry, typeof(*resource), entry);
11526         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11527         dev_flow->dv.dest_array_res = resource;
11528         return 0;
11529 }
11530
11531 /**
11532  * Convert Sample action to DV specification.
11533  *
11534  * @param[in] dev
11535  *   Pointer to rte_eth_dev structure.
11536  * @param[in] action
11537  *   Pointer to sample action structure.
11538  * @param[in, out] dev_flow
11539  *   Pointer to the mlx5_flow.
11540  * @param[in] attr
11541  *   Pointer to the flow attributes.
11542  * @param[in, out] num_of_dest
11543  *   Pointer to the number of destinations.
11544  * @param[in, out] sample_actions
11545  *   Pointer to sample actions list.
11546  * @param[in, out] res
11547  *   Pointer to sample resource.
11548  * @param[out] error
11549  *   Pointer to the error structure.
11550  *
11551  * @return
11552  *   0 on success, a negative errno value otherwise and rte_errno is set.
11553  */
11554 static int
11555 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11556                                 const struct rte_flow_action_sample *action,
11557                                 struct mlx5_flow *dev_flow,
11558                                 const struct rte_flow_attr *attr,
11559                                 uint32_t *num_of_dest,
11560                                 void **sample_actions,
11561                                 struct mlx5_flow_dv_sample_resource *res,
11562                                 struct rte_flow_error *error)
11563 {
11564         struct mlx5_priv *priv = dev->data->dev_private;
11565         const struct rte_flow_action *sub_actions;
11566         struct mlx5_flow_sub_actions_list *sample_act;
11567         struct mlx5_flow_sub_actions_idx *sample_idx;
11568         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11569         struct rte_flow *flow = dev_flow->flow;
11570         struct mlx5_flow_rss_desc *rss_desc;
11571         uint64_t action_flags = 0;
11572
11573         MLX5_ASSERT(wks);
11574         rss_desc = &wks->rss_desc;
11575         sample_act = &res->sample_act;
11576         sample_idx = &res->sample_idx;
11577         res->ratio = action->ratio;
11578         sub_actions = action->actions;
11579         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11580                 int type = sub_actions->type;
11581                 uint32_t pre_rix = 0;
11582                 void *pre_r;
11583                 switch (type) {
11584                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11585                 {
11586                         const struct rte_flow_action_queue *queue;
11587                         struct mlx5_hrxq *hrxq;
11588                         uint32_t hrxq_idx;
11589
11590                         queue = sub_actions->conf;
11591                         rss_desc->queue_num = 1;
11592                         rss_desc->queue[0] = queue->index;
11593                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11594                                                     rss_desc, &hrxq_idx);
11595                         if (!hrxq)
11596                                 return rte_flow_error_set
11597                                         (error, rte_errno,
11598                                          RTE_FLOW_ERROR_TYPE_ACTION,
11599                                          NULL,
11600                                          "cannot create fate queue");
11601                         sample_act->dr_queue_action = hrxq->action;
11602                         sample_idx->rix_hrxq = hrxq_idx;
11603                         sample_actions[sample_act->actions_num++] =
11604                                                 hrxq->action;
11605                         (*num_of_dest)++;
11606                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11607                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11608                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11609                         dev_flow->handle->fate_action =
11610                                         MLX5_FLOW_FATE_QUEUE;
11611                         break;
11612                 }
11613                 case RTE_FLOW_ACTION_TYPE_RSS:
11614                 {
11615                         struct mlx5_hrxq *hrxq;
11616                         uint32_t hrxq_idx;
11617                         const struct rte_flow_action_rss *rss;
11618                         const uint8_t *rss_key;
11619
11620                         rss = sub_actions->conf;
11621                         memcpy(rss_desc->queue, rss->queue,
11622                                rss->queue_num * sizeof(uint16_t));
11623                         rss_desc->queue_num = rss->queue_num;
11624                         /* NULL RSS key indicates default RSS key. */
11625                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11626                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11627                         /*
11628                          * rss->level and rss->types should be set in advance
11629                          * when expanding items for RSS.
11630                          */
11631                         flow_dv_hashfields_set(dev_flow, rss_desc);
11632                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11633                                                     rss_desc, &hrxq_idx);
11634                         if (!hrxq)
11635                                 return rte_flow_error_set
11636                                         (error, rte_errno,
11637                                          RTE_FLOW_ERROR_TYPE_ACTION,
11638                                          NULL,
11639                                          "cannot create fate queue");
11640                         sample_act->dr_queue_action = hrxq->action;
11641                         sample_idx->rix_hrxq = hrxq_idx;
11642                         sample_actions[sample_act->actions_num++] =
11643                                                 hrxq->action;
11644                         (*num_of_dest)++;
11645                         action_flags |= MLX5_FLOW_ACTION_RSS;
11646                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11647                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11648                         dev_flow->handle->fate_action =
11649                                         MLX5_FLOW_FATE_QUEUE;
11650                         break;
11651                 }
11652                 case RTE_FLOW_ACTION_TYPE_MARK:
11653                 {
11654                         uint32_t tag_be = mlx5_flow_mark_set
11655                                 (((const struct rte_flow_action_mark *)
11656                                 (sub_actions->conf))->id);
11657
11658                         wks->mark = 1;
11659                         pre_rix = dev_flow->handle->dvh.rix_tag;
11660                         /* Save the mark resource before sample */
11661                         pre_r = dev_flow->dv.tag_resource;
11662                         if (flow_dv_tag_resource_register(dev, tag_be,
11663                                                   dev_flow, error))
11664                                 return -rte_errno;
11665                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11666                         sample_act->dr_tag_action =
11667                                 dev_flow->dv.tag_resource->action;
11668                         sample_idx->rix_tag =
11669                                 dev_flow->handle->dvh.rix_tag;
11670                         sample_actions[sample_act->actions_num++] =
11671                                                 sample_act->dr_tag_action;
11672                         /* Recover the mark resource after sample */
11673                         dev_flow->dv.tag_resource = pre_r;
11674                         dev_flow->handle->dvh.rix_tag = pre_rix;
11675                         action_flags |= MLX5_FLOW_ACTION_MARK;
11676                         break;
11677                 }
11678                 case RTE_FLOW_ACTION_TYPE_COUNT:
11679                 {
11680                         if (!flow->counter) {
11681                                 flow->counter =
11682                                         flow_dv_translate_create_counter(dev,
11683                                                 dev_flow, sub_actions->conf,
11684                                                 0);
11685                                 if (!flow->counter)
11686                                         return rte_flow_error_set
11687                                                 (error, rte_errno,
11688                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11689                                                 NULL,
11690                                                 "cannot create counter"
11691                                                 " object.");
11692                         }
11693                         sample_act->dr_cnt_action =
11694                                   (flow_dv_counter_get_by_idx(dev,
11695                                   flow->counter, NULL))->action;
11696                         sample_actions[sample_act->actions_num++] =
11697                                                 sample_act->dr_cnt_action;
11698                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11699                         break;
11700                 }
11701                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11702                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11703                 {
11704                         struct mlx5_flow_dv_port_id_action_resource
11705                                         port_id_resource;
11706                         uint32_t port_id = 0;
11707
11708                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11709                         /* Save the port id resource before sample */
11710                         pre_rix = dev_flow->handle->rix_port_id_action;
11711                         pre_r = dev_flow->dv.port_id_action;
11712                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11713                                                              &port_id, error))
11714                                 return -rte_errno;
11715                         port_id_resource.port_id = port_id;
11716                         if (flow_dv_port_id_action_resource_register
11717                             (dev, &port_id_resource, dev_flow, error))
11718                                 return -rte_errno;
11719                         sample_act->dr_port_id_action =
11720                                 dev_flow->dv.port_id_action->action;
11721                         sample_idx->rix_port_id_action =
11722                                 dev_flow->handle->rix_port_id_action;
11723                         sample_actions[sample_act->actions_num++] =
11724                                                 sample_act->dr_port_id_action;
11725                         /* Recover the port id resource after sample */
11726                         dev_flow->dv.port_id_action = pre_r;
11727                         dev_flow->handle->rix_port_id_action = pre_rix;
11728                         (*num_of_dest)++;
11729                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11730                         break;
11731                 }
11732                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11733                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11734                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11735                         /* Save the encap resource before sample */
11736                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11737                         pre_r = dev_flow->dv.encap_decap;
11738                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11739                                                            dev_flow,
11740                                                            attr->transfer,
11741                                                            error))
11742                                 return -rte_errno;
11743                         sample_act->dr_encap_action =
11744                                 dev_flow->dv.encap_decap->action;
11745                         sample_idx->rix_encap_decap =
11746                                 dev_flow->handle->dvh.rix_encap_decap;
11747                         sample_actions[sample_act->actions_num++] =
11748                                                 sample_act->dr_encap_action;
11749                         /* Recover the encap resource after sample */
11750                         dev_flow->dv.encap_decap = pre_r;
11751                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11752                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11753                         break;
11754                 default:
11755                         return rte_flow_error_set(error, EINVAL,
11756                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11757                                 NULL,
11758                                 "unsupported action for sampler");
11759                 }
11760         }
11761         sample_act->action_flags = action_flags;
11762         res->ft_id = dev_flow->dv.group;
11763         if (attr->transfer) {
11764                 union {
11765                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11766                         uint64_t set_action;
11767                 } action_ctx = { .set_action = 0 };
11768
11769                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11770                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11771                          MLX5_MODIFICATION_TYPE_SET);
11772                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11773                          MLX5_MODI_META_REG_C_0);
11774                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11775                          priv->vport_meta_tag);
11776                 res->set_action = action_ctx.set_action;
11777         } else if (attr->ingress) {
11778                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11779         } else {
11780                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11781         }
11782         return 0;
11783 }
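/*
 * Illustration (a sketch, not driver code): on the FDB path the sampled
 * copy would otherwise lose the source vport context, so the packed
 * set_action built above conceptually re-applies, per sampled packet:
 *
 *	REG_C_0 = priv->vport_meta_tag;
 */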
11784
11785 /**
11786  * Convert Sample action to DV specification.
11787  *
11788  * @param[in] dev
11789  *   Pointer to rte_eth_dev structure.
11790  * @param[in, out] dev_flow
11791  *   Pointer to the mlx5_flow.
11792  * @param[in] num_of_dest
11793  *   The num of destination.
11794  * @param[in, out] res
11795  *   Pointer to sample resource.
11796  * @param[in, out] mdest_res
11797  *   Pointer to destination array resource.
11798  * @param[in] sample_actions
11799  *   Pointer to sample path actions list.
11800  * @param[in] action_flags
11801  *   Holds the actions detected until now.
11802  * @param[out] error
11803  *   Pointer to the error structure.
11804  *
11805  * @return
11806  *   0 on success, a negative errno value otherwise and rte_errno is set.
11807  */
11808 static int
11809 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11810                              struct mlx5_flow *dev_flow,
11811                              uint32_t num_of_dest,
11812                              struct mlx5_flow_dv_sample_resource *res,
11813                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11814                              void **sample_actions,
11815                              uint64_t action_flags,
11816                              struct rte_flow_error *error)
11817 {
11818         /* The normal path action resource uses the last index of the array. */
11819         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11820         struct mlx5_flow_sub_actions_list *sample_act =
11821                                         &mdest_res->sample_act[dest_index];
11822         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11823         struct mlx5_flow_rss_desc *rss_desc;
11824         uint32_t normal_idx = 0;
11825         struct mlx5_hrxq *hrxq;
11826         uint32_t hrxq_idx;
11827
11828         MLX5_ASSERT(wks);
11829         rss_desc = &wks->rss_desc;
11830         if (num_of_dest > 1) {
11831                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11832                         /* Handle QP action for mirroring */
11833                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11834                                                     rss_desc, &hrxq_idx);
11835                         if (!hrxq)
11836                                 return rte_flow_error_set
11837                                      (error, rte_errno,
11838                                       RTE_FLOW_ERROR_TYPE_ACTION,
11839                                       NULL,
11840                                       "cannot create rx queue");
11841                         normal_idx++;
11842                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11843                         sample_act->dr_queue_action = hrxq->action;
11844                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11845                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11846                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11847                 }
11848                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11849                         normal_idx++;
11850                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11851                                 dev_flow->handle->dvh.rix_encap_decap;
11852                         sample_act->dr_encap_action =
11853                                 dev_flow->dv.encap_decap->action;
11854                         dev_flow->handle->dvh.rix_encap_decap = 0;
11855                 }
11856                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11857                         normal_idx++;
11858                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11859                                 dev_flow->handle->rix_port_id_action;
11860                         sample_act->dr_port_id_action =
11861                                 dev_flow->dv.port_id_action->action;
11862                         dev_flow->handle->rix_port_id_action = 0;
11863                 }
11864                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11865                         normal_idx++;
11866                         mdest_res->sample_idx[dest_index].rix_jump =
11867                                 dev_flow->handle->rix_jump;
11868                         sample_act->dr_jump_action =
11869                                 dev_flow->dv.jump->action;
11870                         dev_flow->handle->rix_jump = 0;
11871                 }
11872                 sample_act->actions_num = normal_idx;
11873                 /* The sample action resource uses the first index of the array. */
11874                 mdest_res->ft_type = res->ft_type;
11875                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11876                                 sizeof(struct mlx5_flow_sub_actions_idx));
11877                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11878                                 sizeof(struct mlx5_flow_sub_actions_list));
11879                 mdest_res->num_of_dest = num_of_dest;
11880                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11881                                                          dev_flow, error))
11882                         return rte_flow_error_set(error, EINVAL,
11883                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11884                                                   NULL, "can't create sample "
11885                                                   "action");
11886         } else {
11887                 res->sub_actions = sample_actions;
11888                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11889                         return rte_flow_error_set(error, EINVAL,
11890                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11891                                                   NULL,
11892                                                   "can't create sample action");
11893         }
11894         return 0;
11895 }
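/*
 * A minimal usage sketch of the sample helpers (illustrative only; the
 * real call site is in flow_dv_translate() below): sub-action translation
 * fills the sample actions array and counts destinations, then this
 * function registers either a dest-array resource (mirroring) or a plain
 * sampler resource.
 *
 *	void *smp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = { 0 };
 *	uint32_t n_dest = 0;
 *
 *	// ... flow_dv_translate_action_sample() fills smp_actions/n_dest ...
 *	if (flow_dv_create_action_sample(dev, dev_flow, n_dest, &sample_res,
 *					 &mdest_res, smp_actions,
 *					 action_flags, error))
 *		return -rte_errno;
 *	// n_dest > 1 selects the dest-array path, otherwise the sampler.
 */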
11896
11897 /**
11898  * Remove an ASO age action from age actions list.
11899  *
11900  * @param[in] dev
11901  *   Pointer to the Ethernet device structure.
11902  * @param[in] age
11903  *   Pointer to the aso age action handler.
11904  */
11905 static void
11906 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11907                                 struct mlx5_aso_age_action *age)
11908 {
11909         struct mlx5_age_info *age_info;
11910         struct mlx5_age_param *age_param = &age->age_params;
11911         struct mlx5_priv *priv = dev->data->dev_private;
11912         uint16_t expected = AGE_CANDIDATE;
11913
11914         age_info = GET_PORT_AGE_INFO(priv);
11915         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11916                                          AGE_FREE, false, __ATOMIC_RELAXED,
11917                                          __ATOMIC_RELAXED)) {
11918                 /*
11919                  * The lock is needed even on age timeout,
11920                  * since the age action may still be in process.
11921                  */
11922                 rte_spinlock_lock(&age_info->aged_sl);
11923                 LIST_REMOVE(age, next);
11924                 rte_spinlock_unlock(&age_info->aged_sl);
11925                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11926         }
11927 }
11928
11929 /**
11930  * Release an ASO age action.
11931  *
11932  * @param[in] dev
11933  *   Pointer to the Ethernet device structure.
11934  * @param[in] age_idx
11935  *   Index of ASO age action to release.
11939  *
11940  * @return
11941  *   0 when age action was removed, otherwise the number of references.
11942  */
11943 static int
11944 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11945 {
11946         struct mlx5_priv *priv = dev->data->dev_private;
11947         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11948         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11949         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11950
11951         if (!ret) {
11952                 flow_dv_aso_age_remove_from_age(dev, age);
11953                 rte_spinlock_lock(&mng->free_sl);
11954                 LIST_INSERT_HEAD(&mng->free, age, next);
11955                 rte_spinlock_unlock(&mng->free_sl);
11956         }
11957         return ret;
11958 }
11959
11960 /**
11961  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11962  *
11963  * @param[in] dev
11964  *   Pointer to the Ethernet device structure.
11965  *
11966  * @return
11967  *   0 on success, otherwise negative errno value and rte_errno is set.
11968  */
11969 static int
11970 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11971 {
11972         struct mlx5_priv *priv = dev->data->dev_private;
11973         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11974         void *old_pools = mng->pools;
11975         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11976         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11977         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11978
11979         if (!pools) {
11980                 rte_errno = ENOMEM;
11981                 return -ENOMEM;
11982         }
11983         if (old_pools) {
11984                 memcpy(pools, old_pools,
11985                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11986                 mlx5_free(old_pools);
11987         } else {
11988                 /* First ASO flow hit allocation - starting ASO data-path. */
11989                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11990
11991                 if (ret) {
11992                         mlx5_free(pools);
11993                         return ret;
11994                 }
11995         }
11996         mng->n = resize;
11997         mng->pools = pools;
11998         return 0;
11999 }
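/*
 * A hedged sketch of the expected calling pattern: this helper takes no
 * lock itself and relies on the caller (flow_dv_age_pool_create() below)
 * already holding mng->resize_rwl for writing:
 *
 *	rte_rwlock_write_lock(&mng->resize_rwl);
 *	if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev))
 *		goto error;	// pool array could not grow
 *	rte_rwlock_write_unlock(&mng->resize_rwl);
 */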
12000
12001 /**
12002  * Create and initialize a new ASO aging pool.
12003  *
12004  * @param[in] dev
12005  *   Pointer to the Ethernet device structure.
12006  * @param[out] age_free
12007  *   Where to put the pointer of a new age action.
12008  *
12009  * @return
12010  *   The age actions pool pointer and @p age_free is set on success,
12011  *   NULL otherwise and rte_errno is set.
12012  */
12013 static struct mlx5_aso_age_pool *
12014 flow_dv_age_pool_create(struct rte_eth_dev *dev,
12015                         struct mlx5_aso_age_action **age_free)
12016 {
12017         struct mlx5_priv *priv = dev->data->dev_private;
12018         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12019         struct mlx5_aso_age_pool *pool = NULL;
12020         struct mlx5_devx_obj *obj = NULL;
12021         uint32_t i;
12022
12023         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
12024                                                     priv->sh->cdev->pdn);
12025         if (!obj) {
12026                 rte_errno = ENODATA;
12027                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
12028                 return NULL;
12029         }
12030         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12031         if (!pool) {
12032                 claim_zero(mlx5_devx_cmd_destroy(obj));
12033                 rte_errno = ENOMEM;
12034                 return NULL;
12035         }
12036         pool->flow_hit_aso_obj = obj;
12037         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
12038         rte_rwlock_write_lock(&mng->resize_rwl);
12039         pool->index = mng->next;
12040         /* Resize pools array if there is no room for the new pool in it. */
12041         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
12042                 claim_zero(mlx5_devx_cmd_destroy(obj));
12043                 mlx5_free(pool);
12044                 rte_rwlock_write_unlock(&mng->resize_rwl);
12045                 return NULL;
12046         }
12047         mng->pools[pool->index] = pool;
12048         mng->next++;
12049         rte_rwlock_write_unlock(&mng->resize_rwl);
12050         /* Assign the first action in the new pool, the rest go to free list. */
12051         *age_free = &pool->actions[0];
12052         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
12053                 pool->actions[i].offset = i;
12054                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
12055         }
12056         return pool;
12057 }
12058
12059 /**
12060  * Allocate an ASO aging bit.
12061  *
12062  * @param[in] dev
12063  *   Pointer to the Ethernet device structure.
12064  * @param[out] error
12065  *   Pointer to the error structure.
12066  *
12067  * @return
12068  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12069  */
12070 static uint32_t
12071 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12072 {
12073         struct mlx5_priv *priv = dev->data->dev_private;
12074         const struct mlx5_aso_age_pool *pool;
12075         struct mlx5_aso_age_action *age_free = NULL;
12076         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12077
12078         MLX5_ASSERT(mng);
12079         /* Try to get the next free age action bit. */
12080         rte_spinlock_lock(&mng->free_sl);
12081         age_free = LIST_FIRST(&mng->free);
12082         if (age_free) {
12083                 LIST_REMOVE(age_free, next);
12084         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12085                 rte_spinlock_unlock(&mng->free_sl);
12086                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12087                                    NULL, "failed to create ASO age pool");
12088                 return 0; /* 0 is an error. */
12089         }
12090         rte_spinlock_unlock(&mng->free_sl);
12091         pool = container_of
12092           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12093                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12094                                                                        actions);
12095         if (!age_free->dr_action) {
12096                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12097                                                  error);
12098
12099                 if (reg_c < 0) {
12100                         rte_flow_error_set(error, rte_errno,
12101                                            RTE_FLOW_ERROR_TYPE_ACTION,
12102                                            NULL, "failed to get reg_c "
12103                                            "for ASO flow hit");
12104                         return 0; /* 0 is an error. */
12105                 }
12106 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12107                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12108                                 (priv->sh->rx_domain,
12109                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12110                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12111                                  (reg_c - REG_C_0));
12112 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12113                 if (!age_free->dr_action) {
12114                         rte_errno = errno;
12115                         rte_spinlock_lock(&mng->free_sl);
12116                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12117                         rte_spinlock_unlock(&mng->free_sl);
12118                         rte_flow_error_set(error, rte_errno,
12119                                            RTE_FLOW_ERROR_TYPE_ACTION,
12120                                            NULL, "failed to create ASO "
12121                                            "flow hit action");
12122                         return 0; /* 0 is an error. */
12123                 }
12124         }
12125         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
12126         return pool->index | ((age_free->offset + 1) << 16);
12127 }
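/*
 * Sketch of the index encoding returned above (layout inferred from the
 * return expression; the decode itself lives in flow_aso_age_get_by_idx()):
 * the low 16 bits carry the pool index, the upper bits the intra-pool
 * offset biased by one so that 0 stays reserved as the error value.
 *
 *	uint32_t age_idx = flow_dv_aso_age_alloc(dev, error);
 *	uint16_t pool_idx = age_idx & 0xffff;
 *	uint16_t offset = (age_idx >> 16) - 1;	// undo the +1 bias
 */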
12128
12129 /**
12130  * Initialize flow ASO age parameters.
12131  *
12132  * @param[in] dev
12133  *   Pointer to rte_eth_dev structure.
12134  * @param[in] age_idx
12135  *   Index of ASO age action.
12136  * @param[in] context
12137  *   Pointer to flow counter age context.
12138  * @param[in] timeout
12139  *   Aging timeout in seconds.
12140  *
12141  */
12142 static void
12143 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12144                             uint32_t age_idx,
12145                             void *context,
12146                             uint32_t timeout)
12147 {
12148         struct mlx5_aso_age_action *aso_age;
12149
12150         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12151         MLX5_ASSERT(aso_age);
12152         aso_age->age_params.context = context;
12153         aso_age->age_params.timeout = timeout;
12154         aso_age->age_params.port_id = dev->data->port_id;
12155         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12156                          __ATOMIC_RELAXED);
12157         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12158                          __ATOMIC_RELAXED);
12159 }
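/*
 * Typical pairing of the allocation and initialization helpers above
 * (hedged sketch; the context argument source is an assumption made for
 * illustration):
 *
 *	uint32_t age_idx = flow_dv_aso_age_alloc(dev, error);
 *
 *	if (!age_idx)
 *		return 0;	// rte_errno already set
 *	flow_dv_aso_age_params_init(dev, age_idx,
 *				    (void *)(uintptr_t)flow_idx,
 *				    age->timeout);
 */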
12160
12161 static void
12162 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12163                                const struct rte_flow_item_integrity *value,
12164                                void *headers_m, void *headers_v)
12165 {
12166         if (mask->l4_ok) {
12167                 /* The RTE l4_ok filter aggregates the hardware l4_ok and
12168                  * l4_checksum_ok filters.
12169                  * A positive RTE l4_ok match requires a match on both
12170                  * hardware L4 integrity bits.
12171                  * A negative match checks the hardware l4_checksum_ok bit
12172                  * only, because hardware sets that bit to 0 for all
12173                  * packets with bad L4.
12174                  */
12175                 if (value->l4_ok) {
12176                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12177                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12178                 }
12179                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12180                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12181                          !!value->l4_ok);
12182         }
12183         if (mask->l4_csum_ok) {
12184                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12185                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12186                          value->l4_csum_ok);
12187         }
12188 }
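/*
 * The resulting matcher programming, summarized (a restatement of the
 * logic above, not extra behavior):
 *
 *	l4_ok mask set, value 1 -> match l4_ok == 1 && l4_checksum_ok == 1
 *	l4_ok mask set, value 0 -> match l4_checksum_ok == 0 only
 *	l4_csum_ok mask set     -> match l4_checksum_ok == value
 */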
12189
12190 static void
12191 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12192                                const struct rte_flow_item_integrity *value,
12193                                void *headers_m, void *headers_v, bool is_ipv4)
12194 {
12195         if (mask->l3_ok) {
12196                 /* For IPv4, the RTE l3_ok filter aggregates the hardware
12197                  * l3_ok and ipv4_checksum_ok filters.
12198                  * A positive RTE l3_ok match requires a match on both
12199                  * hardware L3 integrity bits.
12200                  * A negative match checks the hardware ipv4_checksum_ok
12201                  * bit only, because hardware sets that bit to 0 for all
12202                  * packets with bad L3.
12203                  */
12204                 if (is_ipv4) {
12205                         if (value->l3_ok) {
12206                                 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12207                                          l3_ok, 1);
12208                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12209                                          l3_ok, 1);
12210                         }
12211                         MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12212                                  ipv4_checksum_ok, 1);
12213                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12214                                  ipv4_checksum_ok, !!value->l3_ok);
12215                 } else {
12216                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12217                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12218                                  value->l3_ok);
12219                 }
12220         }
12221         if (mask->ipv4_csum_ok) {
12222                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12223                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12224                          value->ipv4_csum_ok);
12225         }
12226 }
12227
12228 static void
12229 set_integrity_bits(void *headers_m, void *headers_v,
12230                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12231 {
12232         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12233         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12234
12235         /* Validation guarantees a non-NULL integrity spec pointer. */
12236         MLX5_ASSERT(spec != NULL);
12237         if (!mask)
12238                 mask = &rte_flow_item_integrity_mask;
12239         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12240                                        is_l3_ip4);
12241         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12242 }
12243
12244 static void
12245 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12246                                       const
12247                                       struct rte_flow_item *integrity_items[2],
12248                                       uint64_t pattern_flags)
12249 {
12250         void *headers_m, *headers_v;
12251         bool is_l3_ip4;
12252
12253         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12254                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12255                                          inner_headers);
12256                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12257                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12258                             0;
12259                 set_integrity_bits(headers_m, headers_v,
12260                                    integrity_items[1], is_l3_ip4);
12261         }
12262         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12263                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12264                                          outer_headers);
12265                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12266                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12267                             0;
12268                 set_integrity_bits(headers_m, headers_v,
12269                                    integrity_items[0], is_l3_ip4);
12270         }
12271 }
12272
12273 static void
12274 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12275                                  const struct rte_flow_item *integrity_items[2],
12276                                  uint64_t *last_item)
12277 {
12278         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12279
12280         /* Validation guarantees a non-NULL integrity spec pointer. */
12281         MLX5_ASSERT(spec != NULL);
12282         if (spec->level > 1) {
12283                 integrity_items[1] = item;
12284                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12285         } else {
12286                 integrity_items[0] = item;
12287                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12288         }
12289 }
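/*
 * Example of the application-level item this helper sorts by level
 * (illustrative only): level 0 or 1 targets the outermost headers,
 * level > 1 the inner (tunneled) headers.
 *
 *	struct rte_flow_item_integrity spec = {
 *		.level = 0,
 *		.l3_ok = 1,
 *		.l4_ok = 1,
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
 *		.spec = &spec,
 *	};
 */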
12290
12291 /**
12292  * Prepares DV flow counter with aging configuration.
12293  * Gets it by index when exists, creates a new one when doesn't.
12294  *
12295  * @param[in] dev
12296  *   Pointer to rte_eth_dev structure.
12297  * @param[in] dev_flow
12298  *   Pointer to the mlx5_flow.
12299  * @param[in, out] flow
12300  *   Pointer to the sub flow.
12301  * @param[in] count
12302  *   Pointer to the counter action configuration.
12303  * @param[in] age
12304  *   Pointer to the aging action configuration.
12305  * @param[out] error
12306  *   Pointer to the error structure.
12307  *
12308  * @return
12309  *   Pointer to the counter, NULL otherwise.
12310  */
12311 static struct mlx5_flow_counter *
12312 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12313                         struct mlx5_flow *dev_flow,
12314                         struct rte_flow *flow,
12315                         const struct rte_flow_action_count *count,
12316                         const struct rte_flow_action_age *age,
12317                         struct rte_flow_error *error)
12318 {
12319         if (!flow->counter) {
12320                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12321                                                                  count, age);
12322                 if (!flow->counter) {
12323                         rte_flow_error_set(error, rte_errno,
12324                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12325                                            "cannot create counter object.");
12326                         return NULL;
12327                 }
12328         }
12329         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12330 }
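/*
 * Get-or-create semantics in one sketch (hypothetical caller):
 *
 *	cnt_act = flow_dv_prepare_counter(dev, dev_flow, flow, count, age,
 *					  error);
 *	if (!cnt_act)
 *		return -rte_errno;
 *	// Later sub-flows of the same rte_flow reuse flow->counter as-is.
 */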
12331
12332 /**
12333  * Release an ASO CT action on its owning device.
12334  *
12335  * @param[in] dev
12336  *   Pointer to the Ethernet device structure.
12337  * @param[in] idx
12338  *   Index of ASO CT action to release.
12339  *
12340  * @return
12341  *   0 when CT action was removed, otherwise the number of references.
12342  */
12343 static inline int
12344 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12345 {
12346         struct mlx5_priv *priv = dev->data->dev_private;
12347         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12348         uint32_t ret;
12349         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12350         enum mlx5_aso_ct_state state =
12351                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12352
12353         /* Cannot release when CT is in the ASO SQ. */
12354         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12355                 return -1;
12356         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12357         if (!ret) {
12358                 if (ct->dr_action_orig) {
12359 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12360                         claim_zero(mlx5_glue->destroy_flow_action
12361                                         (ct->dr_action_orig));
12362 #endif
12363                         ct->dr_action_orig = NULL;
12364                 }
12365                 if (ct->dr_action_rply) {
12366 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12367                         claim_zero(mlx5_glue->destroy_flow_action
12368                                         (ct->dr_action_rply));
12369 #endif
12370                         ct->dr_action_rply = NULL;
12371                 }
12372                 /* Clear the state to free; not needed on the first allocation. */
12373                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12374                 rte_spinlock_lock(&mng->ct_sl);
12375                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12376                 rte_spinlock_unlock(&mng->ct_sl);
12377         }
12378         return (int)ret;
12379 }
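/*
 * A release attempt legitimately fails with -1 while the CT object is
 * still owned by the ASO SQ (WAIT/QUERY states); the indirect-action
 * wrapper below maps that to EAGAIN. Hedged sketch of the caller-side
 * handling:
 *
 *	if (flow_dv_aso_ct_dev_release(owndev, idx) < 0)
 *		return rte_flow_error_set(error, EAGAIN, ...);
 */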
12380
12381 static inline int
12382 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12383                        struct rte_flow_error *error)
12384 {
12385         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12386         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12387         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12388         int ret;
12389
12390         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12391         if (dev->data->dev_started != 1)
12392                 return rte_flow_error_set(error, EAGAIN,
12393                                           RTE_FLOW_ERROR_TYPE_ACTION,
12394                                           NULL,
12395                                           "Indirect CT action cannot be destroyed when the port is stopped");
12396         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12397         if (ret < 0)
12398                 return rte_flow_error_set(error, EAGAIN,
12399                                           RTE_FLOW_ERROR_TYPE_ACTION,
12400                                           NULL,
12401                                           "Current state prevents indirect CT action from being destroyed");
12402         return ret;
12403 }
12404
12405 /**
12406  * Resize the ASO CT pools array by 64 pools.
12407  *
12408  * @param[in] dev
12409  *   Pointer to the Ethernet device structure.
12410  *
12411  * @return
12412  *   0 on success, otherwise negative errno value and rte_errno is set.
12413  */
12414 static int
12415 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12416 {
12417         struct mlx5_priv *priv = dev->data->dev_private;
12418         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12419         void *old_pools = mng->pools;
12420         /* Hard-coded for now; should become a macro. */
12421         uint32_t resize = mng->n + 64;
12422         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12423         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12424
12425         if (!pools) {
12426                 rte_errno = ENOMEM;
12427                 return -rte_errno;
12428         }
12429         rte_rwlock_write_lock(&mng->resize_rwl);
12430         /* The ASO SQ/QP was already initialized during startup. */
12431         if (old_pools) {
12432                 /* Realloc could be an alternative choice. */
12433                 rte_memcpy(pools, old_pools,
12434                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12435                 mlx5_free(old_pools);
12436         }
12437         mng->n = resize;
12438         mng->pools = pools;
12439         rte_rwlock_write_unlock(&mng->resize_rwl);
12440         return 0;
12441 }
12442
12443 /**
12444  * Create and initialize a new ASO CT pool.
12445  *
12446  * @param[in] dev
12447  *   Pointer to the Ethernet device structure.
12448  * @param[out] ct_free
12449  *   Where to put the pointer of a new CT action.
12450  *
12451  * @return
12452  *   The CT actions pool pointer and @p ct_free is set on success,
12453  *   NULL otherwise and rte_errno is set.
12454  */
12455 static struct mlx5_aso_ct_pool *
12456 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12457                        struct mlx5_aso_ct_action **ct_free)
12458 {
12459         struct mlx5_priv *priv = dev->data->dev_private;
12460         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12461         struct mlx5_aso_ct_pool *pool = NULL;
12462         struct mlx5_devx_obj *obj = NULL;
12463         uint32_t i;
12464         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12465
12466         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12467                                                           priv->sh->cdev->pdn,
12468                                                           log_obj_size);
12469         if (!obj) {
12470                 rte_errno = ENODATA;
12471                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12472                 return NULL;
12473         }
12474         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12475         if (!pool) {
12476                 rte_errno = ENOMEM;
12477                 claim_zero(mlx5_devx_cmd_destroy(obj));
12478                 return NULL;
12479         }
12480         pool->devx_obj = obj;
12481         pool->index = mng->next;
12482         /* Resize pools array if there is no room for the new pool in it. */
12483         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12484                 claim_zero(mlx5_devx_cmd_destroy(obj));
12485                 mlx5_free(pool);
12486                 return NULL;
12487         }
12488         mng->pools[pool->index] = pool;
12489         mng->next++;
12490         /* Assign the first action in the new pool, the rest go to free list. */
12491         *ct_free = &pool->actions[0];
12492         /* The caller holds the free-list lock; insertion is safe here. */
12493         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12494                 /* refcnt is 0 when allocating the memory. */
12495                 pool->actions[i].offset = i;
12496                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12497         }
12498         return pool;
12499 }
12500
12501 /**
12502  * Allocate an ASO CT action from the free list.
12503  *
12504  * @param[in] dev
12505  *   Pointer to the Ethernet device structure.
12506  * @param[out] error
12507  *   Pointer to the error structure.
12508  *
12509  * @return
12510  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12511  */
12512 static uint32_t
12513 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12514 {
12515         struct mlx5_priv *priv = dev->data->dev_private;
12516         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12517         struct mlx5_aso_ct_action *ct = NULL;
12518         struct mlx5_aso_ct_pool *pool;
12519         uint8_t reg_c;
12520         uint32_t ct_idx;
12521
12522         MLX5_ASSERT(mng);
12523         if (!priv->sh->devx) {
12524                 rte_errno = ENOTSUP;
12525                 return 0;
12526         }
12527         /* Get a free CT action; if none, a new pool will be created. */
12528         rte_spinlock_lock(&mng->ct_sl);
12529         ct = LIST_FIRST(&mng->free_cts);
12530         if (ct) {
12531                 LIST_REMOVE(ct, next);
12532         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12533                 rte_spinlock_unlock(&mng->ct_sl);
12534                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12535                                    NULL, "failed to create ASO CT pool");
12536                 return 0;
12537         }
12538         rte_spinlock_unlock(&mng->ct_sl);
12539         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12540         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12541         /* 0: inactive, 1: created, 2+: used by flows. */
12542         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12543         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12544         if (!ct->dr_action_orig) {
12545 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12546                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12547                         (priv->sh->rx_domain, pool->devx_obj->obj,
12548                          ct->offset,
12549                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12550                          reg_c - REG_C_0);
12551 #else
12552                 RTE_SET_USED(reg_c);
12553 #endif
12554                 if (!ct->dr_action_orig) {
12555                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12556                         rte_flow_error_set(error, rte_errno,
12557                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12558                                            "failed to create ASO CT action");
12559                         return 0;
12560                 }
12561         }
12562         if (!ct->dr_action_rply) {
12563 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12564                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12565                         (priv->sh->rx_domain, pool->devx_obj->obj,
12566                          ct->offset,
12567                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12568                          reg_c - REG_C_0);
12569 #endif
12570                 if (!ct->dr_action_rply) {
12571                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12572                         rte_flow_error_set(error, rte_errno,
12573                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12574                                            "failed to create ASO CT action");
12575                         return 0;
12576                 }
12577         }
12578         return ct_idx;
12579 }
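/*
 * Both DR actions above share a single ASO CT object; a hedged sketch of
 * how a later translation step might pick a direction (the selection
 * shown is an assumption for illustration):
 *
 *	void *dr_action = ct->is_original ? ct->dr_action_orig
 *					  : ct->dr_action_rply;
 */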
12580
12581 /**
12582  * Create a conntrack object with context and actions using the ASO mechanism.
12583  *
12584  * @param[in] dev
12585  *   Pointer to rte_eth_dev structure.
12586  * @param[in] pro
12587  *   Pointer to conntrack information profile.
12588  * @param[out] error
12589  *   Pointer to the error structure.
12590  *
12591  * @return
12592  *   Index to conntrack object on success, 0 otherwise.
12593  */
12594 static uint32_t
12595 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12596                                    const struct rte_flow_action_conntrack *pro,
12597                                    struct rte_flow_error *error)
12598 {
12599         struct mlx5_priv *priv = dev->data->dev_private;
12600         struct mlx5_dev_ctx_shared *sh = priv->sh;
12601         struct mlx5_aso_ct_action *ct;
12602         uint32_t idx;
12603
12604         if (!sh->ct_aso_en)
12605                 return rte_flow_error_set(error, ENOTSUP,
12606                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12607                                           "Connection tracking is not supported");
12608         idx = flow_dv_aso_ct_alloc(dev, error);
12609         if (!idx)
12610                 return rte_flow_error_set(error, rte_errno,
12611                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12612                                           "Failed to allocate CT object");
12613         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12614         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12615                 return rte_flow_error_set(error, EBUSY,
12616                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12617                                           "Failed to update CT");
12618         ct->is_original = !!pro->is_original_dir;
12619         ct->peer = pro->peer_port;
12620         return idx;
12621 }
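/*
 * Minimal illustration of the profile consumed above (the field selection
 * is an example, not a complete connection tracking profile):
 *
 *	struct rte_flow_action_conntrack pro = {
 *		.peer_port = peer_port_id,
 *		.is_original_dir = 1,
 *		.state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 *	};
 *	uint32_t idx = flow_dv_translate_create_conntrack(dev, &pro, error);
 */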
12622
12623 /**
12624  * Fill the flow with DV spec without taking any locks
12625  * (the caller must hold the mutex).
12626  *
12627  * @param[in] dev
12628  *   Pointer to rte_eth_dev structure.
12629  * @param[in, out] dev_flow
12630  *   Pointer to the sub flow.
12631  * @param[in] attr
12632  *   Pointer to the flow attributes.
12633  * @param[in] items
12634  *   Pointer to the list of items.
12635  * @param[in] actions
12636  *   Pointer to the list of actions.
12637  * @param[out] error
12638  *   Pointer to the error structure.
12639  *
12640  * @return
12641  *   0 on success, a negative errno value otherwise and rte_errno is set.
12642  */
12643 static int
12644 flow_dv_translate(struct rte_eth_dev *dev,
12645                   struct mlx5_flow *dev_flow,
12646                   const struct rte_flow_attr *attr,
12647                   const struct rte_flow_item items[],
12648                   const struct rte_flow_action actions[],
12649                   struct rte_flow_error *error)
12650 {
12651         struct mlx5_priv *priv = dev->data->dev_private;
12652         struct mlx5_dev_config *dev_conf = &priv->config;
12653         struct rte_flow *flow = dev_flow->flow;
12654         struct mlx5_flow_handle *handle = dev_flow->handle;
12655         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12656         struct mlx5_flow_rss_desc *rss_desc;
12657         uint64_t item_flags = 0;
12658         uint64_t last_item = 0;
12659         uint64_t action_flags = 0;
12660         struct mlx5_flow_dv_matcher matcher = {
12661                 .mask = {
12662                         .size = sizeof(matcher.mask.buf),
12663                 },
12664         };
12665         int actions_n = 0;
12666         bool actions_end = false;
12667         union {
12668                 struct mlx5_flow_dv_modify_hdr_resource res;
12669                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12670                             sizeof(struct mlx5_modification_cmd) *
12671                             (MLX5_MAX_MODIFY_NUM + 1)];
12672         } mhdr_dummy;
12673         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12674         const struct rte_flow_action_count *count = NULL;
12675         const struct rte_flow_action_age *non_shared_age = NULL;
12676         union flow_dv_attr flow_attr = { .attr = 0 };
12677         uint32_t tag_be;
12678         union mlx5_flow_tbl_key tbl_key;
12679         uint32_t modify_action_position = UINT32_MAX;
12680         void *match_mask = matcher.mask.buf;
12681         void *match_value = dev_flow->dv.value.buf;
12682         uint8_t next_protocol = 0xff;
12683         struct rte_vlan_hdr vlan = { 0 };
12684         struct mlx5_flow_dv_dest_array_resource mdest_res;
12685         struct mlx5_flow_dv_sample_resource sample_res;
12686         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12687         const struct rte_flow_action_sample *sample = NULL;
12688         struct mlx5_flow_sub_actions_list *sample_act;
12689         uint32_t sample_act_pos = UINT32_MAX;
12690         uint32_t age_act_pos = UINT32_MAX;
12691         uint32_t num_of_dest = 0;
12692         int tmp_actions_n = 0;
12693         uint32_t table;
12694         int ret = 0;
12695         const struct mlx5_flow_tunnel *tunnel = NULL;
12696         struct flow_grp_info grp_info = {
12697                 .external = !!dev_flow->external,
12698                 .transfer = !!attr->transfer,
12699                 .fdb_def_rule = !!priv->fdb_def_rule,
12700                 .skip_scale = dev_flow->skip_scale &
12701                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12702                 .std_tbl_fix = true,
12703         };
12704         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12705         const struct rte_flow_item *tunnel_item = NULL;
12706
12707         if (!wks)
12708                 return rte_flow_error_set(error, ENOMEM,
12709                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12710                                           NULL,
12711                                           "failed to push flow workspace");
12712         rss_desc = &wks->rss_desc;
12713         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12714         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12715         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12716                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12717         /* update normal path action resource into last index of array */
12718         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12719         if (is_tunnel_offload_active(dev)) {
12720                 if (dev_flow->tunnel) {
12721                         RTE_VERIFY(dev_flow->tof_type ==
12722                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12723                         tunnel = dev_flow->tunnel;
12724                 } else {
12725                         tunnel = mlx5_get_tof(items, actions,
12726                                               &dev_flow->tof_type);
12727                         dev_flow->tunnel = tunnel;
12728                 }
12729                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12730                                         (dev, attr, tunnel, dev_flow->tof_type);
12731         }
12734         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12735                                        &grp_info, error);
12736         if (ret)
12737                 return ret;
12738         dev_flow->dv.group = table;
12739         if (attr->transfer)
12740                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12741         /* The number of actions must be reset to 0 in case of a dirty stack. */
12742         mhdr_res->actions_num = 0;
12743         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12744                 /*
12745                  * Do not add a decap action if the match rule drops the
12746                  * packet: HW rejects rules combining decap & drop.
12747                  *
12748                  * If a tunnel match rule was inserted before the matching
12749                  * tunnel set rule, the flow table used in the match rule
12750                  * must be registered. The current implementation handles
12751                  * that in flow_dv_match_register() at the function end.
12752                  */
12753                 bool add_decap = true;
12754                 const struct rte_flow_action *ptr = actions;
12755
12756                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12757                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12758                                 add_decap = false;
12759                                 break;
12760                         }
12761                 }
12762                 if (add_decap) {
12763                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12764                                                            attr->transfer,
12765                                                            error))
12766                                 return -rte_errno;
12767                         dev_flow->dv.actions[actions_n++] =
12768                                         dev_flow->dv.encap_decap->action;
12769                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12770                 }
12771         }
12772         for (; !actions_end ; actions++) {
12773                 const struct rte_flow_action_queue *queue;
12774                 const struct rte_flow_action_rss *rss;
12775                 const struct rte_flow_action *action = actions;
12776                 const uint8_t *rss_key;
12777                 struct mlx5_flow_tbl_resource *tbl;
12778                 struct mlx5_aso_age_action *age_act;
12779                 struct mlx5_flow_counter *cnt_act;
12780                 uint32_t port_id = 0;
12781                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12782                 int action_type = actions->type;
12783                 const struct rte_flow_action *found_action = NULL;
12784                 uint32_t jump_group = 0;
12785                 uint32_t owner_idx;
12786                 struct mlx5_aso_ct_action *ct;
12787
12788                 if (!mlx5_flow_os_action_supported(action_type))
12789                         return rte_flow_error_set(error, ENOTSUP,
12790                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12791                                                   actions,
12792                                                   "action not supported");
12793                 switch (action_type) {
12794                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12795                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12796                         break;
12797                 case RTE_FLOW_ACTION_TYPE_VOID:
12798                         break;
12799                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12800                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12801                         if (flow_dv_translate_action_port_id(dev, action,
12802                                                              &port_id, error))
12803                                 return -rte_errno;
12804                         port_id_resource.port_id = port_id;
12805                         MLX5_ASSERT(!handle->rix_port_id_action);
12806                         if (flow_dv_port_id_action_resource_register
12807                             (dev, &port_id_resource, dev_flow, error))
12808                                 return -rte_errno;
12809                         dev_flow->dv.actions[actions_n++] =
12810                                         dev_flow->dv.port_id_action->action;
12811                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12812                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12813                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12814                         num_of_dest++;
12815                         break;
12816                 case RTE_FLOW_ACTION_TYPE_FLAG:
12817                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12818                         wks->mark = 1;
12819                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12820                                 struct rte_flow_action_mark mark = {
12821                                         .id = MLX5_FLOW_MARK_DEFAULT,
12822                                 };
12823
12824                                 if (flow_dv_convert_action_mark(dev, &mark,
12825                                                                 mhdr_res,
12826                                                                 error))
12827                                         return -rte_errno;
12828                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12829                                 break;
12830                         }
12831                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12832                         /*
12833                          * Only one FLAG or MARK is supported per device flow
12834                          * right now. So the pointer to the tag resource must be
12835                          * zero before the register process.
12836                          */
12837                         MLX5_ASSERT(!handle->dvh.rix_tag);
12838                         if (flow_dv_tag_resource_register(dev, tag_be,
12839                                                           dev_flow, error))
12840                                 return -rte_errno;
12841                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12842                         dev_flow->dv.actions[actions_n++] =
12843                                         dev_flow->dv.tag_resource->action;
12844                         break;
12845                 case RTE_FLOW_ACTION_TYPE_MARK:
12846                         action_flags |= MLX5_FLOW_ACTION_MARK;
12847                         wks->mark = 1;
12848                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12849                                 const struct rte_flow_action_mark *mark =
12850                                         (const struct rte_flow_action_mark *)
12851                                                 actions->conf;
12852
12853                                 if (flow_dv_convert_action_mark(dev, mark,
12854                                                                 mhdr_res,
12855                                                                 error))
12856                                         return -rte_errno;
12857                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12858                                 break;
12859                         }
12860                         /* Fall-through */
12861                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12862                         /* Legacy (non-extensive) MARK action. */
12863                         tag_be = mlx5_flow_mark_set
12864                               (((const struct rte_flow_action_mark *)
12865                                (actions->conf))->id);
12866                         MLX5_ASSERT(!handle->dvh.rix_tag);
12867                         if (flow_dv_tag_resource_register(dev, tag_be,
12868                                                           dev_flow, error))
12869                                 return -rte_errno;
12870                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12871                         dev_flow->dv.actions[actions_n++] =
12872                                         dev_flow->dv.tag_resource->action;
12873                         break;
12874                 case RTE_FLOW_ACTION_TYPE_SET_META:
12875                         if (flow_dv_convert_action_set_meta
12876                                 (dev, mhdr_res, attr,
12877                                  (const struct rte_flow_action_set_meta *)
12878                                   actions->conf, error))
12879                                 return -rte_errno;
12880                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12881                         break;
12882                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12883                         if (flow_dv_convert_action_set_tag
12884                                 (dev, mhdr_res,
12885                                  (const struct rte_flow_action_set_tag *)
12886                                   actions->conf, error))
12887                                 return -rte_errno;
12888                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12889                         break;
12890                 case RTE_FLOW_ACTION_TYPE_DROP:
12891                         action_flags |= MLX5_FLOW_ACTION_DROP;
12892                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12893                         break;
12894                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12895                         queue = actions->conf;
12896                         rss_desc->queue_num = 1;
12897                         rss_desc->queue[0] = queue->index;
12898                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12899                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12900                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12901                         num_of_dest++;
12902                         break;
12903                 case RTE_FLOW_ACTION_TYPE_RSS:
12904                         rss = actions->conf;
12905                         memcpy(rss_desc->queue, rss->queue,
12906                                rss->queue_num * sizeof(uint16_t));
12907                         rss_desc->queue_num = rss->queue_num;
12908                         /* NULL RSS key indicates default RSS key. */
12909                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12910                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12911                         /*
12912                          * rss->level and rss->types must be set in advance
12913                          * when expanding items for RSS.
12914                          */
12915                         action_flags |= MLX5_FLOW_ACTION_RSS;
12916                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12917                                 MLX5_FLOW_FATE_SHARED_RSS :
12918                                 MLX5_FLOW_FATE_QUEUE;
12919                         break;
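                        /*
                         * Illustrative sketch (not part of the driver): an
                         * application requesting the PMD default RSS key
                         * passes a NULL key here, e.g.:
                         *
                         *   struct rte_flow_action_rss rss_conf = {
                         *           .types = RTE_ETH_RSS_IP,
                         *           .key = NULL,
                         *           .key_len = 0,
                         *           .queue_num = 2,
                         *           .queue = (const uint16_t []){ 0, 1 },
                         *   };
                         */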
12920                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12921                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12922                         age_act = flow_aso_age_get_by_idx(dev, owner_idx);
12923                         if (flow->age == 0) {
12924                                 flow->age = owner_idx;
12925                                 __atomic_fetch_add(&age_act->refcnt, 1,
12926                                                    __ATOMIC_RELAXED);
12927                         }
12928                         age_act_pos = actions_n++;
12929                         action_flags |= MLX5_FLOW_ACTION_AGE;
12930                         break;
12931                 case RTE_FLOW_ACTION_TYPE_AGE:
12932                         non_shared_age = action->conf;
12933                         age_act_pos = actions_n++;
12934                         action_flags |= MLX5_FLOW_ACTION_AGE;
12935                         break;
12936                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12937                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12938                         cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
12939                                                              NULL);
12940                         MLX5_ASSERT(cnt_act != NULL);
12941                         /*
12942                          * When creating a meter drop flow in the drop table,
12943                          * the counter must not overwrite the rte_flow counter.
12944                          */
12945                         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
12946                             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
12947                                 dev_flow->dv.actions[actions_n++] =
12948                                                         cnt_act->action;
12949                         } else {
12950                                 if (flow->counter == 0) {
12951                                         flow->counter = owner_idx;
12952                                         __atomic_fetch_add
12953                                                 (&cnt_act->shared_info.refcnt,
12954                                                  1, __ATOMIC_RELAXED);
12955                                 }
12956                                 /* Save the information first; apply it later. */
12957                                 action_flags |= MLX5_FLOW_ACTION_COUNT;
12958                         }
12959                         break;
12960                 case RTE_FLOW_ACTION_TYPE_COUNT:
12961                         if (!priv->sh->devx) {
12962                                 return rte_flow_error_set
12963                                               (error, ENOTSUP,
12964                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12965                                                NULL,
12966                                                "count action not supported");
12967                         }
12968                         /* Save the information first; apply it later. */
12969                         count = action->conf;
12970                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12971                         break;
12972                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12973                         dev_flow->dv.actions[actions_n++] =
12974                                                 priv->sh->pop_vlan_action;
12975                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12976                         break;
12977                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12978                         if (!(action_flags &
12979                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12980                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12981                         vlan.eth_proto = rte_be_to_cpu_16
12982                              ((((const struct rte_flow_action_of_push_vlan *)
12983                                                    actions->conf)->ethertype));
12984                         found_action = mlx5_flow_find_action
12985                                         (actions + 1,
12986                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12987                         if (found_action)
12988                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12989                         found_action = mlx5_flow_find_action
12990                                         (actions + 1,
12991                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12992                         if (found_action)
12993                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12994                         if (flow_dv_create_action_push_vlan
12995                                             (dev, attr, &vlan, dev_flow, error))
12996                                 return -rte_errno;
12997                         dev_flow->dv.actions[actions_n++] =
12998                                         dev_flow->dv.push_vlan_res->action;
12999                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
13000                         break;
13001                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
13002                         /* Already handled by the OF_PUSH_VLAN action. */
13003                         MLX5_ASSERT(action_flags &
13004                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
13005                         break;
13006                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
13007                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
13008                                 break;
13009                         flow_dev_get_vlan_info_from_items(items, &vlan);
13010                         mlx5_update_vlan_vid_pcp(actions, &vlan);
13011                         /* Without a VLAN push, this is a modify header action. */
13012                         if (flow_dv_convert_action_modify_vlan_vid
13013                                                 (mhdr_res, actions, error))
13014                                 return -rte_errno;
13015                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
13016                         break;
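                        /*
                         * Example (illustrative only): in an action list such
                         * as
                         *
                         *   OF_PUSH_VLAN(ethertype=0x8100),
                         *   OF_SET_VLAN_VID(vid=100),
                         *   OF_SET_VLAN_PCP(pcp=3), ...
                         *
                         * the VID/PCP values are folded into the push-VLAN
                         * action above, so the two set-VLAN cases reduce to
                         * no-ops here.
                         */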
13017                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13018                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13019                         if (flow_dv_create_action_l2_encap(dev, actions,
13020                                                            dev_flow,
13021                                                            attr->transfer,
13022                                                            error))
13023                                 return -rte_errno;
13024                         dev_flow->dv.actions[actions_n++] =
13025                                         dev_flow->dv.encap_decap->action;
13026                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13027                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13028                                 sample_act->action_flags |=
13029                                                         MLX5_FLOW_ACTION_ENCAP;
13030                         break;
13031                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
13032                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
13033                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
13034                                                            attr->transfer,
13035                                                            error))
13036                                 return -rte_errno;
13037                         dev_flow->dv.actions[actions_n++] =
13038                                         dev_flow->dv.encap_decap->action;
13039                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13040                         break;
13041                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13042                         /* Handle encap with preceding decap. */
13043                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
13044                                 if (flow_dv_create_action_raw_encap
13045                                         (dev, actions, dev_flow, attr, error))
13046                                         return -rte_errno;
13047                                 dev_flow->dv.actions[actions_n++] =
13048                                         dev_flow->dv.encap_decap->action;
13049                         } else {
13050                                 /* Handle encap without preceding decap. */
13051                                 if (flow_dv_create_action_l2_encap
13052                                     (dev, actions, dev_flow, attr->transfer,
13053                                      error))
13054                                         return -rte_errno;
13055                                 dev_flow->dv.actions[actions_n++] =
13056                                         dev_flow->dv.encap_decap->action;
13057                         }
13058                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13059                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13060                                 sample_act->action_flags |=
13061                                                         MLX5_FLOW_ACTION_ENCAP;
13062                         break;
13063                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
13064                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13065                                 ;
13066                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13067                                 if (flow_dv_create_action_l2_decap
13068                                     (dev, dev_flow, attr->transfer, error))
13069                                         return -rte_errno;
13070                                 dev_flow->dv.actions[actions_n++] =
13071                                         dev_flow->dv.encap_decap->action;
13072                         }
13073                         /* If decap is followed by encap, handle it at encap. */
13074                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13075                         break;
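                        /*
                         * Illustrative note: a RAW_DECAP immediately followed
                         * by a RAW_ENCAP, e.g.
                         *
                         *   RAW_DECAP(outer headers), RAW_ENCAP(new headers),
                         *   QUEUE,
                         *
                         * creates no action here; the pair is translated as a
                         * single packet reformat in the RAW_ENCAP case above.
                         */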
13076                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13077                         dev_flow->dv.actions[actions_n++] =
13078                                 (void *)(uintptr_t)action->conf;
13079                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13080                         break;
13081                 case RTE_FLOW_ACTION_TYPE_JUMP:
13082                         jump_group = ((const struct rte_flow_action_jump *)
13083                                                         action->conf)->group;
13084                         grp_info.std_tbl_fix = 0;
13085                         if (dev_flow->skip_scale &
13086                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13087                                 grp_info.skip_scale = 1;
13088                         else
13089                                 grp_info.skip_scale = 0;
13090                         ret = mlx5_flow_group_to_table(dev, tunnel,
13091                                                        jump_group,
13092                                                        &table,
13093                                                        &grp_info, error);
13094                         if (ret)
13095                                 return ret;
13096                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13097                                                        attr->transfer,
13098                                                        !!dev_flow->external,
13099                                                        tunnel, jump_group, 0,
13100                                                        0, error);
13101                         if (!tbl)
13102                                 return rte_flow_error_set
13103                                                 (error, errno,
13104                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13105                                                  NULL,
13106                                                  "cannot create jump action.");
13107                         if (flow_dv_jump_tbl_resource_register
13108                             (dev, tbl, dev_flow, error)) {
13109                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13110                                 return rte_flow_error_set
13111                                                 (error, errno,
13112                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13113                                                  NULL,
13114                                                  "cannot create jump action.");
13115                         }
13116                         dev_flow->dv.actions[actions_n++] =
13117                                         dev_flow->dv.jump->action;
13118                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13119                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13120                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13121                         num_of_dest++;
13122                         break;
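                        /*
                         * Note (illustrative): a JUMP to group N does not
                         * necessarily land in hardware table N;
                         * mlx5_flow_group_to_table() maps the rte_flow group
                         * to the table id, honoring the skip_scale bits set
                         * above.
                         */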
13123                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13124                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13125                         if (flow_dv_convert_action_modify_mac
13126                                         (mhdr_res, actions, error))
13127                                 return -rte_errno;
13128                         action_flags |= actions->type ==
13129                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13130                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
13131                                         MLX5_FLOW_ACTION_SET_MAC_DST;
13132                         break;
13133                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13134                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13135                         if (flow_dv_convert_action_modify_ipv4
13136                                         (mhdr_res, actions, error))
13137                                 return -rte_errno;
13138                         action_flags |= actions->type ==
13139                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13140                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
13141                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
13142                         break;
13143                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13144                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13145                         if (flow_dv_convert_action_modify_ipv6
13146                                         (mhdr_res, actions, error))
13147                                 return -rte_errno;
13148                         action_flags |= actions->type ==
13149                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13150                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
13151                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
13152                         break;
13153                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13154                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13155                         if (flow_dv_convert_action_modify_tp
13156                                         (mhdr_res, actions, items,
13157                                          &flow_attr, dev_flow, !!(action_flags &
13158                                          MLX5_FLOW_ACTION_DECAP), error))
13159                                 return -rte_errno;
13160                         action_flags |= actions->type ==
13161                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13162                                         MLX5_FLOW_ACTION_SET_TP_SRC :
13163                                         MLX5_FLOW_ACTION_SET_TP_DST;
13164                         break;
13165                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13166                         if (flow_dv_convert_action_modify_dec_ttl
13167                                         (mhdr_res, items, &flow_attr, dev_flow,
13168                                          !!(action_flags &
13169                                          MLX5_FLOW_ACTION_DECAP), error))
13170                                 return -rte_errno;
13171                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13172                         break;
13173                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13174                         if (flow_dv_convert_action_modify_ttl
13175                                         (mhdr_res, actions, items, &flow_attr,
13176                                          dev_flow, !!(action_flags &
13177                                          MLX5_FLOW_ACTION_DECAP), error))
13178                                 return -rte_errno;
13179                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13180                         break;
13181                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13182                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13183                         if (flow_dv_convert_action_modify_tcp_seq
13184                                         (mhdr_res, actions, error))
13185                                 return -rte_errno;
13186                         action_flags |= actions->type ==
13187                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13188                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
13189                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13190                         break;
13191
13192                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13193                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13194                         if (flow_dv_convert_action_modify_tcp_ack
13195                                         (mhdr_res, actions, error))
13196                                 return -rte_errno;
13197                         action_flags |= actions->type ==
13198                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13199                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13200                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13201                         break;
13202                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13203                         if (flow_dv_convert_action_set_reg
13204                                         (mhdr_res, actions, error))
13205                                 return -rte_errno;
13206                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13207                         break;
13208                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13209                         if (flow_dv_convert_action_copy_mreg
13210                                         (dev, mhdr_res, actions, error))
13211                                 return -rte_errno;
13212                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13213                         break;
13214                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13215                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13216                         dev_flow->handle->fate_action =
13217                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13218                         break;
13219                 case RTE_FLOW_ACTION_TYPE_METER:
13220                         if (!wks->fm)
13221                                 return rte_flow_error_set(error, rte_errno,
13222                                         RTE_FLOW_ERROR_TYPE_ACTION,
13223                                         NULL, "Failed to get meter in flow.");
13224                         /* Set the meter action. */
13225                         dev_flow->dv.actions[actions_n++] =
13226                                 wks->fm->meter_action;
13227                         action_flags |= MLX5_FLOW_ACTION_METER;
13228                         break;
13229                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13230                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13231                                                               actions, error))
13232                                 return -rte_errno;
13233                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13234                         break;
13235                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13236                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13237                                                               actions, error))
13238                                 return -rte_errno;
13239                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13240                         break;
13241                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13242                         sample_act_pos = actions_n;
13243                         sample = (const struct rte_flow_action_sample *)
13244                                  action->conf;
13245                         actions_n++;
13246                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13247                         /* Put the encap action into the group when used with port_id. */
13248                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13249                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13250                                 sample_act->action_flags |=
13251                                                         MLX5_FLOW_ACTION_ENCAP;
13252                         break;
13253                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13254                         if (flow_dv_convert_action_modify_field
13255                                         (dev, mhdr_res, actions, attr, error))
13256                                 return -rte_errno;
13257                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13258                         break;
13259                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13260                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13261                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13262                         if (!ct)
13263                                 return rte_flow_error_set(error, EINVAL,
13264                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13265                                                 NULL,
13266                                                 "Failed to get CT object.");
13267                         if (mlx5_aso_ct_available(priv->sh, ct))
13268                                 return rte_flow_error_set(error, rte_errno,
13269                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13270                                                 NULL,
13271                                                 "CT is unavailable.");
13272                         if (ct->is_original)
13273                                 dev_flow->dv.actions[actions_n] =
13274                                                         ct->dr_action_orig;
13275                         else
13276                                 dev_flow->dv.actions[actions_n] =
13277                                                         ct->dr_action_rply;
13278                         if (flow->ct == 0) {
13279                                 flow->indirect_type =
13280                                                 MLX5_INDIRECT_ACTION_TYPE_CT;
13281                                 flow->ct = owner_idx;
13282                                 __atomic_fetch_add(&ct->refcnt, 1,
13283                                                    __ATOMIC_RELAXED);
13284                         }
13285                         actions_n++;
13286                         action_flags |= MLX5_FLOW_ACTION_CT;
13287                         break;
13288                 case RTE_FLOW_ACTION_TYPE_END:
13289                         actions_end = true;
13290                         if (mhdr_res->actions_num) {
13291                                 /* Create the modify header action if needed. */
13292                                 if (flow_dv_modify_hdr_resource_register
13293                                         (dev, mhdr_res, dev_flow, error))
13294                                         return -rte_errno;
13295                                 dev_flow->dv.actions[modify_action_position] =
13296                                         handle->dvh.modify_hdr->action;
13297                         }
13298                         /*
13299                          * Handle the AGE and COUNT actions with a single HW
13300                          * counter when neither is shared.
13301                          */
13302                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13303                                 if ((non_shared_age && count) ||
13304                                     !(priv->sh->flow_hit_aso_en &&
13305                                       (attr->group || attr->transfer))) {
13306                                         /* Create aging based on flow counters. */
13307                                         cnt_act = flow_dv_prepare_counter
13308                                                                 (dev, dev_flow,
13309                                                                  flow, count,
13310                                                                  non_shared_age,
13311                                                                  error);
13312                                         if (!cnt_act)
13313                                                 return -rte_errno;
13314                                         dev_flow->dv.actions[age_act_pos] =
13315                                                                 cnt_act->action;
13316                                         break;
13317                                 }
13318                                 if (!flow->age && non_shared_age) {
13319                                         flow->age = flow_dv_aso_age_alloc
13320                                                                 (dev, error);
13321                                         if (!flow->age)
13322                                                 return -rte_errno;
13323                                         flow_dv_aso_age_params_init
13324                                                     (dev, flow->age,
13325                                                      non_shared_age->context ?
13326                                                      non_shared_age->context :
13327                                                      (void *)(uintptr_t)
13328                                                      (dev_flow->flow_idx),
13329                                                      non_shared_age->timeout);
13330                                 }
13331                                 age_act = flow_aso_age_get_by_idx(dev,
13332                                                                   flow->age);
13333                                 dev_flow->dv.actions[age_act_pos] =
13334                                                              age_act->dr_action;
13335                         }
13336                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13337                                 /*
13338                                  * Create one count action, to be used
13339                                  * by all sub-flows.
13340                                  */
13341                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13342                                                                   flow, count,
13343                                                                   NULL, error);
13344                                 if (!cnt_act)
13345                                         return -rte_errno;
13346                                 dev_flow->dv.actions[actions_n++] =
13347                                                                 cnt_act->action;
13348                         }
13349                 default:
13350                         break;
13351                 }
13352                 if (mhdr_res->actions_num &&
13353                     modify_action_position == UINT32_MAX)
13354                         modify_action_position = actions_n++;
13355         }
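        /*
         * Note: the first modify-header action reserves a slot in
         * dev_flow->dv.actions (modify_action_position); the commands
         * accumulated in mhdr_res are registered once, at
         * RTE_FLOW_ACTION_TYPE_END, and the resulting action is written
         * back into that reserved slot.
         */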
13356         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13357                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13358                 int item_type = items->type;
13359
13360                 if (!mlx5_flow_os_item_supported(item_type))
13361                         return rte_flow_error_set(error, ENOTSUP,
13362                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13363                                                   NULL, "item not supported");
13364                 switch (item_type) {
13365                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13366                         flow_dv_translate_item_port_id
13367                                 (dev, match_mask, match_value, items, attr);
13368                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13369                         break;
13370                 case RTE_FLOW_ITEM_TYPE_ETH:
13371                         flow_dv_translate_item_eth(match_mask, match_value,
13372                                                    items, tunnel,
13373                                                    dev_flow->dv.group);
13374                         matcher.priority = action_flags &
13375                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13376                                         !dev_flow->external ?
13377                                         MLX5_PRIORITY_MAP_L3 :
13378                                         MLX5_PRIORITY_MAP_L2;
13379                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13380                                              MLX5_FLOW_LAYER_OUTER_L2;
13381                         break;
13382                 case RTE_FLOW_ITEM_TYPE_VLAN:
13383                         flow_dv_translate_item_vlan(dev_flow,
13384                                                     match_mask, match_value,
13385                                                     items, tunnel,
13386                                                     dev_flow->dv.group);
13387                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13388                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13389                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13390                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13391                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13392                         break;
13393                 case RTE_FLOW_ITEM_TYPE_IPV4:
13394                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13395                                                   &item_flags, &tunnel);
13396                         flow_dv_translate_item_ipv4(match_mask, match_value,
13397                                                     items, tunnel,
13398                                                     dev_flow->dv.group);
13399                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13400                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13401                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13402                         if (items->mask != NULL &&
13403                             ((const struct rte_flow_item_ipv4 *)
13404                              items->mask)->hdr.next_proto_id) {
13405                                 next_protocol =
13406                                         ((const struct rte_flow_item_ipv4 *)
13407                                          (items->spec))->hdr.next_proto_id;
13408                                 next_protocol &=
13409                                         ((const struct rte_flow_item_ipv4 *)
13410                                          (items->mask))->hdr.next_proto_id;
13411                         } else {
13412                                 /* Reset for inner layer. */
13413                                 next_protocol = 0xff;
13414                         }
13415                         break;
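                        /*
                         * Example (illustrative): a pattern with
                         * spec.hdr.next_proto_id = IPPROTO_IPIP and
                         * mask.hdr.next_proto_id = 0xff yields next_protocol
                         * = 4, letting mlx5_flow_tunnel_ip_check() treat a
                         * following IPv4 item as the inner layer of an
                         * IP-in-IP tunnel.
                         */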
13416                 case RTE_FLOW_ITEM_TYPE_IPV6:
13417                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13418                                                   &item_flags, &tunnel);
13419                         flow_dv_translate_item_ipv6(match_mask, match_value,
13420                                                     items, tunnel,
13421                                                     dev_flow->dv.group);
13422                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13423                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13424                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13425                         if (items->mask != NULL &&
13426                             ((const struct rte_flow_item_ipv6 *)
13427                              items->mask)->hdr.proto) {
13428                                 next_protocol =
13429                                         ((const struct rte_flow_item_ipv6 *)
13430                                          items->spec)->hdr.proto;
13431                                 next_protocol &=
13432                                         ((const struct rte_flow_item_ipv6 *)
13433                                          items->mask)->hdr.proto;
13434                         } else {
13435                                 /* Reset for inner layer. */
13436                                 next_protocol = 0xff;
13437                         }
13438                         break;
13439                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13440                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13441                                                              match_value,
13442                                                              items, tunnel);
13443                         last_item = tunnel ?
13444                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13445                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13446                         if (items->mask != NULL &&
13447                             ((const struct rte_flow_item_ipv6_frag_ext *)
13448                              items->mask)->hdr.next_header) {
13449                                 next_protocol =
13450                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13451                                  items->spec)->hdr.next_header;
13452                                 next_protocol &=
13453                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13454                                  items->mask)->hdr.next_header;
13455                         } else {
13456                                 /* Reset for inner layer. */
13457                                 next_protocol = 0xff;
13458                         }
13459                         break;
13460                 case RTE_FLOW_ITEM_TYPE_TCP:
13461                         flow_dv_translate_item_tcp(match_mask, match_value,
13462                                                    items, tunnel);
13463                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13464                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13465                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13466                         break;
13467                 case RTE_FLOW_ITEM_TYPE_UDP:
13468                         flow_dv_translate_item_udp(match_mask, match_value,
13469                                                    items, tunnel);
13470                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13471                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13472                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13473                         break;
13474                 case RTE_FLOW_ITEM_TYPE_GRE:
13475                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13476                         last_item = MLX5_FLOW_LAYER_GRE;
13477                         tunnel_item = items;
13478                         break;
13479                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13480                         flow_dv_translate_item_gre_key(match_mask,
13481                                                        match_value, items);
13482                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13483                         break;
13484                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13485                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13486                         last_item = MLX5_FLOW_LAYER_GRE;
13487                         tunnel_item = items;
13488                         break;
13489                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13490                         flow_dv_translate_item_vxlan(dev, attr,
13491                                                      match_mask, match_value,
13492                                                      items, tunnel);
13493                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13494                         last_item = MLX5_FLOW_LAYER_VXLAN;
13495                         break;
13496                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13497                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13498                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13499                         tunnel_item = items;
13500                         break;
13501                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13502                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13503                         last_item = MLX5_FLOW_LAYER_GENEVE;
13504                         tunnel_item = items;
13505                         break;
13506                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13507                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13508                                                           match_value,
13509                                                           items, error);
13510                         if (ret)
13511                                 return rte_flow_error_set(error, -ret,
13512                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13513                                         "cannot create GENEVE TLV option");
13514                         flow->geneve_tlv_option = 1;
13515                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13516                         break;
13517                 case RTE_FLOW_ITEM_TYPE_MPLS:
13518                         flow_dv_translate_item_mpls(match_mask, match_value,
13519                                                     items, last_item, tunnel);
13520                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13521                         last_item = MLX5_FLOW_LAYER_MPLS;
13522                         break;
13523                 case RTE_FLOW_ITEM_TYPE_MARK:
13524                         flow_dv_translate_item_mark(dev, match_mask,
13525                                                     match_value, items);
13526                         last_item = MLX5_FLOW_ITEM_MARK;
13527                         break;
13528                 case RTE_FLOW_ITEM_TYPE_META:
13529                         flow_dv_translate_item_meta(dev, match_mask,
13530                                                     match_value, attr, items);
13531                         last_item = MLX5_FLOW_ITEM_METADATA;
13532                         break;
13533                 case RTE_FLOW_ITEM_TYPE_ICMP:
13534                         flow_dv_translate_item_icmp(match_mask, match_value,
13535                                                     items, tunnel);
13536                         last_item = MLX5_FLOW_LAYER_ICMP;
13537                         break;
13538                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13539                         flow_dv_translate_item_icmp6(match_mask, match_value,
13540                                                       items, tunnel);
13541                         last_item = MLX5_FLOW_LAYER_ICMP6;
13542                         break;
13543                 case RTE_FLOW_ITEM_TYPE_TAG:
13544                         flow_dv_translate_item_tag(dev, match_mask,
13545                                                    match_value, items);
13546                         last_item = MLX5_FLOW_ITEM_TAG;
13547                         break;
13548                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13549                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13550                                                         match_value, items);
13551                         last_item = MLX5_FLOW_ITEM_TAG;
13552                         break;
13553                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13554                         flow_dv_translate_item_tx_queue(dev, match_mask,
13555                                                         match_value,
13556                                                         items);
13557                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13558                         break;
13559                 case RTE_FLOW_ITEM_TYPE_GTP:
13560                         flow_dv_translate_item_gtp(match_mask, match_value,
13561                                                    items, tunnel);
13562                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13563                         last_item = MLX5_FLOW_LAYER_GTP;
13564                         break;
13565                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13566                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13567                                                           match_value,
13568                                                           items);
13569                         if (ret)
13570                                 return rte_flow_error_set(error, -ret,
13571                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13572                                         "cannot create GTP PSC item");
13573                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13574                         break;
13575                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13576                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13577                                 /* Create it only on first use. */
13578                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13579                                 if (ret)
13580                                         return rte_flow_error_set
13581                                                 (error, -ret,
13582                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13583                                                 NULL,
13584                                                 "cannot create eCPRI parser");
13585                         }
13586                         flow_dv_translate_item_ecpri(dev, match_mask,
13587                                                      match_value, items,
13588                                                      last_item);
13589                         /* No other protocol should follow eCPRI layer. */
13590                         last_item = MLX5_FLOW_LAYER_ECPRI;
13591                         break;
13592                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13593                         flow_dv_translate_item_integrity(items, integrity_items,
13594                                                          &last_item);
13595                         break;
13596                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13597                         flow_dv_translate_item_aso_ct(dev, match_mask,
13598                                                       match_value, items);
13599                         break;
13600                 case RTE_FLOW_ITEM_TYPE_FLEX:
13601                         flow_dv_translate_item_flex(dev, match_mask,
13602                                                     match_value, items,
13603                                                     dev_flow, tunnel != 0);
13604                         last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
13605                                     MLX5_FLOW_ITEM_OUTER_FLEX;
13606                         break;
13607                 default:
13608                         break;
13609                 }
13610                 item_flags |= last_item;
13611         }
13612         /*
13613          * When E-Switch mode is enabled, there are two cases where the
13614          * source port must be set manually: the first is a NIC steering
13615          * rule, and the second is an E-Switch rule where no port_id item
13616          * was found. In both cases the source port is set according to
13617          * the current port in use.
13618          */
13619         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13620             (priv->representor || priv->master)) {
13621                 if (flow_dv_translate_item_port_id(dev, match_mask,
13622                                                    match_value, NULL, attr))
13623                         return -rte_errno;
13624         }
13625         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
13626                 flow_dv_translate_item_integrity_post(match_mask, match_value,
13627                                                       integrity_items,
13628                                                       item_flags);
13629         }
13630         if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
13631                 flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
13632                                                  tunnel_item, item_flags);
13633         else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
13634                 flow_dv_translate_item_geneve(match_mask, match_value,
13635                                               tunnel_item, item_flags);
13636         else if (item_flags & MLX5_FLOW_LAYER_GRE) {
13637                 if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
13638                         flow_dv_translate_item_gre(match_mask, match_value,
13639                                                    tunnel_item, item_flags);
13640                 else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
13641                         flow_dv_translate_item_nvgre(match_mask, match_value,
13642                                                      tunnel_item, item_flags);
13643                 else
13644                         MLX5_ASSERT(false);
13645         }
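        /*
         * Note: GRE, NVGRE, GENEVE and VXLAN-GPE items are only recorded in
         * the item loop above and translated here, once the item_flags of
         * the whole pattern are known.
         */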
13646 #ifdef RTE_LIBRTE_MLX5_DEBUG
13647         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13648                                               dev_flow->dv.value.buf));
13649 #endif
13650         /*
13651          * Layers may already be initialized from the prefix flow if this
13652          * dev_flow is the suffix flow.
13653          */
13654         handle->layers |= item_flags;
13655         if (action_flags & MLX5_FLOW_ACTION_RSS)
13656                 flow_dv_hashfields_set(dev_flow, rss_desc);
13657         /* If the sample action includes an RSS action, the Sample/Mirror
13658          * resource must be registered after the hash fields are updated.
13659          */
13660         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13661                 ret = flow_dv_translate_action_sample(dev,
13662                                                       sample,
13663                                                       dev_flow, attr,
13664                                                       &num_of_dest,
13665                                                       sample_actions,
13666                                                       &sample_res,
13667                                                       error);
13668                 if (ret < 0)
13669                         return ret;
13670                 ret = flow_dv_create_action_sample(dev,
13671                                                    dev_flow,
13672                                                    num_of_dest,
13673                                                    &sample_res,
13674                                                    &mdest_res,
13675                                                    sample_actions,
13676                                                    action_flags,
13677                                                    error);
13678                 if (ret < 0)
13679                         return rte_flow_error_set
13680                                                 (error, rte_errno,
13681                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13682                                                 NULL,
13683                                                 "cannot create sample action");
13684                 if (num_of_dest > 1) {
13685                         dev_flow->dv.actions[sample_act_pos] =
13686                         dev_flow->dv.dest_array_res->action;
13687                 } else {
13688                         dev_flow->dv.actions[sample_act_pos] =
13689                         dev_flow->dv.sample_res->verbs_action;
13690                 }
13691         }
13692         /*
13693          * For multiple destinations (sample action with ratio=1), the encap
13694          * action and the port_id action are combined into a group action,
13695          * so the original actions must be removed from the flow and the
13696          * sample action used instead.
13697          */
13698         if (num_of_dest > 1 &&
13699             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13700                 int i;
13701                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13702
13703                 for (i = 0; i < actions_n; i++) {
13704                         if ((sample_act->dr_encap_action &&
13705                                 sample_act->dr_encap_action ==
13706                                 dev_flow->dv.actions[i]) ||
13707                                 (sample_act->dr_port_id_action &&
13708                                 sample_act->dr_port_id_action ==
13709                                 dev_flow->dv.actions[i]) ||
13710                                 (sample_act->dr_jump_action &&
13711                                 sample_act->dr_jump_action ==
13712                                 dev_flow->dv.actions[i]))
13713                                 continue;
13714                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13715                 }
13716                 memcpy((void *)dev_flow->dv.actions,
13717                                 (void *)temp_actions,
13718                                 tmp_actions_n * sizeof(void *));
13719                 actions_n = tmp_actions_n;
13720         }
13721         dev_flow->dv.actions_n = actions_n;
13722         dev_flow->act_flags = action_flags;
13723         if (wks->skip_matcher_reg)
13724                 return 0;
13725         /* Register matcher. */
13726         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13727                                     matcher.mask.size);
13728         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13729                                                      matcher.priority,
13730                                                      dev_flow->external);
13731         /*
13732          * When creating a meter drop flow in the drop table with the
13733          * original 5-tuple match, the matcher priority must be lower
13734          * than that of the mtr_id matcher.
13735          */
13736         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13737             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13738             matcher.priority <= MLX5_REG_BITS)
13739                 matcher.priority += MLX5_REG_BITS;
13740         /* The reserved field does not need to be set to 0 here. */
13741         tbl_key.is_fdb = attr->transfer;
13742         tbl_key.is_egress = attr->egress;
13743         tbl_key.level = dev_flow->dv.group;
13744         tbl_key.id = dev_flow->dv.table_id;
13745         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13746                                      tunnel, attr->group, error))
13747                 return -rte_errno;
13748         return 0;
13749 }
13750
13751 /**
13752  * Set the hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13753  * The IBV_RX_HASH_INNER flag is masked off, so the inner and outer
13754  * variants of a given hash type share the same table slot.
13755  *
13756  * @param[in, out] action
13757  *   Shared RSS action holding hash RX queue objects.
13758  * @param[in] hash_fields
13759  *   Defines combination of packet fields to participate in RX hash.
13761  * @param[in] hrxq_idx
13762  *   Hash RX queue index to set.
13763  *
13764  * @return
13765  *   0 on success, otherwise negative errno value.
13766  */
13767 static int
13768 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13769                               const uint64_t hash_fields,
13770                               uint32_t hrxq_idx)
13771 {
13772         uint32_t *hrxqs = action->hrxq;
13773
13774         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13775         case MLX5_RSS_HASH_IPV4:
13776                 /* fall-through. */
13777         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13778                 /* fall-through. */
13779         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13780                 hrxqs[0] = hrxq_idx;
13781                 return 0;
13782         case MLX5_RSS_HASH_IPV4_TCP:
13783                 /* fall-through. */
13784         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13785                 /* fall-through. */
13786         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13787                 hrxqs[1] = hrxq_idx;
13788                 return 0;
13789         case MLX5_RSS_HASH_IPV4_UDP:
13790                 /* fall-through. */
13791         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13792                 /* fall-through. */
13793         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13794                 hrxqs[2] = hrxq_idx;
13795                 return 0;
13796         case MLX5_RSS_HASH_IPV6:
13797                 /* fall-through. */
13798         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13799                 /* fall-through. */
13800         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13801                 hrxqs[3] = hrxq_idx;
13802                 return 0;
13803         case MLX5_RSS_HASH_IPV6_TCP:
13804                 /* fall-through. */
13805         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13806                 /* fall-through. */
13807         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13808                 hrxqs[4] = hrxq_idx;
13809                 return 0;
13810         case MLX5_RSS_HASH_IPV6_UDP:
13811                 /* fall-through. */
13812         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13813                 /* fall-through. */
13814         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13815                 hrxqs[5] = hrxq_idx;
13816                 return 0;
13817         case MLX5_RSS_HASH_NONE:
13818                 hrxqs[6] = hrxq_idx;
13819                 return 0;
13820         default:
13821                 return -1;
13822         }
13823 }
13824
13825 /**
13826  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13827  * As in the setter above, the IBV_RX_HASH_INNER flag is masked off
13828  * before the slot is selected, so the inner and outer variants of a
13829  * hash type resolve to the same queue object.
13830  *
13831  * @param[in] dev
13832  *   Pointer to the Ethernet device structure.
13833  * @param[in] idx
13834  *   Shared RSS action ID holding hash RX queue objects.
13835  * @param[in] hash_fields
13836  *   Defines combination of packet fields to participate in RX hash.
13837  *
13838  * @return
13839  *   Valid hash RX queue index, otherwise 0.
13840  */
13841 static uint32_t
13842 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13843                                  const uint64_t hash_fields)
13844 {
13845         struct mlx5_priv *priv = dev->data->dev_private;
13846         struct mlx5_shared_action_rss *shared_rss =
13847             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13848         const uint32_t *hrxqs = shared_rss->hrxq;
13849
13850         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13851         case MLX5_RSS_HASH_IPV4:
13852                 /* fall-through. */
13853         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13854                 /* fall-through. */
13855         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13856                 return hrxqs[0];
13857         case MLX5_RSS_HASH_IPV4_TCP:
13858                 /* fall-through. */
13859         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13860                 /* fall-through. */
13861         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13862                 return hrxqs[1];
13863         case MLX5_RSS_HASH_IPV4_UDP:
13864                 /* fall-through. */
13865         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13866                 /* fall-through. */
13867         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13868                 return hrxqs[2];
13869         case MLX5_RSS_HASH_IPV6:
13870                 /* fall-through. */
13871         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13872                 /* fall-through. */
13873         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13874                 return hrxqs[3];
13875         case MLX5_RSS_HASH_IPV6_TCP:
13876                 /* fall-through. */
13877         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13878                 /* fall-through. */
13879         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13880                 return hrxqs[4];
13881         case MLX5_RSS_HASH_IPV6_UDP:
13882                 /* fall-through. */
13883         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13884                 /* fall-through. */
13885         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13886                 return hrxqs[5];
13887         case MLX5_RSS_HASH_NONE:
13888                 return hrxqs[6];
13889         default:
13890                 return 0;
13891         }
13892
13893 }
13894
13895 /**
13896  * Apply the flow to the NIC. Lock free;
13897  * the mutex should be acquired by the caller.
13898  *
13899  * @param[in] dev
13900  *   Pointer to the Ethernet device structure.
13901  * @param[in, out] flow
13902  *   Pointer to flow structure.
13903  * @param[out] error
13904  *   Pointer to error structure.
13905  *
13906  * @return
13907  *   0 on success, a negative errno value otherwise and rte_errno is set.
13908  */
13909 static int
13910 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13911               struct rte_flow_error *error)
13912 {
13913         struct mlx5_flow_dv_workspace *dv;
13914         struct mlx5_flow_handle *dh;
13915         struct mlx5_flow_handle_dv *dv_h;
13916         struct mlx5_flow *dev_flow;
13917         struct mlx5_priv *priv = dev->data->dev_private;
13918         uint32_t handle_idx;
13919         int n;
13920         int err;
13921         int idx;
13922         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13923         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13924         uint8_t misc_mask;
13925
13926         MLX5_ASSERT(wks);
13927         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13928                 dev_flow = &wks->flows[idx];
13929                 dv = &dev_flow->dv;
13930                 dh = dev_flow->handle;
13931                 dv_h = &dh->dvh;
13932                 n = dv->actions_n;
13933                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13934                         if (dv->transfer) {
13935                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13936                                 dv->actions[n++] = priv->sh->dr_drop_action;
13937                         } else {
13938 #ifdef HAVE_MLX5DV_DR
13939                                 /* DR supports drop action placeholder. */
13940                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13941                                 dv->actions[n++] = dv->group ?
13942                                         priv->sh->dr_drop_action :
13943                                         priv->root_drop_action;
13944 #else
13945                                 /* For DV we use the explicit drop queue. */
13946                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13947                                 dv->actions[n++] =
13948                                                 priv->drop_queue.hrxq->action;
13949 #endif
13950                         }
13951                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13952                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13953                         struct mlx5_hrxq *hrxq;
13954                         uint32_t hrxq_idx;
13955
13956                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13957                                                     &hrxq_idx);
13958                         if (!hrxq) {
13959                                 rte_flow_error_set
13960                                         (error, rte_errno,
13961                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13962                                          "cannot get hash queue");
13963                                 goto error;
13964                         }
13965                         dh->rix_hrxq = hrxq_idx;
13966                         dv->actions[n++] = hrxq->action;
13967                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13968                         struct mlx5_hrxq *hrxq = NULL;
13969                         uint32_t hrxq_idx;
13970
13971                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13972                                                 rss_desc->shared_rss,
13973                                                 dev_flow->hash_fields);
13974                         if (hrxq_idx)
13975                                 hrxq = mlx5_ipool_get
13976                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13977                                          hrxq_idx);
13978                         if (!hrxq) {
13979                                 rte_flow_error_set
13980                                         (error, rte_errno,
13981                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13982                                          "cannot get hash queue");
13983                                 goto error;
13984                         }
13985                         dh->rix_srss = rss_desc->shared_rss;
13986                         dv->actions[n++] = hrxq->action;
13987                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13988                         if (!priv->sh->default_miss_action) {
13989                                 rte_flow_error_set
13990                                         (error, rte_errno,
13991                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13992                                          "default miss action not created.");
13993                                 goto error;
13994                         }
13995                         dv->actions[n++] = priv->sh->default_miss_action;
13996                 }
13997                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13998                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13999                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
14000                                                (void *)&dv->value, n,
14001                                                dv->actions, &dh->drv_flow);
14002                 if (err) {
14003                         rte_flow_error_set
14004                                 (error, errno,
14005                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14006                                 NULL,
14007                                 (!priv->config.allow_duplicate_pattern &&
14008                                 errno == EEXIST) ?
14009                                 "duplicating pattern is not allowed" :
14010                                 "hardware refuses to create flow");
14011                         goto error;
14012                 }
14013                 if (priv->vmwa_context &&
14014                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
14015                         /*
14016                          * The rule contains the VLAN pattern.
14017                          * For VF we are going to create a VLAN
14018                          * interface to make the hypervisor set the
14019                          * correct e-Switch vport context.
14020                          */
14021                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
14022                 }
14023         }
14024         return 0;
14025 error:
14026         err = rte_errno; /* Save rte_errno before cleanup. */
14027         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
14028                        handle_idx, dh, next) {
14029                 /* hrxq is a union; don't clear it if the flag is not set. */
14030                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
14031                         mlx5_hrxq_release(dev, dh->rix_hrxq);
14032                         dh->rix_hrxq = 0;
14033                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
14034                         dh->rix_srss = 0;
14035                 }
14036                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14037                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14038         }
14039         rte_errno = err; /* Restore rte_errno. */
14040         return -rte_errno;
14041 }
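/*
 * Editor's note: the error path of flow_dv_apply() follows the common
 * DPDK idiom of saving rte_errno before the cleanup calls (which may
 * overwrite it) and restoring it afterwards. A minimal sketch of the
 * idiom, with hypothetical helpers fail() and do_cleanup():
 */
static int
example_errno_preserve(void)
{
        if (fail()) {
                int err = rte_errno; /* Save before cleanup clobbers it. */

                do_cleanup();        /* May set rte_errno internally. */
                rte_errno = err;     /* Restore the original failure cause. */
                return -rte_errno;
        }
        return 0;
}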
14042
14043 void
14044 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
14045                           struct mlx5_list_entry *entry)
14046 {
14047         struct mlx5_flow_dv_matcher *resource = container_of(entry,
14048                                                              typeof(*resource),
14049                                                              entry);
14050
14051         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
14052         mlx5_free(resource);
14053 }
14054
14055 /**
14056  * Release the flow matcher.
14057  *
14058  * @param dev
14059  *   Pointer to Ethernet device.
14060  * @param handle
14061  *   Pointer to the mlx5_flow_handle holding the matcher to release.
14062  *
14063  * @return
14064  *   1 while a reference on it exists, 0 when freed.
14065  */
14066 static int
14067 flow_dv_matcher_release(struct rte_eth_dev *dev,
14068                         struct mlx5_flow_handle *handle)
14069 {
14070         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14071         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14072                                                             typeof(*tbl), tbl);
14073         int ret;
14074
14075         MLX5_ASSERT(matcher->matcher_object);
14076         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14077         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14078         return ret;
14079 }
14080
14081 void
14082 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14083 {
14084         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14085         struct mlx5_flow_dv_encap_decap_resource *res =
14086                                        container_of(entry, typeof(*res), entry);
14087
14088         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14089         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14090 }
14091
14092 /**
14093  * Release an encap/decap resource.
14094  *
14095  * @param dev
14096  *   Pointer to Ethernet device.
14097  * @param encap_decap_idx
14098  *   Index of the encap/decap resource.
14099  *
14100  * @return
14101  *   1 while a reference on it exists, 0 when freed.
14102  */
14103 static int
14104 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14105                                      uint32_t encap_decap_idx)
14106 {
14107         struct mlx5_priv *priv = dev->data->dev_private;
14108         struct mlx5_flow_dv_encap_decap_resource *resource;
14109
14110         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14111                                   encap_decap_idx);
14112         if (!resource)
14113                 return 0;
14114         MLX5_ASSERT(resource->action);
14115         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14116 }
14117
14118 /**
14119  * Release a jump-to-table action resource.
14120  *
14121  * @param dev
14122  *   Pointer to Ethernet device.
14123  * @param rix_jump
14124  *   Index to the jump action resource.
14125  *
14126  * @return
14127  *   1 while a reference on it exists, 0 when freed.
14128  */
14129 static int
14130 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14131                                   uint32_t rix_jump)
14132 {
14133         struct mlx5_priv *priv = dev->data->dev_private;
14134         struct mlx5_flow_tbl_data_entry *tbl_data;
14135
14136         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14137                                   rix_jump);
14138         if (!tbl_data)
14139                 return 0;
14140         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14141 }
14142
14143 void
14144 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14145 {
14146         struct mlx5_flow_dv_modify_hdr_resource *res =
14147                 container_of(entry, typeof(*res), entry);
14148         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14149
14150         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14151         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14152 }
14153
14154 /**
14155  * Release a modify-header resource.
14156  *
14157  * @param dev
14158  *   Pointer to Ethernet device.
14159  * @param handle
14160  *   Pointer to mlx5_flow_handle.
14161  *
14162  * @return
14163  *   1 while a reference on it exists, 0 when freed.
14164  */
14165 static int
14166 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14167                                     struct mlx5_flow_handle *handle)
14168 {
14169         struct mlx5_priv *priv = dev->data->dev_private;
14170         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14171
14172         MLX5_ASSERT(entry->action);
14173         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14174 }
14175
14176 void
14177 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14178 {
14179         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14180         struct mlx5_flow_dv_port_id_action_resource *resource =
14181                                   container_of(entry, typeof(*resource), entry);
14182
14183         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14184         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14185 }
14186
14187 /**
14188  * Release port ID action resource.
14189  *
14190  * @param dev
14191  *   Pointer to Ethernet device.
14192  * @param port_id
14193  *   Index to the port ID action resource.
14194  *
14195  * @return
14196  *   1 while a reference on it exists, 0 when freed.
14197  */
14198 static int
14199 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14200                                         uint32_t port_id)
14201 {
14202         struct mlx5_priv *priv = dev->data->dev_private;
14203         struct mlx5_flow_dv_port_id_action_resource *resource;
14204
14205         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14206         if (!resource)
14207                 return 0;
14208         MLX5_ASSERT(resource->action);
14209         return mlx5_list_unregister(priv->sh->port_id_action_list,
14210                                     &resource->entry);
14211 }
14212
14213 /**
14214  * Release shared RSS action resource.
14215  *
14216  * @param dev
14217  *   Pointer to Ethernet device.
14218  * @param srss
14219  *   Shared RSS action index.
14220  */
14221 static void
14222 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14223 {
14224         struct mlx5_priv *priv = dev->data->dev_private;
14225         struct mlx5_shared_action_rss *shared_rss;
14226
14227         shared_rss = mlx5_ipool_get
14228                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14229         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14230 }
14231
14232 void
14233 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14234 {
14235         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14236         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14237                         container_of(entry, typeof(*resource), entry);
14238
14239         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14240         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14241 }
14242
14243 /**
14244  * Release a push VLAN action resource.
14245  *
14246  * @param dev
14247  *   Pointer to Ethernet device.
14248  * @param handle
14249  *   Pointer to mlx5_flow_handle.
14250  *
14251  * @return
14252  *   1 while a reference on it exists, 0 when freed.
14253  */
14254 static int
14255 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14256                                           struct mlx5_flow_handle *handle)
14257 {
14258         struct mlx5_priv *priv = dev->data->dev_private;
14259         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14260         uint32_t idx = handle->dvh.rix_push_vlan;
14261
14262         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14263         if (!resource)
14264                 return 0;
14265         MLX5_ASSERT(resource->action);
14266         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14267                                     &resource->entry);
14268 }
14269
14270 /**
14271  * Release the fate resource.
14272  *
14273  * @param dev
14274  *   Pointer to Ethernet device.
14275  * @param handle
14276  *   Pointer to mlx5_flow_handle.
14277  */
14278 static void
14279 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14280                                struct mlx5_flow_handle *handle)
14281 {
14282         if (!handle->rix_fate)
14283                 return;
14284         switch (handle->fate_action) {
14285         case MLX5_FLOW_FATE_QUEUE:
14286                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14287                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14288                 break;
14289         case MLX5_FLOW_FATE_JUMP:
14290                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14291                 break;
14292         case MLX5_FLOW_FATE_PORT_ID:
14293                 flow_dv_port_id_action_resource_release(dev,
14294                                 handle->rix_port_id_action);
14295                 break;
14296         default:
14297                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14298                 break;
14299         }
14300         handle->rix_fate = 0;
14301 }
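/*
 * Editor's note: rix_hrxq, rix_jump, rix_port_id_action and rix_fate
 * overlay each other in a union inside struct mlx5_flow_handle and are
 * discriminated by fate_action, which is why the release above must
 * switch on the fate before touching the index. A minimal sketch of
 * the pattern (field layout is illustrative, not the exact driver one):
 */
struct example_fate_handle {
        uint32_t fate_action;                /* Discriminator (MLX5_FLOW_FATE_*). */
        union {
                uint32_t rix_hrxq;           /* Valid for MLX5_FLOW_FATE_QUEUE. */
                uint32_t rix_jump;           /* Valid for MLX5_FLOW_FATE_JUMP. */
                uint32_t rix_port_id_action; /* Valid for MLX5_FLOW_FATE_PORT_ID. */
                uint32_t rix_fate;           /* Generic view of the same slot. */
        };
};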
14302
14303 void
14304 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14305                          struct mlx5_list_entry *entry)
14306 {
14307         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14308                                                               typeof(*resource),
14309                                                               entry);
14310         struct rte_eth_dev *dev = resource->dev;
14311         struct mlx5_priv *priv = dev->data->dev_private;
14312
14313         if (resource->verbs_action)
14314                 claim_zero(mlx5_flow_os_destroy_flow_action
14315                                                       (resource->verbs_action));
14316         if (resource->normal_path_tbl)
14317                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14318                                              resource->normal_path_tbl);
14319         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14320         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14321         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14322 }
14323
14324 /**
14325  * Release a sample resource.
14326  *
14327  * @param dev
14328  *   Pointer to Ethernet device.
14329  * @param handle
14330  *   Pointer to mlx5_flow_handle.
14331  *
14332  * @return
14333  *   1 while a reference on it exists, 0 when freed.
14334  */
14335 static int
14336 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14337                                      struct mlx5_flow_handle *handle)
14338 {
14339         struct mlx5_priv *priv = dev->data->dev_private;
14340         struct mlx5_flow_dv_sample_resource *resource;
14341
14342         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14343                                   handle->dvh.rix_sample);
14344         if (!resource)
14345                 return 0;
14346         MLX5_ASSERT(resource->verbs_action);
14347         return mlx5_list_unregister(priv->sh->sample_action_list,
14348                                     &resource->entry);
14349 }
14350
14351 void
14352 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14353                              struct mlx5_list_entry *entry)
14354 {
14355         struct mlx5_flow_dv_dest_array_resource *resource =
14356                         container_of(entry, typeof(*resource), entry);
14357         struct rte_eth_dev *dev = resource->dev;
14358         struct mlx5_priv *priv = dev->data->dev_private;
14359         uint32_t i = 0;
14360
14361         MLX5_ASSERT(resource->action);
14362         if (resource->action)
14363                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14364         for (; i < resource->num_of_dest; i++)
14365                 flow_dv_sample_sub_actions_release(dev,
14366                                                    &resource->sample_idx[i]);
14367         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14368         DRV_LOG(DEBUG, "destination array resource %p: removed",
14369                 (void *)resource);
14370 }
14371
14372 /**
14373  * Release a destination array resource.
14374  *
14375  * @param dev
14376  *   Pointer to Ethernet device.
14377  * @param handle
14378  *   Pointer to mlx5_flow_handle.
14379  *
14380  * @return
14381  *   1 while a reference on it exists, 0 when freed.
14382  */
14383 static int
14384 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14385                                     struct mlx5_flow_handle *handle)
14386 {
14387         struct mlx5_priv *priv = dev->data->dev_private;
14388         struct mlx5_flow_dv_dest_array_resource *resource;
14389
14390         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14391                                   handle->dvh.rix_dest_array);
14392         if (!resource)
14393                 return 0;
14394         MLX5_ASSERT(resource->action);
14395         return mlx5_list_unregister(priv->sh->dest_array_list,
14396                                     &resource->entry);
14397 }
14398
14399 static void
14400 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14401 {
14402         struct mlx5_priv *priv = dev->data->dev_private;
14403         struct mlx5_dev_ctx_shared *sh = priv->sh;
14404         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14405                                 sh->geneve_tlv_option_resource;
14406         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14407         if (geneve_opt_resource) {
14408                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14409                                          __ATOMIC_RELAXED))) {
14410                         claim_zero(mlx5_devx_cmd_destroy
14411                                         (geneve_opt_resource->obj));
14412                         mlx5_free(sh->geneve_tlv_option_resource);
14413                         sh->geneve_tlv_option_resource = NULL;
14414                 }
14415         }
14416         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14417 }
14418
14419 /**
14420  * Remove the flow from the NIC but keep it in memory.
14421  * Lock free; the mutex should be acquired by the caller.
14422  *
14423  * @param[in] dev
14424  *   Pointer to Ethernet device.
14425  * @param[in, out] flow
14426  *   Pointer to flow structure.
14427  */
14428 static void
14429 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14430 {
14431         struct mlx5_flow_handle *dh;
14432         uint32_t handle_idx;
14433         struct mlx5_priv *priv = dev->data->dev_private;
14434
14435         if (!flow)
14436                 return;
14437         handle_idx = flow->dev_handles;
14438         while (handle_idx) {
14439                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14440                                     handle_idx);
14441                 if (!dh)
14442                         return;
14443                 if (dh->drv_flow) {
14444                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14445                         dh->drv_flow = NULL;
14446                 }
14447                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14448                         flow_dv_fate_resource_release(dev, dh);
14449                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14450                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14451                 handle_idx = dh->next.next;
14452         }
14453 }
14454
14455 /**
14456  * Remove the flow from the NIC and the memory.
14457  * Lock free; the mutex should be acquired by the caller.
14458  *
14459  * @param[in] dev
14460  *   Pointer to the Ethernet device structure.
14461  * @param[in, out] flow
14462  *   Pointer to flow structure.
14463  */
14464 static void
14465 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14466 {
14467         struct mlx5_flow_handle *dev_handle;
14468         struct mlx5_priv *priv = dev->data->dev_private;
14469         struct mlx5_flow_meter_info *fm = NULL;
14470         uint32_t srss = 0;
14471
14472         if (!flow)
14473                 return;
14474         flow_dv_remove(dev, flow);
14475         if (flow->counter) {
14476                 flow_dv_counter_free(dev, flow->counter);
14477                 flow->counter = 0;
14478         }
14479         if (flow->meter) {
14480                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14481                 if (fm)
14482                         mlx5_flow_meter_detach(priv, fm);
14483                 flow->meter = 0;
14484         }
14485         /* Keep the current age handling by default. */
14486         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14487                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14488         else if (flow->age)
14489                 flow_dv_aso_age_release(dev, flow->age);
14490         if (flow->geneve_tlv_option) {
14491                 flow_dv_geneve_tlv_option_resource_release(dev);
14492                 flow->geneve_tlv_option = 0;
14493         }
14494         while (flow->dev_handles) {
14495                 uint32_t tmp_idx = flow->dev_handles;
14496
14497                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14498                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14499                 if (!dev_handle)
14500                         return;
14501                 flow->dev_handles = dev_handle->next.next;
14502                 while (dev_handle->flex_item) {
14503                         int index = rte_bsf32(dev_handle->flex_item);
14504
14505                         mlx5_flex_release_index(dev, index);
14506                         dev_handle->flex_item &= ~RTE_BIT32(index);
14507                 }
14508                 if (dev_handle->dvh.matcher)
14509                         flow_dv_matcher_release(dev, dev_handle);
14510                 if (dev_handle->dvh.rix_sample)
14511                         flow_dv_sample_resource_release(dev, dev_handle);
14512                 if (dev_handle->dvh.rix_dest_array)
14513                         flow_dv_dest_array_resource_release(dev, dev_handle);
14514                 if (dev_handle->dvh.rix_encap_decap)
14515                         flow_dv_encap_decap_resource_release(dev,
14516                                 dev_handle->dvh.rix_encap_decap);
14517                 if (dev_handle->dvh.modify_hdr)
14518                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14519                 if (dev_handle->dvh.rix_push_vlan)
14520                         flow_dv_push_vlan_action_resource_release(dev,
14521                                                                   dev_handle);
14522                 if (dev_handle->dvh.rix_tag)
14523                         flow_dv_tag_release(dev,
14524                                             dev_handle->dvh.rix_tag);
14525                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14526                         flow_dv_fate_resource_release(dev, dev_handle);
14527                 else if (!srss)
14528                         srss = dev_handle->rix_srss;
14529                 if (fm && dev_handle->is_meter_flow_id &&
14530                     dev_handle->split_flow_id)
14531                         mlx5_ipool_free(fm->flow_ipool,
14532                                         dev_handle->split_flow_id);
14533                 else if (dev_handle->split_flow_id &&
14534                     !dev_handle->is_meter_flow_id)
14535                         mlx5_ipool_free(priv->sh->ipool
14536                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14537                                         dev_handle->split_flow_id);
14538                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14539                            tmp_idx);
14540         }
14541         if (srss)
14542                 flow_dv_shared_rss_action_release(dev, srss);
14543 }
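/*
 * Editor's note: flow_dv_destroy() above tears resources down in a
 * strict order: HW rules first (flow_dv_remove), then per-flow objects
 * (counter, meter, CT/age, GENEVE TLV option), then per-handle
 * resources (matcher, sample, destination array, encap/decap, modify
 * header, push VLAN, tag, fate), and the shared RSS reference last,
 * once no handle can still point at it.
 */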
14544
14545 /**
14546  * Release array of hash RX queue objects.
14547  * Helper function.
14548  *
14549  * @param[in] dev
14550  *   Pointer to the Ethernet device structure.
14551  * @param[in, out] hrxqs
14552  *   Array of hash RX queue objects.
14553  *
14554  * @return
14555  *   Total number of references to hash RX queue objects in *hrxqs* array
14556  *   after this operation.
14557  */
14558 static int
14559 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14560                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14561 {
14562         size_t i;
14563         int remaining = 0;
14564
14565         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14566                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14567
14568                 if (!ret)
14569                         (*hrxqs)[i] = 0;
14570                 remaining += ret;
14571         }
14572         return remaining;
14573 }
14574
14575 /**
14576  * Release all hash RX queue objects representing shared RSS action.
14577  *
14578  * @param[in] dev
14579  *   Pointer to the Ethernet device structure.
14580  * @param[in, out] shared_rss
14581  *   Shared RSS action to remove hash RX queue objects from.
14582  *
14583  * @return
14584  *   Total number of references to hash RX queue objects stored in *action*
14585  *   after this operation.
14586  *   Expected to be 0 if no external references are held.
14587  */
14588 static int
14589 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14590                                  struct mlx5_shared_action_rss *shared_rss)
14591 {
14592         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14593 }
14594
14595 /**
14596  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14597  * user input.
14598  *
14599  * Only one hash value is available for one L3+L4 combination.
14600  * For example,
14601  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14602  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
14603  * the same slot in mlx5_rss_hash_fields.
14604  *
14605  * @param[in] rss
14606  *   Pointer to the shared action RSS conf.
14607  * @param[in, out] hash_field
14608  *   Pointer to the hash_field variable to be adjusted.
14609  *
14610  * @return
14611  *   void
14612  */
14613 static void
14614 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14615                                      uint64_t *hash_field)
14616 {
14617         uint64_t rss_types = rss->origin.types;
14618
14619         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14620         case MLX5_RSS_HASH_IPV4:
14621                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14622                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14623                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14624                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14625                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14626                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14627                         else
14628                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14629                 }
14630                 return;
14631         case MLX5_RSS_HASH_IPV6:
14632                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14633                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14634                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14635                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14636                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14637                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14638                         else
14639                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14640                 }
14641                 return;
14642         case MLX5_RSS_HASH_IPV4_UDP:
14643                 /* fall-through. */
14644         case MLX5_RSS_HASH_IPV6_UDP:
14645                 if (rss_types & RTE_ETH_RSS_UDP) {
14646                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14647                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14648                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14649                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14650                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14651                         else
14652                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14653                 }
14654                 return;
14655         case MLX5_RSS_HASH_IPV4_TCP:
14656                 /* fall-through. */
14657         case MLX5_RSS_HASH_IPV6_TCP:
14658                 if (rss_types & RTE_ETH_RSS_TCP) {
14659                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14660                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14661                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14662                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14663                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14664                         else
14665                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14666                 }
14667                 return;
14668         default:
14669                 return;
14670         }
14671 }
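/*
 * Editor's note: a worked example of the adjustment above, assuming
 * MLX5_RSS_HASH_IPV4 == (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
 * as defined in mlx5_flow.h. With rss->origin.types containing
 * RTE_ETH_RSS_L3_SRC_ONLY and an input *hash_field of
 * MLX5_RSS_HASH_IPV4, the IPv4 branch clears both address bits and
 * sets IBV_RX_HASH_SRC_IPV4 only, so the pre-created hrxq hashes on
 * the source address alone while still occupying the shared IPv4 slot.
 */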
14672
14673 /**
14674  * Set up the shared RSS action.
14675  * Prepare set of hash RX queue objects sufficient to handle all valid
14676  * hash_fields combinations (see enum ibv_rx_hash_fields).
14677  *
14678  * @param[in] dev
14679  *   Pointer to the Ethernet device structure.
14680  * @param[in] action_idx
14681  *   Shared RSS action ipool index.
14682  * @param[in, out] shared_rss
14683  *   Partially initialized shared RSS action.
14684  * @param[out] error
14685  *   Perform verbose error reporting if not NULL. Initialized in case of
14686  *   error only.
14687  *
14688  * @return
14689  *   0 on success, otherwise negative errno value.
14690  */
14691 static int
14692 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14693                            uint32_t action_idx,
14694                            struct mlx5_shared_action_rss *shared_rss,
14695                            struct rte_flow_error *error)
14696 {
14697         struct mlx5_flow_rss_desc rss_desc = { 0 };
14698         size_t i;
14699         int err;
14700
14701         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
14702                                      !!dev->data->dev_started)) {
14703                 return rte_flow_error_set(error, rte_errno,
14704                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14705                                           "cannot setup indirection table");
14706         }
14707         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14708         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14709         rss_desc.const_q = shared_rss->origin.queue;
14710         rss_desc.queue_num = shared_rss->origin.queue_num;
14711         /* Set non-zero value to indicate a shared RSS. */
14712         rss_desc.shared_rss = action_idx;
14713         rss_desc.ind_tbl = shared_rss->ind_tbl;
14714         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14715                 uint32_t hrxq_idx;
14716                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14717                 int tunnel = 0;
14718
14719                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14720                 if (shared_rss->origin.level > 1) {
14721                         hash_fields |= IBV_RX_HASH_INNER;
14722                         tunnel = 1;
14723                 }
14724                 rss_desc.tunnel = tunnel;
14725                 rss_desc.hash_fields = hash_fields;
14726                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14727                 if (!hrxq_idx) {
14728                         rte_flow_error_set
14729                                 (error, rte_errno,
14730                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14731                                  "cannot get hash queue");
14732                         goto error_hrxq_new;
14733                 }
14734                 err = __flow_dv_action_rss_hrxq_set
14735                         (shared_rss, hash_fields, hrxq_idx);
14736                 MLX5_ASSERT(!err);
14737         }
14738         return 0;
14739 error_hrxq_new:
14740         err = rte_errno;
14741         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14742         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
14743                 shared_rss->ind_tbl = NULL;
14744         rte_errno = err;
14745         return -rte_errno;
14746 }
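/*
 * Editor's note: the setup above pre-creates one hash RX queue object
 * per slot of mlx5_rss_hash_fields, so later flow insertion
 * (flow_dv_apply) only performs the lock-free slot lookup in
 * __flow_dv_action_rss_hrxq_lookup() instead of creating queue objects
 * on the hot path.
 */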
14747
14748 /**
14749  * Create shared RSS action.
14750  *
14751  * @param[in] dev
14752  *   Pointer to the Ethernet device structure.
14753  * @param[in] conf
14754  *   Shared action configuration.
14755  * @param[in] rss
14756  *   RSS action specification used to create shared action.
14757  * @param[out] error
14758  *   Perform verbose error reporting if not NULL. Initialized in case of
14759  *   error only.
14760  *
14761  * @return
14762  *   A valid shared action ID in case of success, 0 otherwise and
14763  *   rte_errno is set.
14764  */
14765 static uint32_t
14766 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14767                             const struct rte_flow_indir_action_conf *conf,
14768                             const struct rte_flow_action_rss *rss,
14769                             struct rte_flow_error *error)
14770 {
14771         struct mlx5_priv *priv = dev->data->dev_private;
14772         struct mlx5_shared_action_rss *shared_rss = NULL;
14773         void *queue = NULL;
14774         struct rte_flow_action_rss *origin;
14775         const uint8_t *rss_key;
14776         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14777         uint32_t idx;
14778
14779         RTE_SET_USED(conf);
14780         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14781                             0, SOCKET_ID_ANY);
14782         shared_rss = mlx5_ipool_zmalloc
14783                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14784         if (!shared_rss || !queue) {
14785                 rte_flow_error_set(error, ENOMEM,
14786                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14787                                    "cannot allocate resource memory");
14788                 goto error_rss_init;
14789         }
14790         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14791                 rte_flow_error_set(error, E2BIG,
14792                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14793                                    "rss action number out of range");
14794                 goto error_rss_init;
14795         }
14796         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14797                                           sizeof(*shared_rss->ind_tbl),
14798                                           0, SOCKET_ID_ANY);
14799         if (!shared_rss->ind_tbl) {
14800                 rte_flow_error_set(error, ENOMEM,
14801                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14802                                    "cannot allocate resource memory");
14803                 goto error_rss_init;
14804         }
14805         memcpy(queue, rss->queue, queue_size);
14806         shared_rss->ind_tbl->queues = queue;
14807         shared_rss->ind_tbl->queues_n = rss->queue_num;
14808         origin = &shared_rss->origin;
14809         origin->func = rss->func;
14810         origin->level = rss->level;
14811         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14812         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14813         /* NULL RSS key indicates default RSS key. */
14814         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14815         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14816         origin->key = &shared_rss->key[0];
14817         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14818         origin->queue = queue;
14819         origin->queue_num = rss->queue_num;
14820         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14821                 goto error_rss_init;
14822         rte_spinlock_init(&shared_rss->action_rss_sl);
14823         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14824         rte_spinlock_lock(&priv->shared_act_sl);
14825         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14826                      &priv->rss_shared_actions, idx, shared_rss, next);
14827         rte_spinlock_unlock(&priv->shared_act_sl);
14828         return idx;
14829 error_rss_init:
14830         if (shared_rss) {
14831                 if (shared_rss->ind_tbl)
14832                         mlx5_free(shared_rss->ind_tbl);
14833                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14834                                 idx);
14835         }
14836         if (queue)
14837                 mlx5_free(queue);
14838         return 0;
14839 }
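/*
 * Editor's note: a minimal application-side sketch of how the creation
 * path above is reached through the generic rte_flow API. Port id and
 * queue numbers are illustrative:
 */
static struct rte_flow_action_handle *
example_create_shared_rss(uint16_t port_id, struct rte_flow_error *error)
{
        static const uint16_t queues[] = { 0, 1, 2, 3 };
        const struct rte_flow_action_rss rss = {
                .level = 0,              /* Outer-header RSS. */
                .types = RTE_ETH_RSS_IP, /* Hash on L3 addresses. */
                .queue = queues,
                .queue_num = RTE_DIM(queues),
                /* NULL key selects rss_hash_default_key (see above). */
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };
        const struct rte_flow_indir_action_conf conf = {
                .ingress = 1,
        };

        return rte_flow_action_handle_create(port_id, &conf, &action, error);
}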
14840
14841 /**
14842  * Destroy the shared RSS action.
14843  * Release related hash RX queue objects.
14844  *
14845  * @param[in] dev
14846  *   Pointer to the Ethernet device structure.
14847  * @param[in] idx
14848  *   The shared RSS action object ID to be removed.
14849  * @param[out] error
14850  *   Perform verbose error reporting if not NULL. Initialized in case of
14851  *   error only.
14852  *
14853  * @return
14854  *   0 on success, otherwise negative errno value.
14855  */
14856 static int
14857 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14858                              struct rte_flow_error *error)
14859 {
14860         struct mlx5_priv *priv = dev->data->dev_private;
14861         struct mlx5_shared_action_rss *shared_rss =
14862             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14863         uint32_t old_refcnt = 1;
14864         int remaining;
14865         uint16_t *queue = NULL;
14866
14867         if (!shared_rss)
14868                 return rte_flow_error_set(error, EINVAL,
14869                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14870                                           "invalid shared action");
14871         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14872                                          0, 0, __ATOMIC_ACQUIRE,
14873                                          __ATOMIC_RELAXED))
14874                 return rte_flow_error_set(error, EBUSY,
14875                                           RTE_FLOW_ERROR_TYPE_ACTION,
14876                                           NULL,
14877                                           "shared rss has references");
14878         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14879         if (remaining)
14880                 return rte_flow_error_set(error, EBUSY,
14881                                           RTE_FLOW_ERROR_TYPE_ACTION,
14882                                           NULL,
14883                                           "shared rss hrxq has references");
14884         queue = shared_rss->ind_tbl->queues;
14885         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
14886                                                !!dev->data->dev_started);
14887         if (remaining)
14888                 return rte_flow_error_set(error, EBUSY,
14889                                           RTE_FLOW_ERROR_TYPE_ACTION,
14890                                           NULL,
14891                                           "shared rss indirection table has"
14892                                           " references");
14893         mlx5_free(queue);
14894         rte_spinlock_lock(&priv->shared_act_sl);
14895         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14896                      &priv->rss_shared_actions, idx, shared_rss, next);
14897         rte_spinlock_unlock(&priv->shared_act_sl);
14898         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14899                         idx);
14900         return 0;
14901 }
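/*
 * Editor's note: the release above claims the last reference with a
 * single compare-and-swap: it proceeds only if refcnt is exactly 1,
 * atomically dropping it to 0, so a concurrent holder of an extra
 * reference makes the destroy fail with EBUSY instead of racing the
 * teardown. A minimal sketch of the idiom:
 */
static int
example_claim_last_ref(uint32_t *refcnt)
{
        uint32_t expected = 1;

        if (!__atomic_compare_exchange_n(refcnt, &expected, 0,
                                         false /* strong CAS */,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                return -EBUSY; /* Another reference is still held. */
        return 0;              /* Caller now owns the teardown. */
}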
14902
14903 /**
14904  * Create an indirect action, lock free;
14905  * the mutex should be acquired by the caller.
14906  * Dispatcher for action type specific call.
14907  *
14908  * @param[in] dev
14909  *   Pointer to the Ethernet device structure.
14910  * @param[in] conf
14911  *   Shared action configuration.
14912  * @param[in] action
14913  *   Action specification used to create indirect action.
14914  * @param[out] err
14915  *   Perform verbose error reporting if not NULL. Initialized in case of
14916  *   error only.
14917  *
14918  * @return
14919  *   A valid shared action handle in case of success, NULL otherwise and
14920  *   rte_errno is set.
14921  */
14922 static struct rte_flow_action_handle *
14923 flow_dv_action_create(struct rte_eth_dev *dev,
14924                       const struct rte_flow_indir_action_conf *conf,
14925                       const struct rte_flow_action *action,
14926                       struct rte_flow_error *err)
14927 {
14928         struct mlx5_priv *priv = dev->data->dev_private;
14929         uint32_t age_idx = 0;
14930         uint32_t idx = 0;
14931         uint32_t ret = 0;
14932
14933         switch (action->type) {
14934         case RTE_FLOW_ACTION_TYPE_RSS:
14935                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14936                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14937                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14938                 break;
14939         case RTE_FLOW_ACTION_TYPE_AGE:
14940                 age_idx = flow_dv_aso_age_alloc(dev, err);
14941                 if (!age_idx) {
14942                         ret = -rte_errno;
14943                         break;
14944                 }
14945                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14946                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14947                 flow_dv_aso_age_params_init(dev, age_idx,
14948                                         ((const struct rte_flow_action_age *)
14949                                                 action->conf)->context ?
14950                                         ((const struct rte_flow_action_age *)
14951                                                 action->conf)->context :
14952                                         (void *)(uintptr_t)idx,
14953                                         ((const struct rte_flow_action_age *)
14954                                                 action->conf)->timeout);
14955                 ret = age_idx;
14956                 break;
14957         case RTE_FLOW_ACTION_TYPE_COUNT:
14958                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14959                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14960                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14961                 break;
14962         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14963                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14964                                                          err);
14965                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14966                 break;
14967         default:
14968                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14969                                    NULL, "action type not supported");
14970                 break;
14971         }
14972         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14973 }
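/*
 * Editor's note: the returned rte_flow_action_handle is not a real
 * pointer but a 32-bit value packing the indirect action type and the
 * per-type object index, cast to a pointer. A sketch of the round-trip
 * shared by the create and destroy paths:
 */
static inline uint32_t
example_encode_handle(uint32_t type, uint32_t obj_idx)
{
        return (type << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | obj_idx;
}

static inline void
example_decode_handle(const struct rte_flow_action_handle *handle,
                      uint32_t *type, uint32_t *obj_idx)
{
        uint32_t act_idx = (uint32_t)(uintptr_t)handle;

        *type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
        *obj_idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
}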
14974
14975 /**
14976  * Destroy the indirect action.
14977  * Release action related resources on the NIC and the memory.
14978  * Lock free; the mutex should be acquired by the caller.
14979  * Dispatcher for action type specific call.
14980  *
14981  * @param[in] dev
14982  *   Pointer to the Ethernet device structure.
14983  * @param[in] handle
14984  *   The indirect action object handle to be removed.
14985  * @param[out] error
14986  *   Perform verbose error reporting if not NULL. Initialized in case of
14987  *   error only.
14988  *
14989  * @return
14990  *   0 on success, otherwise negative errno value.
14991  */
14992 static int
14993 flow_dv_action_destroy(struct rte_eth_dev *dev,
14994                        struct rte_flow_action_handle *handle,
14995                        struct rte_flow_error *error)
14996 {
14997         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14998         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14999         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15000         struct mlx5_flow_counter *cnt;
15001         uint32_t no_flow_refcnt = 1;
15002         int ret;
15003
15004         switch (type) {
15005         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15006                 return __flow_dv_action_rss_release(dev, idx, error);
15007         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15008                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
15009                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
15010                                                  &no_flow_refcnt, 1, false,
15011                                                  __ATOMIC_ACQUIRE,
15012                                                  __ATOMIC_RELAXED))
15013                         return rte_flow_error_set(error, EBUSY,
15014                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15015                                                   NULL,
15016                                                   "Indirect count action has references");
15017                 flow_dv_counter_free(dev, idx);
15018                 return 0;
15019         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15020                 ret = flow_dv_aso_age_release(dev, idx);
15021                 if (ret)
15022                         /*
15023                          * In this case, the last flow holding a reference
15024                          * will actually release the age action.
15025                          */
15026                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
15027                                 " released with references %d.", idx, ret);
15028                 return 0;
15029         case MLX5_INDIRECT_ACTION_TYPE_CT:
15030                 ret = flow_dv_aso_ct_release(dev, idx, error);
15031                 if (ret < 0)
15032                         return ret;
15033                 if (ret > 0)
15034                         DRV_LOG(DEBUG, "Connection tracking object %u still "
15035                                 "has references %d.", idx, ret);
15036                 return 0;
15037         default:
15038                 return rte_flow_error_set(error, ENOTSUP,
15039                                           RTE_FLOW_ERROR_TYPE_ACTION,
15040                                           NULL,
15041                                           "action type not supported");
15042         }
15043 }
15044
15045 /**
15046  * Update the shared RSS action configuration in place.
15047  *
15048  * @param[in] dev
15049  *   Pointer to the Ethernet device structure.
15050  * @param[in] idx
15051  *   The shared RSS action object ID to be updated.
15052  * @param[in] action_conf
15053  *   RSS action specification used to modify *shared_rss*.
15054  * @param[out] error
15055  *   Perform verbose error reporting if not NULL. Initialized in case of
15056  *   error only.
15057  *
15058  * @return
15059  *   0 on success, otherwise negative errno value.
15060  * @note: currently only updating the RSS queue set is supported.
15061  */
15062 static int
15063 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
15064                             const struct rte_flow_action_rss *action_conf,
15065                             struct rte_flow_error *error)
15066 {
15067         struct mlx5_priv *priv = dev->data->dev_private;
15068         struct mlx5_shared_action_rss *shared_rss =
15069             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15070         int ret = 0;
15071         void *queue = NULL;
15072         uint16_t *queue_old = NULL;
15073         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15074         bool dev_started = !!dev->data->dev_started;
15075
15076         if (!shared_rss)
15077                 return rte_flow_error_set(error, EINVAL,
15078                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15079                                           "invalid shared action to update");
15080         if (priv->obj_ops.ind_table_modify == NULL)
15081                 return rte_flow_error_set(error, ENOTSUP,
15082                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15083                                           "cannot modify indirection table");
15084         queue = mlx5_malloc(MLX5_MEM_ZERO,
15085                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15086                             0, SOCKET_ID_ANY);
15087         if (!queue)
15088                 return rte_flow_error_set(error, ENOMEM,
15089                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15090                                           NULL,
15091                                           "cannot allocate resource memory");
15092         memcpy(queue, action_conf->queue, queue_size);
15093         MLX5_ASSERT(shared_rss->ind_tbl);
15094         rte_spinlock_lock(&shared_rss->action_rss_sl);
15095         queue_old = shared_rss->ind_tbl->queues;
15096         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15097                                         queue, action_conf->queue_num,
15098                                         true /* standalone */,
15099                                         dev_started /* ref_new_qs */,
15100                                         dev_started /* deref_old_qs */);
15101         if (ret) {
15102                 mlx5_free(queue);
15103                 ret = rte_flow_error_set(error, rte_errno,
15104                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15105                                           "cannot update indirection table");
15106         } else {
15107                 mlx5_free(queue_old);
15108                 shared_rss->origin.queue = queue;
15109                 shared_rss->origin.queue_num = action_conf->queue_num;
15110         }
15111         rte_spinlock_unlock(&shared_rss->action_rss_sl);
15112         return ret;
15113 }
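
/*
 * Illustrative sketch (not part of this file): how an application reaches
 * __flow_dv_action_rss_update() through the generic rte_flow API. The
 * port_id, queue array and handle below are hypothetical; per the note
 * above, only the RSS queue set can be updated.
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss conf = {
 *		.queue = queues,
 *		.queue_num = RTE_DIM(queues),
 *	};
 *	struct rte_flow_action update = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &conf,
 *	};
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_action_handle_update(port_id, handle, &update, &error))
 *		printf("RSS update failed: %s\n", error.message);
 */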
15114
15115 /**
15116  * Update the conntrack context or direction in place.
15117  * Context update should be synchronized.
15118  *
15119  * @param[in] dev
15120  *   Pointer to the Ethernet device structure.
15121  * @param[in] idx
15122  *   The conntrack object ID to be updated.
15123  * @param[in] update
15124  *   Pointer to the structure of information to update.
15125  * @param[out] error
15126  *   Perform verbose error reporting if not NULL. Initialized in case of
15127  *   error only.
15128  *
15129  * @return
15130  *   0 on success, otherwise negative errno value.
15131  */
15132 static int
15133 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15134                            const struct rte_flow_modify_conntrack *update,
15135                            struct rte_flow_error *error)
15136 {
15137         struct mlx5_priv *priv = dev->data->dev_private;
15138         struct mlx5_aso_ct_action *ct;
15139         const struct rte_flow_action_conntrack *new_prf;
15140         int ret = 0;
15141         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15142         uint32_t dev_idx;
15143
15144         if (PORT_ID(priv) != owner)
15145                 return rte_flow_error_set(error, EACCES,
15146                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15147                                           NULL,
15148                                           "CT object owned by another port");
15149         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15150         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15151         if (!ct->refcnt)
15152                 return rte_flow_error_set(error, ENOMEM,
15153                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15154                                           NULL,
15155                                           "CT object is inactive");
15156         new_prf = &update->new_ct;
15157         if (update->direction)
15158                 ct->is_original = !!new_prf->is_original_dir;
15159         if (update->state) {
15160                 /* Only validate the profile when it needs to be updated. */
15161                 ret = mlx5_validate_action_ct(dev, new_prf, error);
15162                 if (ret)
15163                         return ret;
15164                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15165                 if (ret)
15166                         return rte_flow_error_set(error, EIO,
15167                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15168                                         NULL,
15169                                         "Failed to send CT context update WQE");
15170                 /* Block until ready or a failure. */
15171                 ret = mlx5_aso_ct_available(priv->sh, ct);
15172                 if (ret)
15173                         rte_flow_error_set(error, rte_errno,
15174                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15175                                            NULL,
15176                                            "Timeout to get the CT update");
15177         }
15178         return ret;
15179 }
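
/*
 * Illustrative sketch (not part of this file): building the update
 * descriptor consumed above. Setting only "direction" flips
 * ct->is_original without touching hardware; setting "state" also sends
 * the ASO WQE with the new profile. The handle and port_id are
 * hypothetical.
 *
 *	struct rte_flow_modify_conntrack mod = {
 *		.new_ct = { .is_original_dir = 1, },
 *		.direction = 1,
 *		.state = 0,
 *	};
 *	struct rte_flow_error error;
 *
 *	rte_flow_action_handle_update(port_id, handle, &mod, &error);
 */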
15180
15181 /**
15182  * Update the shared action configuration in place, lock free
15183  * (mutex should be acquired by the caller).
15184  *
15185  * @param[in] dev
15186  *   Pointer to the Ethernet device structure.
15187  * @param[in] handle
15188  *   The indirect action object handle to be updated.
15189  * @param[in] update
15190  *   Action specification used to modify the action pointed by *handle*.
15191  *   *update* can be of the same type as the action pointed to by the
15192  *   *handle* argument, or some other structure, e.g. a wrapper, depending on
15193  *   the indirect action type.
15194  * @param[out] error
15195  *   Perform verbose error reporting if not NULL. Initialized in case of
15196  *   error only.
15197  *
15198  * @return
15199  *   0 on success, otherwise negative errno value.
15200  */
15201 static int
15202 flow_dv_action_update(struct rte_eth_dev *dev,
15203                         struct rte_flow_action_handle *handle,
15204                         const void *update,
15205                         struct rte_flow_error *err)
15206 {
15207         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15208         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15209         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15210         const void *action_conf;
15211
15212         switch (type) {
15213         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15214                 action_conf = ((const struct rte_flow_action *)update)->conf;
15215                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15216         case MLX5_INDIRECT_ACTION_TYPE_CT:
15217                 return __flow_dv_action_ct_update(dev, idx, update, err);
15218         default:
15219                 return rte_flow_error_set(err, ENOTSUP,
15220                                           RTE_FLOW_ERROR_TYPE_ACTION,
15221                                           NULL,
15222                                           "action type update not supported");
15223         }
15224 }
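
/*
 * Illustrative note: the indirect action handle is an encoded index, not
 * a pointer. The action type occupies the bits at and above
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET and the object index the bits below,
 * which is how flow_dv_action_update() above decodes it:
 *
 *	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *	uint32_t idx = act_idx &
 *		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 */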
15225
15226 /**
15227  * Destroy the meter sub policy table rules.
15228  * Lock free (mutex should be acquired by the caller).
15229  *
15230  * @param[in] dev
15231  *   Pointer to Ethernet device.
15232  * @param[in] sub_policy
15233  *   Pointer to meter sub policy table.
15234  */
15235 static void
15236 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15237                              struct mlx5_flow_meter_sub_policy *sub_policy)
15238 {
15239         struct mlx5_priv *priv = dev->data->dev_private;
15240         struct mlx5_flow_tbl_data_entry *tbl;
15241         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15242         struct mlx5_flow_meter_info *next_fm;
15243         struct mlx5_sub_policy_color_rule *color_rule;
15244         void *tmp;
15245         uint32_t i;
15246
15247         for (i = 0; i < RTE_COLORS; i++) {
15248                 next_fm = NULL;
15249                 if (i == RTE_COLOR_GREEN && policy &&
15250                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15251                         next_fm = mlx5_flow_meter_find(priv,
15252                                         policy->act_cnt[i].next_mtr_id, NULL);
15253                 RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15254                                    next_port, tmp) {
15255                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15256                         tbl = container_of(color_rule->matcher->tbl,
15257                                            typeof(*tbl), tbl);
15258                         mlx5_list_unregister(tbl->matchers,
15259                                              &color_rule->matcher->entry);
15260                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15261                                      color_rule, next_port);
15262                         mlx5_free(color_rule);
15263                         if (next_fm)
15264                                 mlx5_flow_meter_detach(priv, next_fm);
15265                 }
15266         }
15267         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15268                 if (sub_policy->rix_hrxq[i]) {
15269                         if (policy && !policy->is_hierarchy)
15270                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15271                         sub_policy->rix_hrxq[i] = 0;
15272                 }
15273                 if (sub_policy->jump_tbl[i]) {
15274                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15275                                                      sub_policy->jump_tbl[i]);
15276                         sub_policy->jump_tbl[i] = NULL;
15277                 }
15278         }
15279         if (sub_policy->tbl_rsc) {
15280                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15281                                              sub_policy->tbl_rsc);
15282                 sub_policy->tbl_rsc = NULL;
15283         }
15284 }
15285
15286 /**
15287  * Destroy the meter policy rules, lock free
15288  * (mutex should be acquired by the caller).
15289  * Iterates over all domains and their sub-policies.
15290  *
15291  * @param[in] dev
15292  *   Pointer to the Ethernet device structure.
15293  * @param[in] mtr_policy
15294  *   Meter policy struct.
15295  */
15296 static void
15297 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15298                              struct mlx5_flow_meter_policy *mtr_policy)
15299 {
15300         uint32_t i, j;
15301         struct mlx5_flow_meter_sub_policy *sub_policy;
15302         uint16_t sub_policy_num;
15303
15304         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15305                 sub_policy_num = (mtr_policy->sub_policy_num >>
15306                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15307                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15308                 for (j = 0; j < sub_policy_num; j++) {
15309                         sub_policy = mtr_policy->sub_policys[i][j];
15310                         if (sub_policy)
15311                                 __flow_dv_destroy_sub_policy_rules(dev,
15312                                                                    sub_policy);
15313                 }
15314         }
15315 }
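
/*
 * Illustrative note: mtr_policy->sub_policy_num packs one per-domain
 * sub-policy count per MLX5_MTR_SUB_POLICY_NUM_SHIFT-bit field. A sketch
 * of how a count "num" would be encoded for a given domain (the decode
 * is the shift-and-mask in the loop above):
 *
 *	pol->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
 *				 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
 *	pol->sub_policy_num |= (num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
 *			       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
 */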
15316
15317 /**
15318  * Destroy the meter policy actions, lock free
15319  * (mutex should be acquired by the caller).
15320  * Dispatcher for the fate-action-specific release.
15321  *
15322  * @param[in] dev
15323  *   Pointer to the Ethernet device structure.
15324  * @param[in] mtr_policy
15325  *   Meter policy struct.
15326  */
15327 static void
15328 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15329                       struct mlx5_flow_meter_policy *mtr_policy)
15330 {
15331         struct rte_flow_action *rss_action;
15332         struct mlx5_flow_handle dev_handle;
15333         uint32_t i, j;
15334
15335         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15336                 if (mtr_policy->act_cnt[i].rix_mark) {
15337                         flow_dv_tag_release(dev,
15338                                 mtr_policy->act_cnt[i].rix_mark);
15339                         mtr_policy->act_cnt[i].rix_mark = 0;
15340                 }
15341                 if (mtr_policy->act_cnt[i].modify_hdr) {
15342                         dev_handle.dvh.modify_hdr =
15343                                 mtr_policy->act_cnt[i].modify_hdr;
15344                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15345                 }
15346                 switch (mtr_policy->act_cnt[i].fate_action) {
15347                 case MLX5_FLOW_FATE_SHARED_RSS:
15348                         rss_action = mtr_policy->act_cnt[i].rss;
15349                         mlx5_free(rss_action);
15350                         break;
15351                 case MLX5_FLOW_FATE_PORT_ID:
15352                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15353                                 flow_dv_port_id_action_resource_release(dev,
15354                                 mtr_policy->act_cnt[i].rix_port_id_action);
15355                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15356                         }
15357                         break;
15358                 case MLX5_FLOW_FATE_DROP:
15359                 case MLX5_FLOW_FATE_JUMP:
15360                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15361                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15362                                                 NULL;
15363                         break;
15364                 default:
15365                         /* Queue action: nothing to do. */
15366                         break;
15367                 }
15368         }
15369         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15370                 mtr_policy->dr_drop_action[j] = NULL;
15371 }
15372
15373 /**
15374  * Create the policy actions for one meter domain, lock free
15375  * (mutex should be acquired by the caller).
15376  * Dispatcher for action type specific call.
15377  *
15378  * @param[in] dev
15379  *   Pointer to the Ethernet device structure.
15380  * @param[in] mtr_policy
15381  *   Meter policy struct.
15382  * @param[in] actions
15383  *   Per-color action specifications used to create the meter actions.
15384  * @param[out] error
15385  *   Perform verbose error reporting if not NULL. Initialized in case of
15386  *   error only.
15387  *
15388  * @return
15389  *   0 on success, otherwise negative errno value.
15390  */
15391 static int
15392 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15393                         struct mlx5_flow_meter_policy *mtr_policy,
15394                         const struct rte_flow_action *actions[RTE_COLORS],
15395                         enum mlx5_meter_domain domain,
15396                         struct rte_mtr_error *error)
15397 {
15398         struct mlx5_priv *priv = dev->data->dev_private;
15399         struct rte_flow_error flow_err;
15400         const struct rte_flow_action *act;
15401         uint64_t action_flags;
15402         struct mlx5_flow_handle dh;
15403         struct mlx5_flow dev_flow;
15404         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15405         int i, ret;
15406         uint8_t egress, transfer;
15407         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15408         union {
15409                 struct mlx5_flow_dv_modify_hdr_resource res;
15410                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15411                             sizeof(struct mlx5_modification_cmd) *
15412                             (MLX5_MAX_MODIFY_NUM + 1)];
15413         } mhdr_dummy;
15414         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15415         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
15416
15417         MLX5_ASSERT(wks);
15418         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15419         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15420         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15421         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15422         memset(&port_id_action, 0,
15423                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15424         memset(mhdr_res, 0, sizeof(*mhdr_res));
15425         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15426                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15427                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15428         dev_flow.handle = &dh;
15429         dev_flow.dv.port_id_action = &port_id_action;
15430         dev_flow.external = true;
15431         for (i = 0; i < RTE_COLORS; i++) {
15432                 if (i < MLX5_MTR_RTE_COLORS)
15433                         act_cnt = &mtr_policy->act_cnt[i];
15434                 /* Skip the color policy actions creation. */
15435                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15436                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15437                         continue;
15438                 action_flags = 0;
15439                 for (act = actions[i];
15440                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15441                         switch (act->type) {
15442                         case RTE_FLOW_ACTION_TYPE_MARK:
15443                         {
15444                                 uint32_t tag_be = mlx5_flow_mark_set
15445                                         (((const struct rte_flow_action_mark *)
15446                                         (act->conf))->id);
15447
15448                                 if (i >= MLX5_MTR_RTE_COLORS)
15449                                         return -rte_mtr_error_set(error,
15450                                           ENOTSUP,
15451                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15452                                           NULL,
15453                                           "cannot create policy "
15454                                           "mark action for this color");
15455                                 wks->mark = 1;
15456                                 if (flow_dv_tag_resource_register(dev, tag_be,
15457                                                   &dev_flow, &flow_err))
15458                                         return -rte_mtr_error_set(error,
15459                                         ENOTSUP,
15460                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15461                                         NULL,
15462                                         "cannot setup policy mark action");
15463                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15464                                 act_cnt->rix_mark =
15465                                         dev_flow.handle->dvh.rix_tag;
15466                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15467                                 break;
15468                         }
15469                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15470                                 if (i >= MLX5_MTR_RTE_COLORS)
15471                                         return -rte_mtr_error_set(error,
15472                                           ENOTSUP,
15473                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15474                                           NULL,
15475                                           "cannot create policy "
15476                                           "set tag action for this color");
15477                                 if (flow_dv_convert_action_set_tag
15478                                 (dev, mhdr_res,
15479                                 (const struct rte_flow_action_set_tag *)
15480                                 act->conf,  &flow_err))
15481                                         return -rte_mtr_error_set(error,
15482                                         ENOTSUP,
15483                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15484                                         NULL, "cannot convert policy "
15485                                         "set tag action");
15486                                 if (!mhdr_res->actions_num)
15487                                         return -rte_mtr_error_set(error,
15488                                         ENOTSUP,
15489                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15490                                         NULL, "cannot find policy "
15491                                         "set tag action");
15492                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15493                                 break;
15494                         case RTE_FLOW_ACTION_TYPE_DROP:
15495                         {
15496                                 struct mlx5_flow_mtr_mng *mtrmng =
15497                                                 priv->sh->mtrmng;
15498                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15499
15500                                 /*
15501                                  * Create the drop table with
15502                                  * METER DROP level.
15503                                  */
15504                                 if (!mtrmng->drop_tbl[domain]) {
15505                                         mtrmng->drop_tbl[domain] =
15506                                         flow_dv_tbl_resource_get(dev,
15507                                         MLX5_FLOW_TABLE_LEVEL_METER,
15508                                         egress, transfer, false, NULL, 0,
15509                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15510                                         if (!mtrmng->drop_tbl[domain])
15511                                                 return -rte_mtr_error_set
15512                                         (error, ENOTSUP,
15513                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15514                                         NULL,
15515                                         "Failed to create meter drop table");
15516                                 }
15517                                 tbl_data = container_of
15518                                 (mtrmng->drop_tbl[domain],
15519                                 struct mlx5_flow_tbl_data_entry, tbl);
15520                                 if (i < MLX5_MTR_RTE_COLORS) {
15521                                         act_cnt->dr_jump_action[domain] =
15522                                                 tbl_data->jump.action;
15523                                         act_cnt->fate_action =
15524                                                 MLX5_FLOW_FATE_DROP;
15525                                 }
15526                                 if (i == RTE_COLOR_RED)
15527                                         mtr_policy->dr_drop_action[domain] =
15528                                                 tbl_data->jump.action;
15529                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15530                                 break;
15531                         }
15532                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15533                         {
15534                                 if (i >= MLX5_MTR_RTE_COLORS)
15535                                         return -rte_mtr_error_set(error,
15536                                         ENOTSUP,
15537                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15538                                         NULL, "cannot create policy "
15539                                         "fate queue for this color");
15540                                 act_cnt->queue =
15541                                 ((const struct rte_flow_action_queue *)
15542                                         (act->conf))->index;
15543                                 act_cnt->fate_action =
15544                                         MLX5_FLOW_FATE_QUEUE;
15545                                 dev_flow.handle->fate_action =
15546                                         MLX5_FLOW_FATE_QUEUE;
15547                                 mtr_policy->is_queue = 1;
15548                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15549                                 break;
15550                         }
15551                         case RTE_FLOW_ACTION_TYPE_RSS:
15552                         {
15553                                 int rss_size;
15554
15555                                 if (i >= MLX5_MTR_RTE_COLORS)
15556                                         return -rte_mtr_error_set(error,
15557                                           ENOTSUP,
15558                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15559                                           NULL,
15560                                           "cannot create policy "
15561                                           "rss action for this color");
15562                                 /*
15563                                  * Save RSS conf into policy struct
15564                                  * for translate stage.
15565                                  */
15566                                 rss_size = (int)rte_flow_conv
15567                                         (RTE_FLOW_CONV_OP_ACTION,
15568                                         NULL, 0, act, &flow_err);
15569                                 if (rss_size <= 0)
15570                                         return -rte_mtr_error_set(error,
15571                                           ENOTSUP,
15572                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15573                                           NULL, "wrong RSS action "
15574                                           "struct size");
15575                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15576                                                 rss_size, 0, SOCKET_ID_ANY);
15577                                 if (!act_cnt->rss)
15578                                         return -rte_mtr_error_set(error,
15579                                           ENOTSUP,
15580                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15581                                           NULL,
15582                                           "Failed to allocate RSS action memory");
15583                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15584                                         act_cnt->rss, rss_size,
15585                                         act, &flow_err);
15586                                 if (ret < 0)
15587                                         return -rte_mtr_error_set(error,
15588                                           ENOTSUP,
15589                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15590                                           NULL, "Failed to save "
15591                                           "RSS action into policy struct");
15592                                 act_cnt->fate_action =
15593                                         MLX5_FLOW_FATE_SHARED_RSS;
15594                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15595                                 break;
15596                         }
15597                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15598                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15599                         {
15600                                 struct mlx5_flow_dv_port_id_action_resource
15601                                         port_id_resource;
15602                                 uint32_t port_id = 0;
15603
15604                                 if (i >= MLX5_MTR_RTE_COLORS)
15605                                         return -rte_mtr_error_set(error,
15606                                         ENOTSUP,
15607                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15608                                         NULL, "cannot create policy "
15609                                         "port action for this color");
15610                                 memset(&port_id_resource, 0,
15611                                         sizeof(port_id_resource));
15612                                 if (flow_dv_translate_action_port_id(dev, act,
15613                                                 &port_id, &flow_err))
15614                                         return -rte_mtr_error_set(error,
15615                                         ENOTSUP,
15616                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15617                                         NULL, "cannot translate "
15618                                         "policy port action");
15619                                 port_id_resource.port_id = port_id;
15620                                 if (flow_dv_port_id_action_resource_register
15621                                         (dev, &port_id_resource,
15622                                         &dev_flow, &flow_err))
15623                                         return -rte_mtr_error_set(error,
15624                                         ENOTSUP,
15625                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15626                                         NULL, "cannot setup "
15627                                         "policy port action");
15628                                 act_cnt->rix_port_id_action =
15629                                         dev_flow.handle->rix_port_id_action;
15630                                 act_cnt->fate_action =
15631                                         MLX5_FLOW_FATE_PORT_ID;
15632                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15633                                 break;
15634                         }
15635                         case RTE_FLOW_ACTION_TYPE_JUMP:
15636                         {
15637                                 uint32_t jump_group = 0;
15638                                 uint32_t table = 0;
15639                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15640                                 struct flow_grp_info grp_info = {
15641                                         .external = !!dev_flow.external,
15642                                         .transfer = !!transfer,
15643                                         .fdb_def_rule = !!priv->fdb_def_rule,
15644                                         .std_tbl_fix = 0,
15645                                         .skip_scale = dev_flow.skip_scale &
15646                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15647                                 };
15648                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15649                                         mtr_policy->sub_policys[domain][0];
15650
15651                                 if (i >= MLX5_MTR_RTE_COLORS)
15652                                         return -rte_mtr_error_set(error,
15653                                           ENOTSUP,
15654                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15655                                           NULL,
15656                                           "cannot create policy "
15657                                           "jump action for this color");
15658                                 jump_group =
15659                                 ((const struct rte_flow_action_jump *)
15660                                                         act->conf)->group;
15661                                 if (mlx5_flow_group_to_table(dev, NULL,
15662                                                        jump_group,
15663                                                        &table,
15664                                                        &grp_info, &flow_err))
15665                                         return -rte_mtr_error_set(error,
15666                                         ENOTSUP,
15667                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15668                                         NULL, "cannot setup "
15669                                         "policy jump action");
15670                                 sub_policy->jump_tbl[i] =
15671                                 flow_dv_tbl_resource_get(dev,
15672                                         table, egress,
15673                                         transfer,
15674                                         !!dev_flow.external,
15675                                         NULL, jump_group, 0,
15676                                         0, &flow_err);
15677                                 if (!sub_policy->jump_tbl[i])
15678                                         return -rte_mtr_error_set(error,
15680                                         ENOTSUP,
15681                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15682                                         NULL, "cannot create jump action.");
15683                                 tbl_data = container_of
15684                                 (sub_policy->jump_tbl[i],
15685                                 struct mlx5_flow_tbl_data_entry, tbl);
15686                                 act_cnt->dr_jump_action[domain] =
15687                                         tbl_data->jump.action;
15688                                 act_cnt->fate_action =
15689                                         MLX5_FLOW_FATE_JUMP;
15690                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15691                                 break;
15692                         }
15693                         /*
15694                          * No need to check meter hierarchy for Y or R colors
15695                          * here since it is done in the validation stage.
15696                          */
15697                         case RTE_FLOW_ACTION_TYPE_METER:
15698                         {
15699                                 const struct rte_flow_action_meter *mtr;
15700                                 struct mlx5_flow_meter_info *next_fm;
15701                                 struct mlx5_flow_meter_policy *next_policy;
15702                                 struct rte_flow_action tag_action;
15703                                 struct mlx5_rte_flow_action_set_tag set_tag;
15704                                 uint32_t next_mtr_idx = 0;
15705
15706                                 mtr = act->conf;
15707                                 next_fm = mlx5_flow_meter_find(priv,
15708                                                         mtr->mtr_id,
15709                                                         &next_mtr_idx);
15710                                 if (!next_fm)
15711                                         return -rte_mtr_error_set(error, EINVAL,
15712                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15713                                                 "Failed to find the next meter.");
15714                                 if (next_fm->def_policy)
15715                                         return -rte_mtr_error_set(error, EINVAL,
15716                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15717                                 "Hierarchy only supports a termination meter.");
15718                                 next_policy = mlx5_flow_meter_policy_find(dev,
15719                                                 next_fm->policy_id, NULL);
15720                                 MLX5_ASSERT(next_policy);
15721                                 if (next_fm->drop_cnt) {
15722                                         set_tag.id =
15723                                                 (enum modify_reg)
15724                                                 mlx5_flow_get_reg_id(dev,
15725                                                 MLX5_MTR_ID,
15726                                                 0,
15727                                                 (struct rte_flow_error *)error);
15728                                         set_tag.offset = (priv->mtr_reg_share ?
15729                                                 MLX5_MTR_COLOR_BITS : 0);
15730                                         set_tag.length = (priv->mtr_reg_share ?
15731                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15732                                                MLX5_REG_BITS);
15733                                         set_tag.data = next_mtr_idx;
15734                                         tag_action.type =
15735                                                 (enum rte_flow_action_type)
15736                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15737                                         tag_action.conf = &set_tag;
15738                                         if (flow_dv_convert_action_set_reg
15739                                                 (mhdr_res, &tag_action,
15740                                                 (struct rte_flow_error *)error))
15741                                                 return -rte_errno;
15742                                         action_flags |=
15743                                                 MLX5_FLOW_ACTION_SET_TAG;
15744                                 }
15745                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15746                                 act_cnt->next_mtr_id = next_fm->meter_id;
15747                                 act_cnt->next_sub_policy = NULL;
15748                                 mtr_policy->is_hierarchy = 1;
15749                                 mtr_policy->dev = next_policy->dev;
15750                                 action_flags |=
15751                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15752                                 break;
15753                         }
15754                         default:
15755                                 return -rte_mtr_error_set(error, ENOTSUP,
15756                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15757                                           NULL, "action type not supported");
15758                         }
15759                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15760                                 /* Create the modify header action if needed. */
15761                                 dev_flow.dv.group = 1;
15762                                 if (flow_dv_modify_hdr_resource_register
15763                                         (dev, mhdr_res, &dev_flow, &flow_err))
15764                                         return -rte_mtr_error_set(error,
15765                                                 ENOTSUP,
15766                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15767                                                 NULL, "cannot register policy "
15768                                                 "set tag action");
15769                                 act_cnt->modify_hdr =
15770                                         dev_flow.handle->dvh.modify_hdr;
15771                         }
15772                 }
15773         }
15774         return 0;
15775 }
15776
15777 /**
15778  * Create the policy actions for all meter domains, lock free
15779  * (mutex should be acquired by the caller).
15780  * Dispatches to the per-domain creation routine.
15781  *
15782  * @param[in] dev
15783  *   Pointer to the Ethernet device structure.
15784  * @param[in] mtr_policy
15785  *   Meter policy struct.
15786  * @param[in] actions
15787  *   Per-color action specifications used to create the meter actions.
15788  * @param[out] error
15789  *   Perform verbose error reporting if not NULL. Initialized in case of
15790  *   error only.
15791  *
15792  * @return
15793  *   0 on success, otherwise negative errno value.
15794  */
15795 static int
15796 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15797                       struct mlx5_flow_meter_policy *mtr_policy,
15798                       const struct rte_flow_action *actions[RTE_COLORS],
15799                       struct rte_mtr_error *error)
15800 {
15801         int ret, i;
15802         uint16_t sub_policy_num;
15803
15804         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15805                 sub_policy_num = (mtr_policy->sub_policy_num >>
15806                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15807                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15808                 if (sub_policy_num) {
15809                         ret = __flow_dv_create_domain_policy_acts(dev,
15810                                 mtr_policy, actions,
15811                                 (enum mlx5_meter_domain)i, error);
15812                         /* Resource cleanup is done at the caller level. */
15813                         if (ret)
15814                                 return ret;
15815                 }
15816         }
15817         return 0;
15818 }
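
/*
 * Illustrative sketch (not part of this file): the actions[RTE_COLORS]
 * array consumed above originates from rte_mtr_meter_policy_add(), one
 * END-terminated action list per color. The port_id, policy_id and
 * queue_conf below are hypothetical.
 *
 *	struct rte_flow_action g_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END, },
 *	};
 *	struct rte_flow_action r_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP, },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END, },
 *	};
 *	struct rte_mtr_meter_policy_params params = {
 *		.actions = { [RTE_COLOR_GREEN] = g_acts,
 *			     [RTE_COLOR_RED] = r_acts, },
 *	};
 *	struct rte_mtr_error mtr_err;
 *
 *	rte_mtr_meter_policy_add(port_id, policy_id, &params, &mtr_err);
 */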
15819
15820 /**
15821  * Query a DV flow rule for its statistics via DevX.
15822  *
15823  * @param[in] dev
15824  *   Pointer to Ethernet device.
15825  * @param[in] cnt_idx
15826  *   Index to the flow counter.
15827  * @param[out] data
15828  *   Data retrieved by the query.
15829  * @param[out] error
15830  *   Perform verbose error reporting if not NULL.
15831  *
15832  * @return
15833  *   0 on success, a negative errno value otherwise and rte_errno is set.
15834  */
15835 int
15836 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15837                     struct rte_flow_error *error)
15838 {
15839         struct mlx5_priv *priv = dev->data->dev_private;
15840         struct rte_flow_query_count *qc = data;
15841
15842         if (!priv->sh->devx)
15843                 return rte_flow_error_set(error, ENOTSUP,
15844                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15845                                           NULL,
15846                                           "counters are not supported");
15847         if (cnt_idx) {
15848                 uint64_t pkts, bytes;
15849                 struct mlx5_flow_counter *cnt;
15850                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15851
15852                 if (err)
15853                         return rte_flow_error_set(error, -err,
15854                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15855                                         NULL, "cannot read counters");
15856                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15857                 qc->hits_set = 1;
15858                 qc->bytes_set = 1;
15859                 qc->hits = pkts - cnt->hits;
15860                 qc->bytes = bytes - cnt->bytes;
15861                 if (qc->reset) {
15862                         cnt->hits = pkts;
15863                         cnt->bytes = bytes;
15864                 }
15865                 return 0;
15866         }
15867         return rte_flow_error_set(error, EINVAL,
15868                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15869                                   NULL,
15870                                   "counters are not available");
15871 }
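
/*
 * Illustrative sketch (not part of this file): reading a rule's counter
 * through rte_flow_query(), which lands in flow_dv_query_count() for the
 * COUNT action. The port_id and flow below are hypothetical.
 *
 *	struct rte_flow_query_count qc = { .reset = 1, };
 *	struct rte_flow_action count_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error error;
 *
 *	if (!rte_flow_query(port_id, flow, &count_act, &qc, &error) &&
 *	    qc.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 */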
15872
15874 /**
15875  * Query a counter's action pointer for a DV flow rule via DevX.
15876  *
15877  * @param[in] dev
15878  *   Pointer to Ethernet device.
15879  * @param[in] cnt_idx
15880  *   Index to the flow counter.
15881  * @param[out] action_ptr
15882  *   Action pointer for counter.
15883  * @param[out] error
15884  *   Perform verbose error reporting if not NULL.
15885  *
15886  * @return
15887  *   0 on success, a negative errno value otherwise and rte_errno is set.
15888  */
15889 int
15890 flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
15891         void **action_ptr, struct rte_flow_error *error)
15892 {
15893         struct mlx5_priv *priv = dev->data->dev_private;
15894
15895         if (!priv->sh->devx || !action_ptr)
15896                 return rte_flow_error_set(error, ENOTSUP,
15897                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15898                                           NULL,
15899                                           "counters are not supported");
15900
15901         if (cnt_idx) {
15902                 struct mlx5_flow_counter *cnt =
15903                         flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15904                 if (cnt) {
15905                         *action_ptr = cnt->action;
15906                         return 0;
15907                 }
15908         }
15909         return rte_flow_error_set(error, EINVAL,
15910                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15911                                   NULL,
15912                                   "counters are not available");
15913 }
15914
15915 static int
15916 flow_dv_action_query(struct rte_eth_dev *dev,
15917                      const struct rte_flow_action_handle *handle, void *data,
15918                      struct rte_flow_error *error)
15919 {
15920         struct mlx5_age_param *age_param;
15921         struct rte_flow_query_age *resp;
15922         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15923         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15924         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15925         struct mlx5_priv *priv = dev->data->dev_private;
15926         struct mlx5_aso_ct_action *ct;
15927         uint16_t owner;
15928         uint32_t dev_idx;
15929
15930         switch (type) {
15931         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15932                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15933                 resp = data;
15934                 resp->aged = __atomic_load_n(&age_param->state,
15935                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15936                                                                           1 : 0;
15937                 resp->sec_since_last_hit_valid = !resp->aged;
15938                 if (resp->sec_since_last_hit_valid)
15939                         resp->sec_since_last_hit = __atomic_load_n
15940                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15941                 return 0;
15942         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15943                 return flow_dv_query_count(dev, idx, data, error);
15944         case MLX5_INDIRECT_ACTION_TYPE_CT:
15945                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15946                 if (owner != PORT_ID(priv))
15947                         return rte_flow_error_set(error, EACCES,
15948                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15949                                         NULL,
15950                                         "CT object owned by another port");
15951                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15952                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15953                 MLX5_ASSERT(ct);
15954                 if (!ct->refcnt)
15955                         return rte_flow_error_set(error, EFAULT,
15956                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15957                                         NULL,
15958                                         "CT object is inactive");
15959                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15960                                                         ct->peer;
15961                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15962                                                         ct->is_original;
15963                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15964                         return rte_flow_error_set(error, EIO,
15965                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15966                                         NULL,
15967                                         "Failed to query CT context");
15968                 return 0;
15969         default:
15970                 return rte_flow_error_set(error, ENOTSUP,
15971                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15972                                           "action type query not supported");
15973         }
15974 }
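
/*
 * Illustrative sketch (not part of this file): querying an indirect
 * action through the generic API dispatches here; for a CT handle,
 * "data" must point to a struct rte_flow_action_conntrack. The port_id
 * and handle are hypothetical.
 *
 *	struct rte_flow_action_conntrack profile;
 *	struct rte_flow_error error;
 *
 *	if (!rte_flow_action_handle_query(port_id, handle, &profile,
 *					  &error))
 *		printf("original dir: %u\n", profile.is_original_dir);
 */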
15975
15976 /**
15977  * Query a flow rule AGE action for aging information.
15978  *
15979  * @param[in] dev
15980  *   Pointer to Ethernet device.
15981  * @param[in] flow
15982  *   Pointer to the sub flow.
15983  * @param[out] data
15984  *   Data retrieved by the query.
15985  * @param[out] error
15986  *   Perform verbose error reporting if not NULL.
15987  *
15988  * @return
15989  *   0 on success, a negative errno value otherwise and rte_errno is set.
15990  */
15991 static int
15992 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15993                   void *data, struct rte_flow_error *error)
15994 {
15995         struct rte_flow_query_age *resp = data;
15996         struct mlx5_age_param *age_param;
15997
15998         if (flow->age) {
15999                 struct mlx5_aso_age_action *act =
16000                                      flow_aso_age_get_by_idx(dev, flow->age);
16001
16002                 age_param = &act->age_params;
16003         } else if (flow->counter) {
16004                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
16005
16006                 if (!age_param || !age_param->timeout)
16007                         return rte_flow_error_set
16008                                         (error, EINVAL,
16009                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16010                                          NULL, "cannot read age data");
16011         } else {
16012                 return rte_flow_error_set(error, EINVAL,
16013                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16014                                           NULL, "age data not available");
16015         }
16016         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
16017                                      AGE_TMOUT ? 1 : 0;
16018         resp->sec_since_last_hit_valid = !resp->aged;
16019         if (resp->sec_since_last_hit_valid)
16020                 resp->sec_since_last_hit = __atomic_load_n
16021                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16022         return 0;
16023 }
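
/*
 * Illustrative sketch (not part of this file): the AGE query fills a
 * struct rte_flow_query_age; sec_since_last_hit is only meaningful
 * while the rule has not aged out, as computed above. The port_id and
 * flow are hypothetical.
 *
 *	struct rte_flow_query_age age;
 *	struct rte_flow_action age_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_AGE,
 *	};
 *	struct rte_flow_error error;
 *
 *	if (!rte_flow_query(port_id, flow, &age_act, &age, &error) &&
 *	    age.sec_since_last_hit_valid)
 *		printf("idle for %u seconds\n", age.sec_since_last_hit);
 */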
16024
16025 /**
16026  * Query a flow.
16027  *
16028  * @see rte_flow_query()
16029  * @see rte_flow_ops
16030  */
16031 static int
16032 flow_dv_query(struct rte_eth_dev *dev,
16033               struct rte_flow *flow,
16034               const struct rte_flow_action *actions,
16035               void *data,
16036               struct rte_flow_error *error)
16037 {
16038         int ret = -EINVAL;
16039
16040         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
16041                 switch (actions->type) {
16042                 case RTE_FLOW_ACTION_TYPE_VOID:
16043                         break;
16044                 case RTE_FLOW_ACTION_TYPE_COUNT:
16045                         ret = flow_dv_query_count(dev, flow->counter, data,
16046                                                   error);
16047                         break;
16048                 case RTE_FLOW_ACTION_TYPE_AGE:
16049                         ret = flow_dv_query_age(dev, flow, data, error);
16050                         break;
16051                 default:
16052                         return rte_flow_error_set(error, ENOTSUP,
16053                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16054                                                   actions,
16055                                                   "action not supported");
16056                 }
16057         }
16058         return ret;
16059 }
16060
16061 /**
16062  * Destroy the meter table set.
16063  * Lock free (mutex should be acquired by the caller).
16064  *
16065  * @param[in] dev
16066  *   Pointer to Ethernet device.
16067  * @param[in] fm
16068  *   Meter information table.
16069  */
16070 static void
16071 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16072                         struct mlx5_flow_meter_info *fm)
16073 {
16074         struct mlx5_priv *priv = dev->data->dev_private;
16075         int i;
16076
16077         if (!fm || !priv->config.dv_flow_en)
16078                 return;
16079         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16080                 if (fm->drop_rule[i]) {
16081                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16082                         fm->drop_rule[i] = NULL;
16083                 }
16084         }
16085 }
16086
16087 static void
16088 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16089 {
16090         struct mlx5_priv *priv = dev->data->dev_private;
16091         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16092         struct mlx5_flow_tbl_data_entry *tbl;
16093         int i, j;
16094
16095         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16096                 if (mtrmng->def_rule[i]) {
16097                         claim_zero(mlx5_flow_os_destroy_flow
16098                                         (mtrmng->def_rule[i]));
16099                         mtrmng->def_rule[i] = NULL;
16100                 }
16101                 if (mtrmng->def_matcher[i]) {
16102                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
16103                                 struct mlx5_flow_tbl_data_entry, tbl);
16104                         mlx5_list_unregister(tbl->matchers,
16105                                              &mtrmng->def_matcher[i]->entry);
16106                         mtrmng->def_matcher[i] = NULL;
16107                 }
16108                 for (j = 0; j < MLX5_REG_BITS; j++) {
16109                         if (mtrmng->drop_matcher[i][j]) {
16110                                 tbl =
16111                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
16112                                              struct mlx5_flow_tbl_data_entry,
16113                                              tbl);
16114                                 mlx5_list_unregister(tbl->matchers,
16115                                             &mtrmng->drop_matcher[i][j]->entry);
16116                                 mtrmng->drop_matcher[i][j] = NULL;
16117                         }
16118                 }
16119                 if (mtrmng->drop_tbl[i]) {
16120                         flow_dv_tbl_resource_release(MLX5_SH(dev),
16121                                 mtrmng->drop_tbl[i]);
16122                         mtrmng->drop_tbl[i] = NULL;
16123                 }
16124         }
16125 }
16126
16127 /* Number of meter flow actions: count and jump, or count and drop. */
16128 #define METER_ACTIONS 2
16129
16130 static void
16131 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16132                                     enum mlx5_meter_domain domain)
16133 {
16134         struct mlx5_priv *priv = dev->data->dev_private;
16135         struct mlx5_flow_meter_def_policy *def_policy =
16136                         priv->sh->mtrmng->def_policy[domain];
16137
16138         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16139         mlx5_free(def_policy);
16140         priv->sh->mtrmng->def_policy[domain] = NULL;
16141 }
16142
16143 /**
16144  * Destroy the default policy table set.
16145  *
16146  * @param[in] dev
16147  *   Pointer to Ethernet device.
16148  */
16149 static void
16150 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16151 {
16152         struct mlx5_priv *priv = dev->data->dev_private;
16153         int i;
16154
16155         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16156                 if (priv->sh->mtrmng->def_policy[i])
16157                         __flow_dv_destroy_domain_def_policy(dev,
16158                                         (enum mlx5_meter_domain)i);
16159         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16160 }
16161
16162 static int
16163 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16164                         uint32_t color_reg_c_idx,
16165                         enum rte_color color, void *matcher_object,
16166                         int actions_n, void *actions,
16167                         bool match_src_port, const struct rte_flow_item *item,
16168                         void **rule, const struct rte_flow_attr *attr)
16169 {
16170         int ret;
16171         struct mlx5_flow_dv_match_params value = {
16172                 .size = sizeof(value.buf),
16173         };
16174         struct mlx5_flow_dv_match_params matcher = {
16175                 .size = sizeof(matcher.buf),
16176         };
16177         struct mlx5_priv *priv = dev->data->dev_private;
16178         uint8_t misc_mask;
16179
16180         if (match_src_port && (priv->representor || priv->master)) {
16181                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
16182                                                    value.buf, item, attr)) {
16183                         DRV_LOG(ERR, "Failed to create meter policy%d flow"
16184                                 " match value with port.", color);
16185                         return -1;
16186                 }
16187         }
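              /*
               * Match the meter color register against this policy color
               * with a full-width mask, so that only packets marked with
               * exactly this color hit the rule.
               */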
16188         flow_dv_match_meta_reg(matcher.buf, value.buf,
16189                                (enum modify_reg)color_reg_c_idx,
16190                                rte_col_2_mlx5_col(color), UINT32_MAX);
16191         misc_mask = flow_dv_matcher_enable(value.buf);
16192         __flow_dv_adjust_buf_size(&value.size, misc_mask);
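              /*
               * The match value buffer has been shrunk to the enabled
               * parameter sets, as mlx5_flow_os_create_flow() expects.
               */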
16193         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16194                                        actions_n, actions, rule);
16195         if (ret) {
16196                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
16197                 return -1;
16198         }
16199         return 0;
16200 }
16201
16202 static int
16203 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
16204                         uint32_t color_reg_c_idx,
16205                         uint16_t priority,
16206                         struct mlx5_flow_meter_sub_policy *sub_policy,
16207                         const struct rte_flow_attr *attr,
16208                         bool match_src_port,
16209                         const struct rte_flow_item *item,
16210                         struct mlx5_flow_dv_matcher **policy_matcher,
16211                         struct rte_flow_error *error)
16212 {
16213         struct mlx5_list_entry *entry;
16214         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
16215         struct mlx5_flow_dv_matcher matcher = {
16216                 .mask = {
16217                         .size = sizeof(matcher.mask.buf),
16218                 },
16219                 .tbl = tbl_rsc,
16220         };
16221         struct mlx5_flow_dv_match_params value = {
16222                 .size = sizeof(value.buf),
16223         };
16224         struct mlx5_flow_cb_ctx ctx = {
16225                 .error = error,
16226                 .data = &matcher,
16227         };
16228         struct mlx5_flow_tbl_data_entry *tbl_data;
16229         struct mlx5_priv *priv = dev->data->dev_private;
16230         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16231
16232         if (match_src_port && (priv->representor || priv->master)) {
16233                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16234                                                    value.buf, item, attr)) {
16235                         DRV_LOG(ERR, "Failed to register meter policy%d matcher"
16236                                 " with port.", priority);
16237                         return -1;
16238                 }
16239         }
16240         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16241         if (priority < RTE_COLOR_RED)
16242                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16243                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16244         matcher.priority = priority;
16245         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16246                                     matcher.mask.size);
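              /*
               * The checksum of the match mask serves as a quick comparison
               * key when the matcher is looked up in the table's matcher list.
               */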
16247         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16248         if (!entry) {
16249                 DRV_LOG(ERR, "Failed to register meter policy matcher.");
16250                 return -1;
16251         }
16252         *policy_matcher =
16253                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16254         return 0;
16255 }
16256
16257 /**
16258  * Create the policy rules per domain.
16259  *
16260  * @param[in] dev
16261  *   Pointer to Ethernet device.
16262  * @param[in] sub_policy
16263  *   Pointer to sub policy table.
16264  * @param[in] egress
16265  *   Direction of the table.
16266  * @param[in] transfer
16267  *   E-Switch or NIC flow.
16268  * @param[in] acts
16269  *   Pointer to policy action list per color.
16270  *
16271  * @return
16272  *   0 on success, -1 otherwise.
16273  */
16274 static int
16275 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16276                 struct mlx5_flow_meter_sub_policy *sub_policy,
16277                 uint8_t egress, uint8_t transfer, bool match_src_port,
16278                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16279 {
16280         struct mlx5_priv *priv = dev->data->dev_private;
16281         struct rte_flow_error flow_err;
16282         uint32_t color_reg_c_idx;
16283         struct rte_flow_attr attr = {
16284                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16285                 .priority = 0,
16286                 .ingress = 0,
16287                 .egress = !!egress,
16288                 .transfer = !!transfer,
16289                 .reserved = 0,
16290         };
16291         int i;
16292         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16293         struct mlx5_sub_policy_color_rule *color_rule;
16294         bool svport_match;
16295         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16296
16297         if (ret < 0)
16298                 return -1;
16299         /* Create policy table with POLICY level. */
16300         if (!sub_policy->tbl_rsc)
16301                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16302                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16303                                 egress, transfer, false, NULL, 0, 0,
16304                                 sub_policy->idx, &flow_err);
16305         if (!sub_policy->tbl_rsc) {
16306                 DRV_LOG(ERR,
16307                         "Failed to create meter sub policy table.");
16308                 return -1;
16309         }
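              /*
               * mlx5_flow_get_reg_id() above returned the REG_C index that
               * carries the meter color; it is matched per color below.
               */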
16310         /* Prepare matchers. */
16311         color_reg_c_idx = ret;
16312         for (i = 0; i < RTE_COLORS; i++) {
16313                 TAILQ_INIT(&sub_policy->color_rules[i]);
16314                 if (!acts[i].actions_n)
16315                         continue;
16316                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16317                                 sizeof(struct mlx5_sub_policy_color_rule),
16318                                 0, SOCKET_ID_ANY);
16319                 if (!color_rule) {
16320                         DRV_LOG(ERR, "No memory to create color rule.");
16321                         goto err_exit;
16322                 }
16323                 tmp_rules[i] = color_rule;
16324                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16325                                   color_rule, next_port);
16326                 color_rule->src_port = priv->representor_id;
16327                 /* The attribute priority is unused here; it only records the color index. */
16328                 attr.priority = i;
16329                 /* Create matchers for colors. */
16330                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16331                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16332                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16333                                 &attr, svport_match, NULL,
16334                                 &color_rule->matcher, &flow_err)) {
16335                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16336                         goto err_exit;
16337                 }
16338                 /* Create flow, matching color. */
16339                 if (__flow_dv_create_policy_flow(dev,
16340                                 color_reg_c_idx, (enum rte_color)i,
16341                                 color_rule->matcher->matcher_object,
16342                                 acts[i].actions_n, acts[i].dv_actions,
16343                                 svport_match, NULL, &color_rule->rule,
16344                                 &attr)) {
16345                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16346                         goto err_exit;
16347                 }
16348         }
16349         return 0;
16350 err_exit:
16351         /* Roll back all the policy rules created so far. */
16352         do {
16353                 color_rule = tmp_rules[i];
16354                 if (color_rule) {
16355                         if (color_rule->rule)
16356                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16357                         if (color_rule->matcher) {
16358                                 struct mlx5_flow_tbl_data_entry *tbl =
16359                                         container_of(color_rule->matcher->tbl,
16360                                                      typeof(*tbl), tbl);
16361                                 mlx5_list_unregister(tbl->matchers,
16362                                                 &color_rule->matcher->entry);
16363                         }
16364                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16365                                      color_rule, next_port);
16366                         mlx5_free(color_rule);
16367                 }
16368         } while (i--);
16369         return -1;
16370 }
16371
16372 static int
16373 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16374                         struct mlx5_flow_meter_policy *mtr_policy,
16375                         struct mlx5_flow_meter_sub_policy *sub_policy,
16376                         uint32_t domain)
16377 {
16378         struct mlx5_priv *priv = dev->data->dev_private;
16379         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16380         struct mlx5_flow_dv_tag_resource *tag;
16381         struct mlx5_flow_dv_port_id_action_resource *port_action;
16382         struct mlx5_hrxq *hrxq;
16383         struct mlx5_flow_meter_info *next_fm = NULL;
16384         struct mlx5_flow_meter_policy *next_policy;
16385         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16386         struct mlx5_flow_tbl_data_entry *tbl_data;
16387         struct rte_flow_error error;
16388         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16389         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16390         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
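              /*
               * On egress, and on transfer from a representor port, the meter
               * action has to be placed before the other policy actions.
               */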
16391         bool match_src_port = false;
16392         int i;
16393
16394         /* For RSS or Queue fate, no actions or rules are created in advance. */
16395         for (i = 0; i < RTE_COLORS; i++) {
16396                 acts[i].actions_n = 0;
16397                 if (i == RTE_COLOR_RED) {
16398                         /* Only support drop on red. */
16399                         acts[i].dv_actions[0] =
16400                                 mtr_policy->dr_drop_action[domain];
16401                         acts[i].actions_n = 1;
16402                         continue;
16403                 }
16404                 if (i == RTE_COLOR_GREEN &&
16405                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16406                         struct rte_flow_attr attr = {
16407                                 .transfer = transfer
16408                         };
16409
16410                         next_fm = mlx5_flow_meter_find(priv,
16411                                         mtr_policy->act_cnt[i].next_mtr_id,
16412                                         NULL);
16413                         if (!next_fm) {
16414                                 DRV_LOG(ERR,
16415                                         "Failed to get next hierarchy meter.");
16416                                 goto err_exit;
16417                         }
16418                         if (mlx5_flow_meter_attach(priv, next_fm,
16419                                                    &attr, &error)) {
16420                                 DRV_LOG(ERR, "%s", error.message);
16421                                 next_fm = NULL;
16422                                 goto err_exit;
16423                         }
16424                         /* Meter action must be the first for TX. */
16425                         if (mtr_first) {
16426                                 acts[i].dv_actions[acts[i].actions_n] =
16427                                         next_fm->meter_action;
16428                                 acts[i].actions_n++;
16429                         }
16430                 }
16431                 if (mtr_policy->act_cnt[i].rix_mark) {
16432                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16433                                         mtr_policy->act_cnt[i].rix_mark);
16434                         if (!tag) {
16435                                 DRV_LOG(ERR, "Failed to find "
16436                                 "mark action for policy.");
16437                                 goto err_exit;
16438                         }
16439                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16440                         acts[i].actions_n++;
16441                 }
16442                 if (mtr_policy->act_cnt[i].modify_hdr) {
16443                         acts[i].dv_actions[acts[i].actions_n] =
16444                                 mtr_policy->act_cnt[i].modify_hdr->action;
16445                         acts[i].actions_n++;
16446                 }
16447                 if (mtr_policy->act_cnt[i].fate_action) {
16448                         switch (mtr_policy->act_cnt[i].fate_action) {
16449                         case MLX5_FLOW_FATE_PORT_ID:
16450                                 port_action = mlx5_ipool_get
16451                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16452                                 mtr_policy->act_cnt[i].rix_port_id_action);
16453                                 if (!port_action) {
16454                                         DRV_LOG(ERR, "Failed to find "
16455                                                 "port action for policy.");
16456                                         goto err_exit;
16457                                 }
16458                                 acts[i].dv_actions[acts[i].actions_n] =
16459                                         port_action->action;
16460                                 acts[i].actions_n++;
16461                                 mtr_policy->dev = dev;
16462                                 match_src_port = true;
16463                                 break;
16464                         case MLX5_FLOW_FATE_DROP:
16465                         case MLX5_FLOW_FATE_JUMP:
16466                                 acts[i].dv_actions[acts[i].actions_n] =
16467                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16468                                 acts[i].actions_n++;
16469                                 break;
16470                         case MLX5_FLOW_FATE_SHARED_RSS:
16471                         case MLX5_FLOW_FATE_QUEUE:
16472                                 hrxq = mlx5_ipool_get
16473                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16474                                          sub_policy->rix_hrxq[i]);
16475                                 if (!hrxq) {
16476                                         DRV_LOG(ERR, "Failed to find "
16477                                                 "queue action for policy.");
16478                                         goto err_exit;
16479                                 }
16480                                 acts[i].dv_actions[acts[i].actions_n] =
16481                                         hrxq->action;
16482                                 acts[i].actions_n++;
16483                                 break;
16484                         case MLX5_FLOW_FATE_MTR:
16485                                 if (!next_fm) {
16486                                         DRV_LOG(ERR,
16487                                                 "No next hierarchy meter.");
16488                                         goto err_exit;
16489                                 }
16490                                 if (!mtr_first) {
16491                                         acts[i].dv_actions[acts[i].actions_n] =
16492                                                         next_fm->meter_action;
16493                                         acts[i].actions_n++;
16494                                 }
16495                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16496                                         next_sub_policy =
16497                                         mtr_policy->act_cnt[i].next_sub_policy;
16498                                 } else {
16499                                         next_policy =
16500                                                 mlx5_flow_meter_policy_find(dev,
16501                                                 next_fm->policy_id, NULL);
16502                                         MLX5_ASSERT(next_policy);
16503                                         next_sub_policy =
16504                                         next_policy->sub_policys[domain][0];
16505                                 }
16506                                 tbl_data =
16507                                         container_of(next_sub_policy->tbl_rsc,
16508                                         struct mlx5_flow_tbl_data_entry, tbl);
16509                                 acts[i].dv_actions[acts[i].actions_n++] =
16510                                                         tbl_data->jump.action;
16511                                 if (mtr_policy->act_cnt[i].modify_hdr)
16512                                         match_src_port = !!transfer;
16513                                 break;
16514                         default:
16515                         /* Queue action: nothing to do. */
16516                                 break;
16517                         }
16518                 }
16519         }
16520         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16521                                 egress, transfer, match_src_port, acts)) {
16522                 DRV_LOG(ERR,
16523                         "Failed to create policy rules per domain.");
16524                 goto err_exit;
16525         }
16526         return 0;
16527 err_exit:
16528         if (next_fm)
16529                 mlx5_flow_meter_detach(priv, next_fm);
16530         return -1;
16531 }
16532
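      /*
       * Note: mtr_policy->sub_policy_num packs one small counter per meter
       * domain, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits wide each. The open-coded
       * shift/mask sequences below follow this pattern; an illustrative
       * sketch (not part of the driver) of the equivalent accessors:
       *
       *   static inline uint16_t
       *   sub_policy_num_get(uint32_t packed, int domain)
       *   {
       *           return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
       *                  MLX5_MTR_SUB_POLICY_NUM_MASK;
       *   }
       *
       *   static inline uint32_t
       *   sub_policy_num_set(uint32_t packed, int domain, uint16_t num)
       *   {
       *           packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
       *                       (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
       *           return packed |
       *                  ((uint32_t)(num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
       *                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
       *   }
       */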
16533 /**
16534  * Create the policy rules.
16535  *
16536  * @param[in] dev
16537  *   Pointer to Ethernet device.
16538  * @param[in,out] mtr_policy
16539  *   Pointer to meter policy table.
16540  *
16541  * @return
16542  *   0 on success, -1 otherwise.
16543  */
16544 static int
16545 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16546                              struct mlx5_flow_meter_policy *mtr_policy)
16547 {
16548         int i;
16549         uint16_t sub_policy_num;
16550
16551         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16552                 sub_policy_num = (mtr_policy->sub_policy_num >>
16553                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16554                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16555                 if (!sub_policy_num)
16556                         continue;
16557                 /* Prepare actions list and create policy rules. */
16558                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16559                         mtr_policy->sub_policys[i][0], i)) {
16560                         DRV_LOG(ERR, "Failed to create policy action "
16561                                 "list per domain.");
16562                         return -1;
16563                 }
16564         }
16565         return 0;
16566 }
16567
16568 static int
16569 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16570 {
16571         struct mlx5_priv *priv = dev->data->dev_private;
16572         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16573         struct mlx5_flow_meter_def_policy *def_policy;
16574         struct mlx5_flow_tbl_resource *jump_tbl;
16575         struct mlx5_flow_tbl_data_entry *tbl_data;
16576         uint8_t egress, transfer;
16577         struct rte_flow_error error;
16578         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16579         int ret;
16580
16581         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16582         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16583         def_policy = mtrmng->def_policy[domain];
16584         if (!def_policy) {
16585                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16586                         sizeof(struct mlx5_flow_meter_def_policy),
16587                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16588                 if (!def_policy) {
16589                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16590                         return -1;
16591                 }
16592                 mtrmng->def_policy[domain] = def_policy;
16593                 /* Create the meter suffix table with SUFFIX level. */
16594                 jump_tbl = flow_dv_tbl_resource_get(dev,
16595                                 MLX5_FLOW_TABLE_LEVEL_METER,
16596                                 egress, transfer, false, NULL, 0,
16597                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16598                 if (!jump_tbl) {
16599                         DRV_LOG(ERR,
16600                                 "Failed to create meter suffix table.");
16601                         goto def_policy_error;
16602                 }
16603                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16604                 tbl_data = container_of(jump_tbl,
16605                                         struct mlx5_flow_tbl_data_entry, tbl);
16606                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16607                                                 tbl_data->jump.action;
16608                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16609                 acts[RTE_COLOR_GREEN].actions_n = 1;
16610                 /*
16611                  * YELLOW uses the same default policy as GREEN.
16612                  * Green and yellow share one table and one jump action;
16613                  * getting the table resource a second time only updates
16614                  * the reference count for the release stage.
16615                  */
16616                 jump_tbl = flow_dv_tbl_resource_get(dev,
16617                                 MLX5_FLOW_TABLE_LEVEL_METER,
16618                                 egress, transfer, false, NULL, 0,
16619                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16620                 if (!jump_tbl) {
16621                         DRV_LOG(ERR,
16622                                 "Failed to get meter suffix table.");
16623                         goto def_policy_error;
16624                 }
16625                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16626                 tbl_data = container_of(jump_tbl,
16627                                         struct mlx5_flow_tbl_data_entry, tbl);
16628                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16629                                                 tbl_data->jump.action;
16630                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16631                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16632                 /* Create jump action to the drop table. */
16633                 if (!mtrmng->drop_tbl[domain]) {
16634                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16635                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16636                                  egress, transfer, false, NULL, 0,
16637                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16638                         if (!mtrmng->drop_tbl[domain]) {
16639                                 DRV_LOG(ERR, "Failed to create meter "
16640                                         "drop table for default policy.");
16641                                 goto def_policy_error;
16642                         }
16643                 }
16644                 /* All RED packets jump to the unique drop table. */
16645                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16646                                         struct mlx5_flow_tbl_data_entry, tbl);
16647                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16648                                                 tbl_data->jump.action;
16649                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16650                 acts[RTE_COLOR_RED].actions_n = 1;
16651                 /* Create default policy rules. */
16652                 ret = __flow_dv_create_domain_policy_rules(dev,
16653                                         &def_policy->sub_policy,
16654                                         egress, transfer, false, acts);
16655                 if (ret) {
16656                         DRV_LOG(ERR, "Failed to create default policy rules.");
16657                         goto def_policy_error;
16658                 }
16659         }
16660         return 0;
16661 def_policy_error:
16662         __flow_dv_destroy_domain_def_policy(dev,
16663                                             (enum mlx5_meter_domain)domain);
16664         return -1;
16665 }
16666
16667 /**
16668  * Create the default policy table set.
16669  *
16670  * @param[in] dev
16671  *   Pointer to Ethernet device.
16672  * @return
16673  *   0 on success, -1 otherwise.
16674  */
16675 static int
16676 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16677 {
16678         struct mlx5_priv *priv = dev->data->dev_private;
16679         int i;
16680
16681         /* Non-termination policy table. */
16682         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16683                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16684                         continue;
16685                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16686                         DRV_LOG(ERR, "Failed to create default policy");
16687                         /* Rollback the created default policies for others. */
16688                         flow_dv_destroy_def_policy(dev);
16689                         return -1;
16690                 }
16691         }
16692         return 0;
16693 }
16694
16695 /**
16696  * Create the needed meter tables.
16697  * Lock free, the mutex should be acquired by the caller.
16698  *
16699  * @param[in] dev
16700  *   Pointer to Ethernet device.
16701  * @param[in] fm
16702  *   Meter information table.
16703  * @param[in] mtr_idx
16704  *   Meter index.
16705  * @param[in] domain_bitmap
16706  *   Domain bitmap.
16707  * @return
16708  *   0 on success, -1 otherwise.
16709  */
16710 static int
16711 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16712                         struct mlx5_flow_meter_info *fm,
16713                         uint32_t mtr_idx,
16714                         uint8_t domain_bitmap)
16715 {
16716         struct mlx5_priv *priv = dev->data->dev_private;
16717         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16718         struct rte_flow_error error;
16719         struct mlx5_flow_tbl_data_entry *tbl_data;
16720         uint8_t egress, transfer;
16721         void *actions[METER_ACTIONS];
16722         int domain, ret, i;
16723         struct mlx5_flow_counter *cnt;
16724         struct mlx5_flow_dv_match_params value = {
16725                 .size = sizeof(value.buf),
16726         };
16727         struct mlx5_flow_dv_match_params matcher_para = {
16728                 .size = sizeof(matcher_para.buf),
16729         };
16730         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16731                                                      0, &error);
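              /*
               * The meter ID occupies up to max_mtr_bits of the register.
               * When the register is shared with the color, the ID sits
               * above the MLX5_MTR_COLOR_BITS color field, hence the offset.
               */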
16732         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16733         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16734         struct mlx5_list_entry *entry;
16735         struct mlx5_flow_dv_matcher matcher = {
16736                 .mask = {
16737                         .size = sizeof(matcher.mask.buf),
16738                 },
16739         };
16740         struct mlx5_flow_dv_matcher *drop_matcher;
16741         struct mlx5_flow_cb_ctx ctx = {
16742                 .error = &error,
16743                 .data = &matcher,
16744         };
16745         uint8_t misc_mask;
16746
16747         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16748                 rte_errno = ENOTSUP;
16749                 return -1;
16750         }
16751         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
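                      /*
                       * Skip domains that were not requested; when the default
                       * resources of a domain already exist, more rules are
                       * needed only for a meter with a drop counter.
                       */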
16752                 if (!(domain_bitmap & (1 << domain)) ||
16753                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16754                         continue;
16755                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16756                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16757                 /* Create the drop table with METER DROP level. */
16758                 if (!mtrmng->drop_tbl[domain]) {
16759                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16760                                         MLX5_FLOW_TABLE_LEVEL_METER,
16761                                         egress, transfer, false, NULL, 0,
16762                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16763                         if (!mtrmng->drop_tbl[domain]) {
16764                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16765                                 goto policy_error;
16766                         }
16767                 }
16768                 /* Create default matcher in drop table. */
16769                 matcher.tbl = mtrmng->drop_tbl[domain];
16770                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16771                                 struct mlx5_flow_tbl_data_entry, tbl);
16772                 if (!mtrmng->def_matcher[domain]) {
16773                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16774                                        (enum modify_reg)mtr_id_reg_c,
16775                                        0, 0);
16776                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16777                         matcher.crc = rte_raw_cksum
16778                                         ((const void *)matcher.mask.buf,
16779                                         matcher.mask.size);
16780                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16781                         if (!entry) {
16782                                 DRV_LOG(ERR, "Failed to register meter "
16783                                 "drop default matcher.");
16784                                 goto policy_error;
16785                         }
16786                         mtrmng->def_matcher[domain] = container_of(entry,
16787                         struct mlx5_flow_dv_matcher, entry);
16788                 }
16789                 /* Create default rule in drop table. */
16790                 if (!mtrmng->def_rule[domain]) {
16791                         i = 0;
16792                         actions[i++] = priv->sh->dr_drop_action;
16793                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16794                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16795                         misc_mask = flow_dv_matcher_enable(value.buf);
16796                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16797                         ret = mlx5_flow_os_create_flow
16798                                 (mtrmng->def_matcher[domain]->matcher_object,
16799                                 (void *)&value, i, actions,
16800                                 &mtrmng->def_rule[domain]);
16801                         if (ret) {
16802                                 DRV_LOG(ERR, "Failed to create meter "
16803                                 "default drop rule for drop table.");
16804                                 goto policy_error;
16805                         }
16806                 }
16807                 if (!fm->drop_cnt)
16808                         continue;
16809                 MLX5_ASSERT(mtrmng->max_mtr_bits);
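                      /*
                       * The drop matcher masks only the meter ID bits,
                       * leaving any color bits of a shared register unmatched.
                       */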
16810                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16811                         /* Create matchers for Drop. */
16812                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16813                                         (enum modify_reg)mtr_id_reg_c, 0,
16814                                         (mtr_id_mask << mtr_id_offset));
16815                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16816                         matcher.crc = rte_raw_cksum
16817                                         ((const void *)matcher.mask.buf,
16818                                         matcher.mask.size);
16819                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16820                         if (!entry) {
16821                                 DRV_LOG(ERR,
16822                                 "Failed to register meter drop matcher.");
16823                                 goto policy_error;
16824                         }
16825                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16826                                 container_of(entry, struct mlx5_flow_dv_matcher,
16827                                              entry);
16828                 }
16829                 drop_matcher =
16830                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16831                 /* Create drop rule, matching meter_id only. */
16832                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16833                                 (enum modify_reg)mtr_id_reg_c,
16834                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16835                 i = 0;
16836                 cnt = flow_dv_counter_get_by_idx(dev,
16837                                         fm->drop_cnt, NULL);
16838                 actions[i++] = cnt->action;
16839                 actions[i++] = priv->sh->dr_drop_action;
16840                 misc_mask = flow_dv_matcher_enable(value.buf);
16841                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16842                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16843                                                (void *)&value, i, actions,
16844                                                &fm->drop_rule[domain]);
16845                 if (ret) {
16846                         DRV_LOG(ERR, "Failed to create meter "
16847                                 "drop rule for drop table.");
16848                         goto policy_error;
16849                 }
16850         }
16851         return 0;
16852 policy_error:
16853         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16854                 if (fm->drop_rule[i]) {
16855                         claim_zero(mlx5_flow_os_destroy_flow
16856                                 (fm->drop_rule[i]));
16857                         fm->drop_rule[i] = NULL;
16858                 }
16859         }
16860         return -1;
16861 }
16862
16863 static struct mlx5_flow_meter_sub_policy *
16864 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16865                 struct mlx5_flow_meter_policy *mtr_policy,
16866                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16867                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16868                 bool *is_reuse)
16869 {
16870         struct mlx5_priv *priv = dev->data->dev_private;
16871         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16872         uint32_t sub_policy_idx = 0;
16873         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16874         uint32_t i, j;
16875         struct mlx5_hrxq *hrxq;
16876         struct mlx5_flow_handle dh;
16877         struct mlx5_meter_policy_action_container *act_cnt;
16878         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16879         uint16_t sub_policy_num;
16880         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
16881
16882         MLX5_ASSERT(wks);
16883         rte_spinlock_lock(&mtr_policy->sl);
16884         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16885                 if (!rss_desc[i])
16886                         continue;
16887                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16888                 if (!hrxq_idx[i]) {
16889                         rte_spinlock_unlock(&mtr_policy->sl);
16890                         return NULL;
16891                 }
16892         }
16893         sub_policy_num = (mtr_policy->sub_policy_num >>
16894                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16895                         MLX5_MTR_SUB_POLICY_NUM_MASK;
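              /*
               * Look for an existing sub-policy that already uses the same
               * hrxq per color, so that it can be reused.
               */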
16896         for (j = 0; j < sub_policy_num; j++) {
16897                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16898                         if (rss_desc[i] &&
16899                             hrxq_idx[i] !=
16900                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
16901                                 break;
16902                 }
16903                 if (i >= MLX5_MTR_RTE_COLORS) {
16904                         /*
16905                          * Found the sub policy table with
16906                          * the same queue per color.
16907                          */
16908                         rte_spinlock_unlock(&mtr_policy->sl);
16909                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16910                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16911                         *is_reuse = true;
16912                         return mtr_policy->sub_policys[domain][j];
16913                 }
16914         }
16915         /* Create sub policy. */
16916         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16917                 /* Reuse the first pre-allocated sub_policy. */
16918                 sub_policy = mtr_policy->sub_policys[domain][0];
16919                 sub_policy_idx = sub_policy->idx;
16920         } else {
16921                 sub_policy = mlx5_ipool_zmalloc
16922                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16923                                  &sub_policy_idx);
16924                 if (!sub_policy ||
16925                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16926                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16927                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16928                         goto rss_sub_policy_error;
16929                 }
16930                 sub_policy->idx = sub_policy_idx;
16931                 sub_policy->main_policy = mtr_policy;
16932         }
16933         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16934                 if (!rss_desc[i])
16935                         continue;
16936                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16937                 if (mtr_policy->is_hierarchy) {
16938                         act_cnt = &mtr_policy->act_cnt[i];
16939                         act_cnt->next_sub_policy = next_sub_policy;
16940                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16941                 } else {
16942                         /*
16943                          * Overwrite the last action from
16944                          * RSS action to Queue action.
16945                          */
16946                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16947                                               hrxq_idx[i]);
16948                         if (!hrxq) {
16949                                 DRV_LOG(ERR, "Failed to get policy hrxq");
16950                                 goto rss_sub_policy_error;
16951                         }
16952                         act_cnt = &mtr_policy->act_cnt[i];
16953                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16954                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16955                                 if (act_cnt->rix_mark)
16956                                         wks->mark = 1;
16957                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16958                                 dh.rix_hrxq = hrxq_idx[i];
16959                                 flow_drv_rxq_flags_set(dev, &dh);
16960                         }
16961                 }
16962         }
16963         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16964                                                sub_policy, domain)) {
16965                 DRV_LOG(ERR, "Failed to create policy "
16966                         "rules for ingress domain.");
16967                 goto rss_sub_policy_error;
16968         }
16969         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16970                 i = (mtr_policy->sub_policy_num >>
16971                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16972                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16973                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
16974                         DRV_LOG(ERR, "No free sub-policy slot.");
16975                         goto rss_sub_policy_error;
16976                 }
16977                 mtr_policy->sub_policys[domain][i] = sub_policy;
16978                 i++;
16979                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16980                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16981                 mtr_policy->sub_policy_num |=
16982                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16983                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16984         }
16985         rte_spinlock_unlock(&mtr_policy->sl);
16986         *is_reuse = false;
16987         return sub_policy;
16988 rss_sub_policy_error:
16989         if (sub_policy) {
16990                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16991                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16992                         i = (mtr_policy->sub_policy_num >>
16993                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16994                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16995                         mtr_policy->sub_policys[domain][i] = NULL;
16996                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16997                                         sub_policy->idx);
16998                 }
16999         }
17000         rte_spinlock_unlock(&mtr_policy->sl);
17001         return NULL;
17002 }
17003
17004 /**
17005  * Find the policy table for prefix table with RSS.
17006  *
17007  * @param[in] dev
17008  *   Pointer to Ethernet device.
17009  * @param[in] mtr_policy
17010  *   Pointer to meter policy table.
17011  * @param[in] rss_desc
17012  *   Pointer to rss_desc, one descriptor per color.
17013  * @return
17014  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
17015  */
17016 static struct mlx5_flow_meter_sub_policy *
17017 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
17018                 struct mlx5_flow_meter_policy *mtr_policy,
17019                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
17020 {
17021         struct mlx5_priv *priv = dev->data->dev_private;
17022         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17023         struct mlx5_flow_meter_info *next_fm;
17024         struct mlx5_flow_meter_policy *next_policy;
17025         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
17026         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
17027         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
17028         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17029         bool reuse_sub_policy;
17030         uint32_t i = 0;
17031         uint32_t j = 0;
17032
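              /*
               * Two passes: first walk down the hierarchy recording every
               * policy, then create or reuse the sub-policies from the
               * deepest policy upwards, chaining each level to the next
               * via next_sub_policy.
               */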
17033         while (true) {
17034                 /* Iterate hierarchy to get all policies in this hierarchy. */
17035                 policies[i++] = mtr_policy;
17036                 if (!mtr_policy->is_hierarchy)
17037                         break;
17038                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
17039                         DRV_LOG(ERR, "Exceeded max meter number in hierarchy.");
17040                         return NULL;
17041                 }
17042                 next_fm = mlx5_flow_meter_find(priv,
17043                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17044                 if (!next_fm) {
17045                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
17046                         return NULL;
17047                 }
17048                 next_policy =
17049                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
17050                                                     NULL);
17051                 MLX5_ASSERT(next_policy);
17052                 mtr_policy = next_policy;
17053         }
17054         while (i) {
17055                 /*
17056                  * From the last policy to the first one in the hierarchy,
17057                  * create or get the sub-policy for each of them.
17058                  */
17059                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
17060                                                         policies[--i],
17061                                                         rss_desc,
17062                                                         next_sub_policy,
17063                                                         &reuse_sub_policy);
17064                 if (!sub_policy) {
17065                         DRV_LOG(ERR, "Failed to get the sub policy.");
17066                         goto err_exit;
17067                 }
17068                 if (!reuse_sub_policy)
17069                         sub_policies[j++] = sub_policy;
17070                 next_sub_policy = sub_policy;
17071         }
17072         return sub_policy;
17073 err_exit:
17074         while (j) {
17075                 uint16_t sub_policy_num;
17076
17077                 sub_policy = sub_policies[--j];
17078                 mtr_policy = sub_policy->main_policy;
17079                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17080                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17081                         sub_policy_num = (mtr_policy->sub_policy_num >>
17082                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17083                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
17084                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17085                                                                         NULL;
17086                         sub_policy_num--;
17087                         mtr_policy->sub_policy_num &=
17088                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17089                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17090                         mtr_policy->sub_policy_num |=
17091                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17092                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17093                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17094                                         sub_policy->idx);
17095                 }
17096         }
17097         return NULL;
17098 }
17099
17100 /**
17101  * Create the sub policy tag rule for all meters in hierarchy.
17102  *
17103  * @param[in] dev
17104  *   Pointer to Ethernet device.
17105  * @param[in] fm
17106  *   Meter information table.
17107  * @param[in] src_port
17108  *   The src port this extra rule should use.
17109  * @param[in] item
17110  *   The src port match item.
17111  * @param[out] error
17112  *   Perform verbose error reporting if not NULL.
17113  * @return
17114  *   0 on success, a negative errno value otherwise and rte_errno is set.
17115  */
17116 static int
17117 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
17118                                 struct mlx5_flow_meter_info *fm,
17119                                 int32_t src_port,
17120                                 const struct rte_flow_item *item,
17121                                 struct rte_flow_error *error)
17122 {
17123         struct mlx5_priv *priv = dev->data->dev_private;
17124         struct mlx5_flow_meter_policy *mtr_policy;
17125         struct mlx5_flow_meter_sub_policy *sub_policy;
17126         struct mlx5_flow_meter_info *next_fm = NULL;
17127         struct mlx5_flow_meter_policy *next_policy;
17128         struct mlx5_flow_meter_sub_policy *next_sub_policy;
17129         struct mlx5_flow_tbl_data_entry *tbl_data;
17130         struct mlx5_sub_policy_color_rule *color_rule;
17131         struct mlx5_meter_policy_acts acts;
17132         uint32_t color_reg_c_idx;
17133         bool mtr_first = src_port != UINT16_MAX;
17134         struct rte_flow_attr attr = {
17135                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
17136                 .priority = 0,
17137                 .ingress = 0,
17138                 .egress = 0,
17139                 .transfer = 1,
17140                 .reserved = 0,
17141         };
17142         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17143         int i;
17144
17145         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17146         MLX5_ASSERT(mtr_policy);
17147         if (!mtr_policy->is_hierarchy)
17148                 return 0;
17149         next_fm = mlx5_flow_meter_find(priv,
17150                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17151         if (!next_fm) {
17152                 return rte_flow_error_set(error, EINVAL,
17153                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
17154                                 "Failed to find next meter in hierarchy.");
17155         }
17156         if (!next_fm->drop_cnt)
17157                 goto exit;
17158         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
17159         sub_policy = mtr_policy->sub_policys[domain][0];
17160         for (i = 0; i < RTE_COLORS; i++) {
17161                 bool rule_exist = false;
17162                 struct mlx5_meter_policy_action_container *act_cnt;
17163
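                      /* Only the GREEN color rule chains to the next meter here. */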
17164                 if (i >= RTE_COLOR_YELLOW)
17165                         break;
17166                 TAILQ_FOREACH(color_rule,
17167                               &sub_policy->color_rules[i], next_port)
17168                         if (color_rule->src_port == src_port) {
17169                                 rule_exist = true;
17170                                 break;
17171                         }
17172                 if (rule_exist)
17173                         continue;
17174                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
17175                                 sizeof(struct mlx5_sub_policy_color_rule),
17176                                 0, SOCKET_ID_ANY);
17177                 if (!color_rule)
17178                         return rte_flow_error_set(error, ENOMEM,
17179                                 RTE_FLOW_ERROR_TYPE_ACTION,
17180                                 NULL, "No memory to create tag color rule.");
17181                 color_rule->src_port = src_port;
17182                 attr.priority = i;
17183                 next_policy = mlx5_flow_meter_policy_find(dev,
17184                                                 next_fm->policy_id, NULL);
17185                 MLX5_ASSERT(next_policy);
17186                 next_sub_policy = next_policy->sub_policys[domain][0];
17187                 tbl_data = container_of(next_sub_policy->tbl_rsc,
17188                                         struct mlx5_flow_tbl_data_entry, tbl);
17189                 act_cnt = &mtr_policy->act_cnt[i];
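                      /*
                       * Build the action list: set-tag (modify header), next
                       * meter and a jump to the next policy table; the meter
                       * action goes first when matching on a source port.
                       */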
17190                 if (mtr_first) {
17191                         acts.dv_actions[0] = next_fm->meter_action;
17192                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
17193                 } else {
17194                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
17195                         acts.dv_actions[1] = next_fm->meter_action;
17196                 }
17197                 acts.dv_actions[2] = tbl_data->jump.action;
17198                 acts.actions_n = 3;
17199                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
17200                         next_fm = NULL;
17201                         goto err_exit;
17202                 }
17203                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
17204                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
17205                                 &attr, true, item,
17206                                 &color_rule->matcher, error)) {
17207                         rte_flow_error_set(error, errno,
17208                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17209                                 "Failed to create hierarchy meter matcher.");
17210                         goto err_exit;
17211                 }
17212                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
17213                                         (enum rte_color)i,
17214                                         color_rule->matcher->matcher_object,
17215                                         acts.actions_n, acts.dv_actions,
17216                                         true, item,
17217                                         &color_rule->rule, &attr)) {
17218                         rte_flow_error_set(error, errno,
17219                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17220                                 "Failed to create hierarchy meter rule.");
17221                         goto err_exit;
17222                 }
17223                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
17224                                   color_rule, next_port);
17225         }
17226 exit:
17227         /*
17228          * Recursively walk the meter hierarchy and create the
17229          * rules needed for the next meter as well.
17230          */
17231         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
17232                                                 src_port, item, error);
17233 err_exit:
17234         if (color_rule) {
17235                 if (color_rule->rule)
17236                         mlx5_flow_os_destroy_flow(color_rule->rule);
17237                 if (color_rule->matcher) {
17238                         struct mlx5_flow_tbl_data_entry *tbl =
17239                                 container_of(color_rule->matcher->tbl,
17240                                                 typeof(*tbl), tbl);
17241                         mlx5_list_unregister(tbl->matchers,
17242                                                 &color_rule->matcher->entry);
17243                 }
17244                 mlx5_free(color_rule);
17245         }
17246         if (next_fm)
17247                 mlx5_flow_meter_detach(priv, next_fm);
17248         return -rte_errno;
17249 }
17250
17251 /**
17252  * Destroy the sub policy table with RX queue.
17253  *
17254  * @param[in] dev
17255  *   Pointer to Ethernet device.
17256  * @param[in] mtr_policy
17257  *   Pointer to meter policy table.
17258  */
17259 static void
17260 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17261                                     struct mlx5_flow_meter_policy *mtr_policy)
17262 {
17263         struct mlx5_priv *priv = dev->data->dev_private;
17264         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17265         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17266         uint32_t i, j;
17267         uint16_t sub_policy_num, new_policy_num;
17268
17269         rte_spinlock_lock(&mtr_policy->sl);
17270         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17271                 switch (mtr_policy->act_cnt[i].fate_action) {
17272                 case MLX5_FLOW_FATE_SHARED_RSS:
17273                         sub_policy_num = (mtr_policy->sub_policy_num >>
17274                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17275                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17276                         new_policy_num = sub_policy_num;
17277                         for (j = 0; j < sub_policy_num; j++) {
17278                                 sub_policy =
17279                                         mtr_policy->sub_policys[domain][j];
17280                                 if (sub_policy) {
17281                                         __flow_dv_destroy_sub_policy_rules(dev,
17282                                                 sub_policy);
17283                                         if (sub_policy !=
17284                                             mtr_policy->sub_policys[domain][0]) {
17285                                                 mtr_policy->sub_policys[domain][j] =
17286                                                         NULL;
17287                                                 mlx5_ipool_free(priv->sh->ipool
17288                                                         [MLX5_IPOOL_MTR_POLICY],
17289                                                         sub_policy->idx);
17290                                                 new_policy_num--;
17291                                         }
17292                                 }
17293                         }
17294                         if (new_policy_num != sub_policy_num) {
17295                                 mtr_policy->sub_policy_num &=
17296                                         ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17297                                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17298                                 mtr_policy->sub_policy_num |=
17299                                         (new_policy_num &
17300                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17301                                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17302                         }
17303                         break;
17304                 case MLX5_FLOW_FATE_QUEUE:
17305                         sub_policy = mtr_policy->sub_policys[domain][0];
17306                         __flow_dv_destroy_sub_policy_rules(dev,
17307                                                            sub_policy);
17308                         break;
17309                 default:
17310                         /* Other actions have no queue, nothing to do. */
17311                         break;
17312                 }
17313         }
17314         rte_spinlock_unlock(&mtr_policy->sl);
17315 }
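
      /*
       * Worked example (a sketch, not upstream code): mtr_policy->sub_policy_num
       * packs one small per-domain counter into a single word. This hypothetical
       * helper extracts the count for one domain, mirroring the shift-and-mask
       * used above.
       */
      static __rte_unused uint16_t
      flow_dv_example_sub_policy_cnt(uint32_t sub_policy_num, uint32_t domain)
      {
              /* Shift the per-domain field down, then mask off the other fields. */
              return (sub_policy_num >>
                      (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
                     MLX5_MTR_SUB_POLICY_NUM_MASK;
      }
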
17316 /**
17317  * Check whether the DR drop action is supported on the root table or not.
17318  *
17319  * Create a simple flow with DR drop action on root table to validate
17320  * if DR drop action on root table is supported or not.
17321  *
17322  * @param[in] dev
17323  *   Pointer to rte_eth_dev structure.
17324  *
17325  * @return
17326  *   0 on success, a negative errno value otherwise and rte_errno is set.
17327  */
17328 int
17329 mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17330 {
17331         struct mlx5_priv *priv = dev->data->dev_private;
17332         struct mlx5_dev_ctx_shared *sh = priv->sh;
17333         struct mlx5_flow_dv_match_params mask = {
17334                 .size = sizeof(mask.buf),
17335         };
17336         struct mlx5_flow_dv_match_params value = {
17337                 .size = sizeof(value.buf),
17338         };
17339         struct mlx5dv_flow_matcher_attr dv_attr = {
17340                 .type = IBV_FLOW_ATTR_NORMAL,
17341                 .priority = 0,
17342                 .match_criteria_enable = 0,
17343                 .match_mask = (void *)&mask,
17344         };
17345         struct mlx5_flow_tbl_resource *tbl = NULL;
17346         void *matcher = NULL;
17347         void *flow = NULL;
17348         int ret = -1;
17349
17350         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17351                                         0, 0, 0, NULL);
17352         if (!tbl)
17353                 goto err;
17354         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17355         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17356         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17357                                                tbl->obj, &matcher);
17358         if (ret)
17359                 goto err;
17360         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17361         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17362                                        &sh->dr_drop_action, &flow);
17363 err:
17364         /*
17365          * If the DR drop action is not supported on the root table, flow
17366          * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
17367          */
17368         if (!flow) {
17369                 if (matcher &&
17370                     (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17371                         DRV_LOG(INFO, "DR drop action is not supported in root table.");
17372                 else
17373                         DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
17374                 ret = -1;
17375         } else {
17376                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17377         }
17378         if (matcher)
17379                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17380         if (tbl)
17381                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17382         return ret;
17383 }
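
      /*
       * Usage sketch (an assumption, not part of the driver): the probe above is
       * intended to run once at device start. A zero return means the probe flow
       * was created, i.e. the shared DR drop action is usable on the root table.
       */
      static __rte_unused bool
      mlx5_flow_example_dr_drop_on_root(struct rte_eth_dev *dev)
      {
              bool supported = (mlx5_flow_discover_dr_action_support(dev) == 0);

              DRV_LOG(DEBUG, "DR drop action on root table is %ssupported",
                      supported ? "" : "not ");
              return supported;
      }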
17384
17385 /**
17386  * Validate the batch counter support in root table.
17387  *
17388  * Create a simple flow with invalid counter and drop action on root table to
17389  * validate if batch counter with offset on root table is supported or not.
17390  *
17391  * @param[in] dev
17392  *   Pointer to rte_eth_dev structure.
17393  *
17394  * @return
17395  *   0 on success, a negative errno value otherwise and rte_errno is set.
17396  */
17397 int
17398 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17399 {
17400         struct mlx5_priv *priv = dev->data->dev_private;
17401         struct mlx5_dev_ctx_shared *sh = priv->sh;
17402         struct mlx5_flow_dv_match_params mask = {
17403                 .size = sizeof(mask.buf),
17404         };
17405         struct mlx5_flow_dv_match_params value = {
17406                 .size = sizeof(value.buf),
17407         };
17408         struct mlx5dv_flow_matcher_attr dv_attr = {
17409                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17410                 .priority = 0,
17411                 .match_criteria_enable = 0,
17412                 .match_mask = (void *)&mask,
17413         };
17414         void *actions[2] = { 0 };
17415         struct mlx5_flow_tbl_resource *tbl = NULL;
17416         struct mlx5_devx_obj *dcs = NULL;
17417         void *matcher = NULL;
17418         void *flow = NULL;
17419         int ret = -1;
17420
17421         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17422                                         0, 0, 0, NULL);
17423         if (!tbl)
17424                 goto err;
17425         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17426         if (!dcs)
17427                 goto err;
17428         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17429                                                     &actions[0]);
17430         if (ret)
17431                 goto err;
17432         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17433         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17434         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17435                                                tbl->obj, &matcher);
17436         if (ret)
17437                 goto err;
17438         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17439         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17440                                        actions, &flow);
17441 err:
17442         /*
17443          * If a batch counter with offset is not supported, the driver does
17444          * not validate the invalid offset, so flow creation should succeed;
17445          * in that case, batch counters are unsupported in the root table.
17446          *
17447          * Otherwise, if flow creation fails, the counter offset is supported.
17448          */
17449         if (flow) {
17450                 DRV_LOG(INFO, "Batch counter is not supported in root "
17451                               "table. Switch to fallback mode.");
17452                 rte_errno = ENOTSUP;
17453                 ret = -rte_errno;
17454                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17455         } else {
17456                 /* Check the matcher to ensure validation failed at flow create. */
17457                 if (!matcher || errno != EINVAL)
17458                         DRV_LOG(ERR, "Unexpected error in counter offset "
17459                                      "support detection");
17460                 ret = 0;
17461         }
17462         if (actions[0])
17463                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17464         if (matcher)
17465                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17466         if (tbl)
17467                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17468         if (dcs)
17469                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17470         return ret;
17471 }
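
      /*
       * Decision sketch (an assumption, not upstream code): the probe above is
       * inverted on purpose - a non-zero return means batch counters with offset
       * cannot be used on the root table and a per-counter fallback is needed.
       */
      static __rte_unused bool
      mlx5_flow_example_counter_fallback(struct rte_eth_dev *dev)
      {
              /* Non-zero (-ENOTSUP) selects the fallback counter mode. */
              return mlx5_flow_dv_discover_counter_offset_support(dev) != 0;
      }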
17472
17473 /**
17474  * Query a devx counter.
17475  *
17476  * @param[in] dev
17477  *   Pointer to the Ethernet device structure.
17478  * @param[in] counter
17479  *   Index to the flow counter.
17480  * @param[in] clear
17481  *   Set to clear the counter statistics.
17482  * @param[out] pkts
17483  *   The statistics value of packets.
17484  * @param[out] bytes
17485  *   The statistics value of bytes.
17486  *
17487  * @return
17488  *   0 on success, otherwise return -1.
17489  */
17490 static int
17491 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17492                       uint64_t *pkts, uint64_t *bytes)
17493 {
17494         struct mlx5_priv *priv = dev->data->dev_private;
17495         struct mlx5_flow_counter *cnt;
17496         uint64_t inn_pkts, inn_bytes;
17497         int ret;
17498
17499         if (!priv->sh->devx)
17500                 return -1;
17501
17502         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17503         if (ret)
17504                 return -1;
17505         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17506         *pkts = inn_pkts - cnt->hits;
17507         *bytes = inn_bytes - cnt->bytes;
17508         if (clear) {
17509                 cnt->hits = inn_pkts;
17510                 cnt->bytes = inn_bytes;
17511         }
17512         return 0;
17513 }
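
      /*
       * Usage sketch (an assumption): with clear set, the hits/bytes baselines
       * are moved to the current HW values, so consecutive calls yield
       * per-interval deltas rather than absolute totals. The helper name is
       * hypothetical.
       */
      static __rte_unused int
      flow_dv_example_counter_delta(struct rte_eth_dev *dev, uint32_t counter,
                                    uint64_t *d_pkts, uint64_t *d_bytes)
      {
              /* Returns only the traffic seen since the previous clearing query. */
              return flow_dv_counter_query(dev, counter, true, d_pkts, d_bytes);
      }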
17514
17515 /**
17516  * Get aged-out flows.
17517  *
17518  * @param[in] dev
17519  *   Pointer to the Ethernet device structure.
17520  * @param[in] context
17521  *   The address of an array of pointers to the aged-out flow contexts.
17522  * @param[in] nb_contexts
17523  *   The length of the context array.
17524  * @param[out] error
17525  *   Perform verbose error reporting if not NULL. Initialized in case of
17526  *   error only.
17527  *
17528  * @return
17529  *   The number of aged-out flows reported on success, otherwise a
17530  *   negative errno value. If nb_contexts is 0, return the total number
17531  *   of aged-out contexts. Otherwise, return the number of aged-out
17532  *   flows reported in the context array.
17534  */
17535 static int
17536 flow_dv_get_aged_flows(struct rte_eth_dev *dev,
17537                     void **context,
17538                     uint32_t nb_contexts,
17539                     struct rte_flow_error *error)
17540 {
17541         struct mlx5_priv *priv = dev->data->dev_private;
17542         struct mlx5_age_info *age_info;
17543         struct mlx5_age_param *age_param;
17544         struct mlx5_flow_counter *counter;
17545         struct mlx5_aso_age_action *act;
17546         int nb_flows = 0;
17547
17548         if (nb_contexts && !context)
17549                 return rte_flow_error_set(error, EINVAL,
17550                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17551                                           NULL, "empty context");
17552         age_info = GET_PORT_AGE_INFO(priv);
17553         rte_spinlock_lock(&age_info->aged_sl);
17554         LIST_FOREACH(act, &age_info->aged_aso, next) {
17555                 nb_flows++;
17556                 if (nb_contexts) {
17557                         context[nb_flows - 1] =
17558                                                 act->age_params.context;
17559                         if (!(--nb_contexts))
17560                                 break;
17561                 }
17562         }
17563         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17564                 nb_flows++;
17565                 if (nb_contexts) {
17566                         age_param = MLX5_CNT_TO_AGE(counter);
17567                         context[nb_flows - 1] = age_param->context;
17568                         if (!(--nb_contexts))
17569                                 break;
17570                 }
17571         }
17572         rte_spinlock_unlock(&age_info->aged_sl);
17573         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17574         return nb_flows;
17575 }
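
      /*
       * Usage sketch (an assumption, not upstream code): the documented two-step
       * pattern - query the number of aged-out flows first, then fetch the
       * contexts into a properly sized array.
       */
      static __rte_unused int
      flow_dv_example_drain_aged(struct rte_eth_dev *dev,
                                 struct rte_flow_error *error)
      {
              int n = flow_dv_get_aged_flows(dev, NULL, 0, error);
              void **ctx;

              if (n <= 0)
                      return n;
              ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * n, 0,
                                SOCKET_ID_ANY);
              if (ctx == NULL)
                      return -ENOMEM;
              n = flow_dv_get_aged_flows(dev, ctx, n, error);
              mlx5_free(ctx);
              return n;
      }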
17576
17577 /*
17578  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17579  */
17580 static uint32_t
17581 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17582 {
17583         return flow_dv_counter_alloc(dev, 0);
17584 }
17585
17586 /**
17587  * Validate indirect action.
17588  * Dispatcher for action type specific validation.
17589  *
17590  * @param[in] dev
17591  *   Pointer to the Ethernet device structure.
17592  * @param[in] conf
17593  *   Indirect action configuration.
17594  * @param[in] action
17595  *   The indirect action object to validate.
17596  * @param[out] error
17597  *   Perform verbose error reporting if not NULL. Initialized in case of
17598  *   error only.
17599  *
17600  * @return
17601  *   0 on success, otherwise negative errno value.
17602  */
17603 static int
17604 flow_dv_action_validate(struct rte_eth_dev *dev,
17605                         const struct rte_flow_indir_action_conf *conf,
17606                         const struct rte_flow_action *action,
17607                         struct rte_flow_error *err)
17608 {
17609         struct mlx5_priv *priv = dev->data->dev_private;
17610
17611         RTE_SET_USED(conf);
17612         switch (action->type) {
17613         case RTE_FLOW_ACTION_TYPE_RSS:
17614                 /*
17615                  * priv->obj_ops is set according to driver capabilities.
17616                  * When DevX capabilities are sufficient, it is set to
17617                  * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
17618                  * ibv_obj_ops does not support the ind_table_modify
17619                  * operation, so the indirect RSS action cannot be used.
17621                  */
17622                 if (priv->obj_ops.ind_table_modify == NULL)
17623                         return rte_flow_error_set
17624                                         (err, ENOTSUP,
17625                                          RTE_FLOW_ERROR_TYPE_ACTION,
17626                                          NULL,
17627                                          "Indirect RSS action not supported");
17628                 return mlx5_validate_action_rss(dev, action, err);
17629         case RTE_FLOW_ACTION_TYPE_AGE:
17630                 if (!priv->sh->aso_age_mng)
17631                         return rte_flow_error_set(err, ENOTSUP,
17632                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17633                                                 NULL,
17634                                                 "Indirect age action not supported");
17635                 return flow_dv_validate_action_age(0, action, dev, err);
17636         case RTE_FLOW_ACTION_TYPE_COUNT:
17637                 return flow_dv_validate_action_count(dev, true, 0, err);
17638         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17639                 if (!priv->sh->ct_aso_en)
17640                         return rte_flow_error_set(err, ENOTSUP,
17641                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17642                                         "ASO CT is not supported");
17643                 return mlx5_validate_action_ct(dev, action->conf, err);
17644         default:
17645                 return rte_flow_error_set(err, ENOTSUP,
17646                                           RTE_FLOW_ERROR_TYPE_ACTION,
17647                                           NULL,
17648                                           "action type not supported");
17649         }
17650 }
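
      /*
       * Sketch (an assumption, not upstream code): validating an indirect RSS
       * action through the dispatcher above, roughly what the generic layer
       * does before rte_flow_action_handle_create().
       */
      static __rte_unused int
      flow_dv_example_validate_indirect_rss(struct rte_eth_dev *dev,
                                            const struct rte_flow_action_rss *rss,
                                            struct rte_flow_error *err)
      {
              const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
              const struct rte_flow_action action = {
                      .type = RTE_FLOW_ACTION_TYPE_RSS,
                      .conf = rss,
              };

              return flow_dv_action_validate(dev, &conf, &action, err);
      }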
17651
17652 /**
17653  * Check if the RSS configurations for colors of a meter policy match
17654  * each other, except the queues.
17655  *
17656  * @param[in] r1
17657  *   Pointer to the first RSS flow action.
17658  * @param[in] r2
17659  *   Pointer to the second RSS flow action.
17660  *
17661  * @return
17662  *   0 on match, 1 on conflict.
17663  */
17664 static inline int
17665 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
17666                                const struct rte_flow_action_rss *r2)
17667 {
17668         if (r1 == NULL || r2 == NULL)
17669                 return 0;
17670         if (!(r1->level <= 1 && r2->level <= 1) &&
17671             !(r1->level > 1 && r2->level > 1))
17672                 return 1;
17673         if (r1->types != r2->types &&
17674             !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
17675               (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
17676                 return 1;
17677         if (r1->key || r2->key) {
17678                 const void *key1 = r1->key ? r1->key : rss_hash_default_key;
17679                 const void *key2 = r2->key ? r2->key : rss_hash_default_key;
17680
17681                 if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
17682                         return 1;
17683         }
17684         return 0;
17685 }
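
      /*
       * Worked example (an assumption, not upstream code): two RSS actions that
       * differ only in their queue lists compare as matching, because queues
       * are deliberately excluded from the comparison above.
       */
      static __rte_unused int
      flow_dv_example_rss_compare(void)
      {
              static const uint16_t q1[] = { 0, 1 };
              static const uint16_t q2[] = { 2, 3 };
              struct rte_flow_action_rss r1 = {
                      .types = RTE_ETH_RSS_IP,
                      .queue_num = 2,
                      .queue = q1,
              };
              struct rte_flow_action_rss r2 = r1;

              r2.queue = q2;
              return flow_dv_mtr_policy_rss_compare(&r1, &r2); /* 0: match */
      }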
17686
17687 /**
17688  * Validate the meter hierarchy chain for meter policy.
17689  *
17690  * @param[in] dev
17691  *   Pointer to the Ethernet device structure.
17692  * @param[in] meter_id
17693  *   Meter id.
17694  * @param[in] action_flags
17695  *   Holds the actions detected until now.
17696  * @param[out] is_rss
17697  *   Is RSS or not.
17698  * @param[out] hierarchy_domain
17699  *   The domain bitmap for hierarchy policy.
17700  * @param[out] error
17701  *   Perform verbose error reporting if not NULL. Initialized in case of
17702  *   error only.
17703  *
17704  * @return
17705  *   0 on success, otherwise negative errno value with error set.
17706  */
17707 static int
17708 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17709                                   uint32_t meter_id,
17710                                   uint64_t action_flags,
17711                                   bool *is_rss,
17712                                   uint8_t *hierarchy_domain,
17713                                   struct rte_mtr_error *error)
17714 {
17715         struct mlx5_priv *priv = dev->data->dev_private;
17716         struct mlx5_flow_meter_info *fm;
17717         struct mlx5_flow_meter_policy *policy;
17718         uint8_t cnt = 1;
17719
17720         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17721                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17722                 return -rte_mtr_error_set(error, EINVAL,
17723                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17724                                         NULL,
17725                                         "Multiple fate actions not supported.");
17726         *hierarchy_domain = 0;
17727         while (true) {
17728                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17729                 if (!fm)
17730                         return -rte_mtr_error_set(error, EINVAL,
17731                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17732                                         "Meter not found in meter hierarchy.");
17733                 if (fm->def_policy)
17734                         return -rte_mtr_error_set(error, EINVAL,
17735                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17736                         "Non termination meter not supported in hierarchy.");
17737                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17738                 MLX5_ASSERT(policy);
17739                 /*
17740                  * Only inherit the supported domains of the first meter in
17741                  * hierarchy.
17742                  * One meter supports at least one domain.
17743                  */
17744                 if (!*hierarchy_domain) {
17745                         if (policy->transfer)
17746                                 *hierarchy_domain |=
17747                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17748                         if (policy->ingress)
17749                                 *hierarchy_domain |=
17750                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17751                         if (policy->egress)
17752                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17753                 }
17754                 if (!policy->is_hierarchy) {
17755                         *is_rss = policy->is_rss;
17756                         break;
17757                 }
17758                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17759                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17760                         return -rte_mtr_error_set(error, EINVAL,
17761                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17762                                         "Exceed max hierarchy meter number.");
17763         }
17764         return 0;
17765 }
17766
17767 /**
17768  * Validate meter policy actions.
17769  * Dispatcher for action type specific validation.
17770  *
17771  * @param[in] dev
17772  *   Pointer to the Ethernet device structure.
17773  * @param[in] actions
17774  *   The meter policy actions to validate, one action list per color.
17775  * @param[in] attr
17776  *   Attributes of the flow to determine the steering domain.
       * @param[out] is_rss
       *   Set to true when any color terminates with an RSS action.
       * @param[out] domain_bitmap
       *   The bitmap of steering domains supported by the policy.
       * @param[out] policy_mode
       *   The detected policy mode (default, green-only, yellow-only or all).
17777  * @param[out] error
17778  *   Perform verbose error reporting if not NULL. Initialized in case of
17779  *   error only.
17780  *
17781  * @return
17782  *   0 on success, otherwise negative errno value.
17783  */
17784 static int
17785 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17786                         const struct rte_flow_action *actions[RTE_COLORS],
17787                         struct rte_flow_attr *attr,
17788                         bool *is_rss,
17789                         uint8_t *domain_bitmap,
17790                         uint8_t *policy_mode,
17791                         struct rte_mtr_error *error)
17792 {
17793         struct mlx5_priv *priv = dev->data->dev_private;
17794         struct mlx5_dev_config *dev_conf = &priv->config;
17795         const struct rte_flow_action *act;
17796         uint64_t action_flags[RTE_COLORS] = {0};
17797         int actions_n;
17798         int i, ret;
17799         struct rte_flow_error flow_err;
17800         uint8_t domain_color[RTE_COLORS] = {0};
17801         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17802         uint8_t hierarchy_domain = 0;
17803         const struct rte_flow_action_meter *mtr;
17804         bool def_green = false;
17805         bool def_yellow = false;
17806         const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
17807
17808         if (!priv->config.dv_esw_en)
17809                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17810         *domain_bitmap = def_domain;
17811         /* The red color can only support the DROP action. */
17812         if (!actions[RTE_COLOR_RED] ||
17813             actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17814                 return -rte_mtr_error_set(error, ENOTSUP,
17815                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17816                                 NULL, "Red color only supports drop action.");
17817         /*
17818          * Check default policy actions:
17819          * Green / Yellow: no action, Red: drop action
17820          * Either G or Y will trigger default policy actions to be created.
17821          */
17822         if (!actions[RTE_COLOR_GREEN] ||
17823             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
17824                 def_green = true;
17825         if (!actions[RTE_COLOR_YELLOW] ||
17826             actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
17827                 def_yellow = true;
17828         if (def_green && def_yellow) {
17829                 *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
17830                 return 0;
17831         } else if (!def_green && def_yellow) {
17832                 *policy_mode = MLX5_MTR_POLICY_MODE_OG;
17833         } else if (def_green && !def_yellow) {
17834                 *policy_mode = MLX5_MTR_POLICY_MODE_OY;
17835         } else {
17836                 *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
17837         }
17838         /* Set to empty string in case of NULL pointer access by user. */
17839         flow_err.message = "";
17840         for (i = 0; i < RTE_COLORS; i++) {
17841                 act = actions[i];
17842                 for (action_flags[i] = 0, actions_n = 0;
17843                      act && act->type != RTE_FLOW_ACTION_TYPE_END;
17844                      act++) {
17845                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17846                                 return -rte_mtr_error_set(error, ENOTSUP,
17847                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17848                                           NULL, "too many actions");
17849                         switch (act->type) {
17850                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17851                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17852                                 if (!priv->config.dv_esw_en)
17853                                         return -rte_mtr_error_set(error,
17854                                         ENOTSUP,
17855                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17856                                         NULL, "PORT action validate check"
17857                                         " fail for ESW disable");
17858                                 ret = flow_dv_validate_action_port_id(dev,
17859                                                 action_flags[i],
17860                                                 act, attr, &flow_err);
17861                                 if (ret)
17862                                         return -rte_mtr_error_set(error,
17863                                         ENOTSUP,
17864                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17865                                         NULL, flow_err.message ?
17866                                         flow_err.message :
17867                                         "PORT action validate check fail");
17868                                 ++actions_n;
17869                                 action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
17870                                 break;
17871                         case RTE_FLOW_ACTION_TYPE_MARK:
17872                                 ret = flow_dv_validate_action_mark(dev, act,
17873                                                            action_flags[i],
17874                                                            attr, &flow_err);
17875                                 if (ret < 0)
17876                                         return -rte_mtr_error_set(error,
17877                                         ENOTSUP,
17878                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17879                                         NULL, flow_err.message ?
17880                                         flow_err.message :
17881                                         "Mark action validate check fail");
17882                                 if (dev_conf->dv_xmeta_en !=
17883                                         MLX5_XMETA_MODE_LEGACY)
17884                                         return -rte_mtr_error_set(error,
17885                                         ENOTSUP,
17886                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17887                                         NULL, "Extend MARK action is "
17888                                         "not supported. Please try use "
17889                                         "default policy for meter.");
17890                                 action_flags[i] |= MLX5_FLOW_ACTION_MARK;
17891                                 ++actions_n;
17892                                 break;
17893                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17894                                 ret = flow_dv_validate_action_set_tag(dev,
17895                                                         act, action_flags[i],
17896                                                         attr, &flow_err);
17897                                 if (ret)
17898                                         return -rte_mtr_error_set(error,
17899                                         ENOTSUP,
17900                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17901                                         NULL, flow_err.message ?
17902                                         flow_err.message :
17903                                         "Set tag action validate check fail");
17904                                 action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
17905                                 ++actions_n;
17906                                 break;
17907                         case RTE_FLOW_ACTION_TYPE_DROP:
17908                                 ret = mlx5_flow_validate_action_drop
17909                                         (action_flags[i], attr, &flow_err);
17910                                 if (ret < 0)
17911                                         return -rte_mtr_error_set(error,
17912                                         ENOTSUP,
17913                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17914                                         NULL, flow_err.message ?
17915                                         flow_err.message :
17916                                         "Drop action validate check fail");
17917                                 action_flags[i] |= MLX5_FLOW_ACTION_DROP;
17918                                 ++actions_n;
17919                                 break;
17920                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17921                                 /*
17922                                  * Check whether extensive
17923                                  * metadata feature is engaged.
17924                                  */
17925                                 if (dev_conf->dv_flow_en &&
17926                                     (dev_conf->dv_xmeta_en !=
17927                                      MLX5_XMETA_MODE_LEGACY) &&
17928                                     mlx5_flow_ext_mreg_supported(dev))
17929                                         return -rte_mtr_error_set(error,
17930                                           ENOTSUP,
17931                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17932                                           NULL, "Queue action with meta "
17933                                           "is not supported. Please try use "
17934                                           "default policy for meter.");
17935                                 ret = mlx5_flow_validate_action_queue(act,
17936                                                         action_flags[i], dev,
17937                                                         attr, &flow_err);
17938                                 if (ret < 0)
17939                                         return -rte_mtr_error_set(error,
17940                                           ENOTSUP,
17941                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17942                                           NULL, flow_err.message ?
17943                                           flow_err.message :
17944                                           "Queue action validate check fail");
17945                                 action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
17946                                 ++actions_n;
17947                                 break;
17948                         case RTE_FLOW_ACTION_TYPE_RSS:
17949                                 if (dev_conf->dv_flow_en &&
17950                                     (dev_conf->dv_xmeta_en !=
17951                                      MLX5_XMETA_MODE_LEGACY) &&
17952                                     mlx5_flow_ext_mreg_supported(dev))
17953                                         return -rte_mtr_error_set(error,
17954                                           ENOTSUP,
17955                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17956                                           NULL, "RSS action with meta "
17957                                           "is not supported. Please try use "
17958                                           "default policy for meter.");
17959                                 ret = mlx5_validate_action_rss(dev, act,
17960                                                                &flow_err);
17961                                 if (ret < 0)
17962                                         return -rte_mtr_error_set(error,
17963                                           ENOTSUP,
17964                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17965                                           NULL, flow_err.message ?
17966                                           flow_err.message :
17967                                           "RSS action validate check fail");
17968                                 action_flags[i] |= MLX5_FLOW_ACTION_RSS;
17969                                 ++actions_n;
17970                                 /* Either G or Y will set the RSS. */
17971                                 rss_color[i] = act->conf;
17972                                 break;
17973                         case RTE_FLOW_ACTION_TYPE_JUMP:
17974                                 ret = flow_dv_validate_action_jump(dev,
17975                                         NULL, act, action_flags[i],
17976                                         attr, true, &flow_err);
17977                                 if (ret)
17978                                         return -rte_mtr_error_set(error,
17979                                           ENOTSUP,
17980                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17981                                           NULL, flow_err.message ?
17982                                           flow_err.message :
17983                                           "Jump action validate check fail");
17984                                 ++actions_n;
17985                                 action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
17986                                 break;
17987                         /*
17988                          * Only the last meter in the hierarchy supports
17989                          * YELLOW color steering, so a policy containing a
17990                          * meter action must not carry its own yellow policy.
17991                          */
17992                         case RTE_FLOW_ACTION_TYPE_METER:
17993                                 if (i != RTE_COLOR_GREEN)
17994                                         return -rte_mtr_error_set(error,
17995                                                 ENOTSUP,
17996                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17997                                                 NULL,
17998                                                 "Meter hierarchy only supports GREEN color.");
17999                                 if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
18000                                         return -rte_mtr_error_set(error,
18001                                                 ENOTSUP,
18002                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18003                                                 NULL,
18004                                                 "No yellow policy should be provided in meter hierarchy.");
18005                                 mtr = act->conf;
18006                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
18007                                                         mtr->mtr_id,
18008                                                         action_flags[i],
18009                                                         is_rss,
18010                                                         &hierarchy_domain,
18011                                                         error);
18012                                 if (ret)
18013                                         return ret;
18014                                 ++actions_n;
18015                                 action_flags[i] |=
18016                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
18017                                 break;
18018                         default:
18019                                 return -rte_mtr_error_set(error, ENOTSUP,
18020                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18021                                         NULL,
18022                                         "Doesn't support optional action");
18023                         }
18024                 }
18025                 if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
18026                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
18027                 } else if ((action_flags[i] &
18028                           (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
18029                           (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
18030                         /*
18031                          * Only MLX5_XMETA_MODE_LEGACY is supported, so the
18032                          * MARK action is valid only in the ingress domain.
18033                          */
18034                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
18035                 } else {
18036                         domain_color[i] = def_domain;
18037                         if (action_flags[i] &&
18038                             !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18039                                 domain_color[i] &=
18040                                 ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
18041                 }
18042                 if (action_flags[i] &
18043                     MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
18044                         domain_color[i] &= hierarchy_domain;
18045                 /*
18046                  * Non-termination actions only support NIC Tx domain.
18047                  * The adjustment should be skipped when there is no
18048                  * action or only END is provided. The default domain
18049                  * bit-mask is set to find the MIN intersection.
18050                  * The action flags checking should also be skipped.
18051                  */
18052                 if ((def_green && i == RTE_COLOR_GREEN) ||
18053                     (def_yellow && i == RTE_COLOR_YELLOW))
18054                         continue;
18055                 /*
18056                  * Validate the drop action mutual exclusion
18057                  * with other actions. Drop action is mutually-exclusive
18058                  * with any other action, except for Count action.
18059                  */
18060                 if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
18061                     (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
18062                         return -rte_mtr_error_set(error, ENOTSUP,
18063                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18064                                 NULL, "Drop action is mutually-exclusive "
18065                                 "with any other action");
18066                 }
18067                 /* E-Switch has a few restrictions on using items and actions. */
18068                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
18069                         if (!mlx5_flow_ext_mreg_supported(dev) &&
18070                             action_flags[i] & MLX5_FLOW_ACTION_MARK)
18071                                 return -rte_mtr_error_set(error, ENOTSUP,
18072                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18073                                         NULL, "unsupported action MARK");
18074                         if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
18075                                 return -rte_mtr_error_set(error, ENOTSUP,
18076                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18077                                         NULL, "unsupported action QUEUE");
18078                         if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
18079                                 return -rte_mtr_error_set(error, ENOTSUP,
18080                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18081                                         NULL, "unsupported action RSS");
18082                         if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18083                                 return -rte_mtr_error_set(error, ENOTSUP,
18084                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18085                                         NULL, "no fate action is found");
18086                 } else {
18087                         if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
18088                             (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
18089                                 if ((domain_color[i] &
18090                                      MLX5_MTR_DOMAIN_EGRESS_BIT))
18091                                         domain_color[i] =
18092                                                 MLX5_MTR_DOMAIN_EGRESS_BIT;
18093                                 else
18094                                         return -rte_mtr_error_set(error,
18095                                                 ENOTSUP,
18096                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18097                                                 NULL,
18098                                                 "no fate action is found");
18099                         }
18100                 }
18101         }
18102         /* If both colors have RSS, the attributes should be the same. */
18103         if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
18104                                            rss_color[RTE_COLOR_YELLOW]))
18105                 return -rte_mtr_error_set(error, EINVAL,
18106                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18107                                           NULL, "policy RSS attr conflict");
18108         if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
18109                 *is_rss = true;
18110         /* "domain_color[C]" is non-zero for each color, default is ALL. */
18111         if (!def_green && !def_yellow &&
18112             domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
18113             !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
18114             !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
18115                 return -rte_mtr_error_set(error, EINVAL,
18116                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18117                                           NULL, "policy domains conflict");
18118         /*
18119          * At least one color policy is listed in the actions, so the
18120          * supported domains should be their intersection.
18121          */
18122         *domain_bitmap = domain_color[RTE_COLOR_GREEN] &
18123                          domain_color[RTE_COLOR_YELLOW];
18124         return 0;
18125 }
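
      /*
       * Worked example (an assumption): the resulting domain bitmap is the
       * per-color intersection computed above. A green policy valid for
       * ingress and egress combined with an ingress-only yellow policy yields
       * an ingress-only meter policy.
       */
      static __rte_unused uint8_t
      flow_dv_example_domain_intersect(void)
      {
              uint8_t green = MLX5_MTR_DOMAIN_INGRESS_BIT |
                              MLX5_MTR_DOMAIN_EGRESS_BIT;
              uint8_t yellow = MLX5_MTR_DOMAIN_INGRESS_BIT;

              return green & yellow; /* == MLX5_MTR_DOMAIN_INGRESS_BIT */
      }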
18126
18127 static int
18128 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
18129 {
18130         struct mlx5_priv *priv = dev->data->dev_private;
18131         int ret = 0;
18132
18133         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
18134                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
18135                                                 flags);
18136                 if (ret != 0)
18137                         return ret;
18138         }
18139         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
18140                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
18141                 if (ret != 0)
18142                         return ret;
18143         }
18144         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
18145                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
18146                 if (ret != 0)
18147                         return ret;
18148         }
18149         return 0;
18150 }
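
      /*
       * Usage sketch (an assumption, not upstream code): syncing every steering
       * domain of a port in one call; domains that were never created are
       * skipped by the NULL checks above.
       */
      static __rte_unused int
      flow_dv_example_sync_all(struct rte_eth_dev *dev, uint32_t flags)
      {
              const uint32_t all = MLX5_DOMAIN_BIT_NIC_RX |
                                   MLX5_DOMAIN_BIT_NIC_TX |
                                   MLX5_DOMAIN_BIT_FDB;

              return flow_dv_sync_domain(dev, all, flags);
      }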
18151
18152 /**
18153  * Discover the number of available flow priorities
18154  * by trying to create a flow with the highest priority value
18155  * for each possible number.
18156  *
18157  * @param[in] dev
18158  *   Ethernet device.
18159  * @param[in] vprio
18160  *   List of possible number of available priorities.
18161  * @param[in] vprio_n
18162  *   Size of @p vprio array.
18163  * @return
18164  *   On success, number of available flow priorities.
18165  *   On failure, a negative errno-style code and rte_errno is set.
18166  */
18167 static int
18168 flow_dv_discover_priorities(struct rte_eth_dev *dev,
18169                             const uint16_t *vprio, int vprio_n)
18170 {
18171         struct mlx5_priv *priv = dev->data->dev_private;
18172         struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
18173         struct rte_flow_item_eth eth;
18174         struct rte_flow_item item = {
18175                 .type = RTE_FLOW_ITEM_TYPE_ETH,
18176                 .spec = &eth,
18177                 .mask = &eth,
18178         };
18179         struct mlx5_flow_dv_matcher matcher = {
18180                 .mask = {
18181                         .size = sizeof(matcher.mask.buf),
18182                 },
18183         };
18184         union mlx5_flow_tbl_key tbl_key;
18185         struct mlx5_flow flow;
18186         void *action;
18187         struct rte_flow_error error;
18188         uint8_t misc_mask;
18189         int i, err, ret = -ENOTSUP;
18190
18191         /*
18192          * Prepare a flow with a catch-all pattern and a drop action.
18193          * Use the drop queue, because the shared drop action may be unavailable.
18194          */
18195         action = priv->drop_queue.hrxq->action;
18196         if (action == NULL) {
18197                 DRV_LOG(ERR, "Priority discovery requires a drop action");
18198                 rte_errno = ENOTSUP;
18199                 return -rte_errno;
18200         }
18201         memset(&flow, 0, sizeof(flow));
18202         flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
18203         if (flow.handle == NULL) {
18204                 DRV_LOG(ERR, "Cannot create flow handle");
18205                 rte_errno = ENOMEM;
18206                 return -rte_errno;
18207         }
18208         flow.ingress = true;
18209         flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
18210         flow.dv.actions[0] = action;
18211         flow.dv.actions_n = 1;
18212         memset(&eth, 0, sizeof(eth));
18213         flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
18214                                    &item, /* inner */ false, /* group */ 0);
18215         matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
18216         for (i = 0; i < vprio_n; i++) {
18217                 /* Configure the next proposed maximum priority. */
18218                 matcher.priority = vprio[i] - 1;
18219                 memset(&tbl_key, 0, sizeof(tbl_key));
18220                 err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
18221                                                /* tunnel */ NULL,
18222                                                /* group */ 0,
18223                                                &error);
18224                 if (err != 0) {
18225                         /* Matcher registration is pure SW and must always succeed. */
18226                         DRV_LOG(ERR, "Cannot register matcher");
18227                         ret = -rte_errno;
18228                         break;
18229                 }
18230                 /* Try to apply the flow to HW. */
18231                 misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
18232                 __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
18233                 err = mlx5_flow_os_create_flow
18234                                 (flow.handle->dvh.matcher->matcher_object,
18235                                  (void *)&flow.dv.value, flow.dv.actions_n,
18236                                  flow.dv.actions, &flow.handle->drv_flow);
18237                 if (err == 0) {
18238                         claim_zero(mlx5_flow_os_destroy_flow
18239                                                 (flow.handle->drv_flow));
18240                         flow.handle->drv_flow = NULL;
18241                 }
18242                 claim_zero(flow_dv_matcher_release(dev, flow.handle));
18243                 if (err != 0)
18244                         break;
18245                 ret = vprio[i];
18246         }
18247         mlx5_ipool_free(pool, flow.handle_idx);
18248         /* Set rte_errno if no expected priority value matched. */
18249         if (ret < 0)
18250                 rte_errno = -ret;
18251         return ret;
18252 }
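
      /*
       * Usage sketch (an assumption): probing with a list of candidate priority
       * counts; the return value is the largest candidate for which the
       * highest-priority flow could be created.
       */
      static __rte_unused int
      flow_dv_example_probe_priorities(struct rte_eth_dev *dev)
      {
              static const uint16_t vprio[] = { 8, 16 };

              return flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));
      }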
18253
18254 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
18255         .validate = flow_dv_validate,
18256         .prepare = flow_dv_prepare,
18257         .translate = flow_dv_translate,
18258         .apply = flow_dv_apply,
18259         .remove = flow_dv_remove,
18260         .destroy = flow_dv_destroy,
18261         .query = flow_dv_query,
18262         .create_mtr_tbls = flow_dv_create_mtr_tbls,
18263         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
18264         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
18265         .create_meter = flow_dv_mtr_alloc,
18266         .free_meter = flow_dv_aso_mtr_release_to_pool,
18267         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
18268         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
18269         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
18270         .create_policy_rules = flow_dv_create_policy_rules,
18271         .destroy_policy_rules = flow_dv_destroy_policy_rules,
18272         .create_def_policy = flow_dv_create_def_policy,
18273         .destroy_def_policy = flow_dv_destroy_def_policy,
18274         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
18275         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
18276         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
18277         .counter_alloc = flow_dv_counter_allocate,
18278         .counter_free = flow_dv_counter_free,
18279         .counter_query = flow_dv_counter_query,
18280         .get_aged_flows = flow_dv_get_aged_flows,
18281         .action_validate = flow_dv_action_validate,
18282         .action_create = flow_dv_action_create,
18283         .action_destroy = flow_dv_action_destroy,
18284         .action_update = flow_dv_action_update,
18285         .action_query = flow_dv_action_query,
18286         .sync_domain = flow_dv_sync_domain,
18287         .discover_priorities = flow_dv_discover_priorities,
18288         .item_create = flow_dv_item_create,
18289         .item_release = flow_dv_item_release,
18290 };
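
      /*
       * Dispatch sketch (an assumption, not upstream code): the generic flow
       * layer reaches the DV driver through the ops table above, e.g. querying
       * a counter via the registered callback rather than calling the static
       * function directly.
       */
      static __rte_unused int
      flow_dv_example_query_via_ops(struct rte_eth_dev *dev, uint32_t counter,
                                    uint64_t *pkts, uint64_t *bytes)
      {
              const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;

              return fops->counter_query(dev, counter, false, pkts, bytes);
      }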
18291
18292 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
18293