/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (priv->pci_dev == NULL)
                return 0;
        switch (priv->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
                return (int16_t)0xfffe;
        default:
                return 0;
        }
}

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() rejects multiple L3/L4 layer combinations except in
 * tunnel mode. In tunnel mode, the items to be modified are the outermost
 * ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. The layer
         * flags from the prefix flow must be used, as the suffix flow may
         * not carry the user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
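
/*
 * Illustrative example (added note, not part of the original code): for
 * an item list of ETH / IPV4 / UDP / END this routine sets attr->ipv4 = 1
 * and attr->udp = 1, while a suffix flow whose handle already carries the
 * layer flags inherits the same bits from dev_flow->handle->layers.
 */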

/*
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_errno = ENOMEM;
                return NULL;
        }
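        /*
         * Atomically publish the newly created hash list. If another
         * thread installed its own list first, destroy the local copy
         * and return the list that won the race.
         */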
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
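
/*
 * Illustrative example (added note): OF_SET_VLAN_PCP with vlan_pcp == 5
 * sets the TCI PCP bits to 5 << 13 == 0xa000; OF_SET_VLAN_VID with
 * vlan_vid == RTE_BE16(0x123) sets the low 12 TCI bits to 0x123.
 */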

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
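
/*
 * Worked example (added note): for data = {0xAA, 0xBB, 0xCC} and
 * size == 3, the 16-bit load yields 0xAABB and the third byte is
 * appended, so the function returns 0xAABBCC regardless of host
 * endianness.
 */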

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
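                /*
                 * Example (added note): a fetched mask of 0x00fff000 with
                 * carry_b == 0 gives off_b = 12 (the lowest set bit) and
                 * size_b = 32 - 12 - 8 = 12 bits (8 leading zero bits).
                 */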
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
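
/*
 * Illustrative result (added note): for SET_IPV4_SRC only hdr.src_addr
 * in the mask is non-zero (all ones), so a single SET command on
 * MLX5_MODI_OUT_SIPV4 with offset 0 and length 0 (the encoding for a
 * full 32-bit write) is generated.
 */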

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
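
/*
 * Note on the decrement above (added note): with
 * MLX5_MODIFICATION_TYPE_ADD an 8-bit addend of 0xFF acts as -1
 * modulo 256, so adding it decrements the TTL/hop limit by one.
 */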

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with the increment
                 * operation, UINT32_MAX must be added to Y X times:
                 * each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
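        /*
         * Illustrative arithmetic (added note): decrementing by 2 yields
         * value = 2 * UINT32_MAX = 0x1FFFFFFFE, which truncates to the
         * 32-bit value 0xFFFFFFFE, i.e. -2 modulo 2^32.
         */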
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with the increment
                 * operation, UINT32_MAX must be added to Y X times:
                 * each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
                } else {
                        reg_dst.offset = 0;
                        mask = rte_cpu_to_be_32(reg_c0);
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
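
/*
 * Illustrative example (added note, hypothetical mask value): with
 * priv->sh->dv_regc0_mask == 0x00ff0000 and conf->dst == REG_C_0 the
 * copy is placed at dst_offset rte_bsf32(0x00ff0000) == 16 and the
 * item mask becomes rte_cpu_to_be_32(0xff), confining the copy to the
 * reg_c[0] bits available to the PMD.
 */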

/**
 * Convert MARK action to DV specification. This routine is used
 * only in extensive metadata mode and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory to be big-endian. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1371         /*
1372          * Even though the DSCP offset within the IPv6 header is not byte
1373          * aligned, rdma-core accepts only a byte-aligned value occupying
1374          * bits 0 to 5, for compatibility with IPv4. Hence there is no
1375          * need to shift the bits in the IPv6 case.
1376          */
1377         ipv6.hdr.vtc_flow = conf->dscp;
1378         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1379         item.spec = &ipv6;
1380         item.mask = &ipv6_mask;
1381         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1382                                              MLX5_MODIFICATION_TYPE_SET, error);
1383 }
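
/*
 * Layout note (informational): in the IPv6 header, vtc_flow packs
 * version (4 bits), traffic class (8 bits) and flow label (20 bits);
 * DSCP is the upper six bits of the traffic class, so
 * RTE_IPV6_HDR_DSCP_MASK (0x0fc00000) shifted right by 22 yields the
 * byte-aligned 0x3f mask used above.
 */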
1384
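/**
 * Return the bit width of a packet field addressable by the MODIFY_FIELD
 * action. For immediate values and pointers the width is inherited from
 * the other half of the action (a negative inherit yields 0).
 */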
1385 static int
1386 mlx5_flow_item_field_width(struct mlx5_priv *priv,
1387                            enum rte_flow_field_id field, int inherit)
1388 {
1389         switch (field) {
1390         case RTE_FLOW_FIELD_START:
1391                 return 32;
1392         case RTE_FLOW_FIELD_MAC_DST:
1393         case RTE_FLOW_FIELD_MAC_SRC:
1394                 return 48;
1395         case RTE_FLOW_FIELD_VLAN_TYPE:
1396                 return 16;
1397         case RTE_FLOW_FIELD_VLAN_ID:
1398                 return 12;
1399         case RTE_FLOW_FIELD_MAC_TYPE:
1400                 return 16;
1401         case RTE_FLOW_FIELD_IPV4_DSCP:
1402                 return 6;
1403         case RTE_FLOW_FIELD_IPV4_TTL:
1404                 return 8;
1405         case RTE_FLOW_FIELD_IPV4_SRC:
1406         case RTE_FLOW_FIELD_IPV4_DST:
1407                 return 32;
1408         case RTE_FLOW_FIELD_IPV6_DSCP:
1409                 return 6;
1410         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1411                 return 8;
1412         case RTE_FLOW_FIELD_IPV6_SRC:
1413         case RTE_FLOW_FIELD_IPV6_DST:
1414                 return 128;
1415         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1416         case RTE_FLOW_FIELD_TCP_PORT_DST:
1417                 return 16;
1418         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1419         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1420                 return 32;
1421         case RTE_FLOW_FIELD_TCP_FLAGS:
1422                 return 9;
1423         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1424         case RTE_FLOW_FIELD_UDP_PORT_DST:
1425                 return 16;
1426         case RTE_FLOW_FIELD_VXLAN_VNI:
1427         case RTE_FLOW_FIELD_GENEVE_VNI:
1428                 return 24;
1429         case RTE_FLOW_FIELD_GTP_TEID:
1430         case RTE_FLOW_FIELD_TAG:
1431                 return 32;
1432         case RTE_FLOW_FIELD_MARK:
1433                 return __builtin_popcount(priv->sh->dv_mark_mask);
1434         case RTE_FLOW_FIELD_META:
1435                 return __builtin_popcount(priv->sh->dv_meta_mask);
1436         case RTE_FLOW_FIELD_POINTER:
1437         case RTE_FLOW_FIELD_VALUE:
1438                 return inherit < 0 ? 0 : inherit;
1439         default:
1440                 MLX5_ASSERT(false);
1441         }
1442         return 0;
1443 }
1444
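/**
 * Fill the modify-header register info and mask entries for a given packet
 * field. Wide fields (MAC and IPv6 addresses) span several hardware
 * registers, so several entries may be produced; "width" counts the bits
 * still to be covered and is consumed entry by entry.
 */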
1445 static void
1446 mlx5_flow_field_id_to_modify_info
1447                 (const struct rte_flow_action_modify_data *data,
1448                  struct field_modify_info *info, uint32_t *mask,
1449                  uint32_t width, uint32_t *shift, struct rte_eth_dev *dev,
1450                  const struct rte_flow_attr *attr, struct rte_flow_error *error)
1451 {
1452         struct mlx5_priv *priv = dev->data->dev_private;
1453         uint32_t idx = 0;
1454         uint32_t off = 0;
1455
1456         switch (data->field) {
1457         case RTE_FLOW_FIELD_START:
1458                 /* not supported yet */
1459                 MLX5_ASSERT(false);
1460                 break;
1461         case RTE_FLOW_FIELD_MAC_DST:
1462                 off = data->offset > 16 ? data->offset - 16 : 0;
1463                 if (mask) {
1464                         if (data->offset < 16) {
1465                                 info[idx] = (struct field_modify_info){2, 4,
1466                                                 MLX5_MODI_OUT_DMAC_15_0};
1467                                 if (width < 16) {
1468                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1469                                                                  (16 - width));
1470                                         width = 0;
1471                                 } else {
1472                                         mask[idx] = RTE_BE16(0xffff);
1473                                         width -= 16;
1474                                 }
1475                                 if (!width)
1476                                         break;
1477                                 ++idx;
1478                         }
1479                         info[idx] = (struct field_modify_info){4, 0,
1480                                                 MLX5_MODI_OUT_DMAC_47_16};
1481                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1482                                                       (32 - width)) << off);
1483                 } else {
1484                         if (data->offset < 16)
1485                                 info[idx++] = (struct field_modify_info){2, 4,
1486                                                 MLX5_MODI_OUT_DMAC_15_0};
1487                         info[idx] = (struct field_modify_info){4, 0,
1488                                                 MLX5_MODI_OUT_DMAC_47_16};
1489                 }
1490                 break;
1491         case RTE_FLOW_FIELD_MAC_SRC:
1492                 off = data->offset > 16 ? data->offset - 16 : 0;
1493                 if (mask) {
1494                         if (data->offset < 16) {
1495                                 info[idx] = (struct field_modify_info){2, 4,
1496                                                 MLX5_MODI_OUT_SMAC_15_0};
1497                                 if (width < 16) {
1498                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1499                                                                  (16 - width));
1500                                         width = 0;
1501                                 } else {
1502                                         mask[idx] = RTE_BE16(0xffff);
1503                                         width -= 16;
1504                                 }
1505                                 if (!width)
1506                                         break;
1507                                 ++idx;
1508                         }
1509                         info[idx] = (struct field_modify_info){4, 0,
1510                                                 MLX5_MODI_OUT_SMAC_47_16};
1511                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1512                                                       (32 - width)) << off);
1513                 } else {
1514                         if (data->offset < 16)
1515                                 info[idx++] = (struct field_modify_info){2, 4,
1516                                                 MLX5_MODI_OUT_SMAC_15_0};
1517                         info[idx] = (struct field_modify_info){4, 0,
1518                                                 MLX5_MODI_OUT_SMAC_47_16};
1519                 }
1520                 break;
1521         case RTE_FLOW_FIELD_VLAN_TYPE:
1522                 /* not supported yet */
1523                 break;
1524         case RTE_FLOW_FIELD_VLAN_ID:
1525                 info[idx] = (struct field_modify_info){2, 0,
1526                                         MLX5_MODI_OUT_FIRST_VID};
1527                 if (mask)
1528                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1529                 break;
1530         case RTE_FLOW_FIELD_MAC_TYPE:
1531                 info[idx] = (struct field_modify_info){2, 0,
1532                                         MLX5_MODI_OUT_ETHERTYPE};
1533                 if (mask)
1534                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1535                 break;
1536         case RTE_FLOW_FIELD_IPV4_DSCP:
1537                 info[idx] = (struct field_modify_info){1, 0,
1538                                         MLX5_MODI_OUT_IP_DSCP};
1539                 if (mask)
1540                         mask[idx] = 0x3f >> (6 - width);
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_TTL:
1543                 info[idx] = (struct field_modify_info){1, 0,
1544                                         MLX5_MODI_OUT_IPV4_TTL};
1545                 if (mask)
1546                         mask[idx] = 0xff >> (8 - width);
1547                 break;
1548         case RTE_FLOW_FIELD_IPV4_SRC:
1549                 info[idx] = (struct field_modify_info){4, 0,
1550                                         MLX5_MODI_OUT_SIPV4};
1551                 if (mask)
1552                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1553                                                      (32 - width));
1554                 break;
1555         case RTE_FLOW_FIELD_IPV4_DST:
1556                 info[idx] = (struct field_modify_info){4, 0,
1557                                         MLX5_MODI_OUT_DIPV4};
1558                 if (mask)
1559                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1560                                                      (32 - width));
1561                 break;
1562         case RTE_FLOW_FIELD_IPV6_DSCP:
1563                 info[idx] = (struct field_modify_info){1, 0,
1564                                         MLX5_MODI_OUT_IP_DSCP};
1565                 if (mask)
1566                         mask[idx] = 0x3f >> (6 - width);
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1569                 info[idx] = (struct field_modify_info){1, 0,
1570                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1571                 if (mask)
1572                         mask[idx] = 0xff >> (8 - width);
1573                 break;
1574         case RTE_FLOW_FIELD_IPV6_SRC:
1575                 if (mask) {
1576                         if (data->offset < 32) {
1577                                 info[idx] = (struct field_modify_info){4, 12,
1578                                                 MLX5_MODI_OUT_SIPV6_31_0};
1579                                 if (width < 32) {
1580                                         mask[idx] =
1581                                                 rte_cpu_to_be_32(0xffffffff >>
1582                                                                  (32 - width));
1583                                         width = 0;
1584                                 } else {
1585                                         mask[idx] = RTE_BE32(0xffffffff);
1586                                         width -= 32;
1587                                 }
1588                                 if (!width)
1589                                         break;
1590                                 ++idx;
1591                         }
1592                         if (data->offset < 64) {
1593                                 info[idx] = (struct field_modify_info){4, 8,
1594                                                 MLX5_MODI_OUT_SIPV6_63_32};
1595                                 if (width < 32) {
1596                                         mask[idx] =
1597                                                 rte_cpu_to_be_32(0xffffffff >>
1598                                                                  (32 - width));
1599                                         width = 0;
1600                                 } else {
1601                                         mask[idx] = RTE_BE32(0xffffffff);
1602                                         width -= 32;
1603                                 }
1604                                 if (!width)
1605                                         break;
1606                                 ++idx;
1607                         }
1608                         if (data->offset < 96) {
1609                                 info[idx] = (struct field_modify_info){4, 4,
1610                                                 MLX5_MODI_OUT_SIPV6_95_64};
1611                                 if (width < 32) {
1612                                         mask[idx] =
1613                                                 rte_cpu_to_be_32(0xffffffff >>
1614                                                                  (32 - width));
1615                                         width = 0;
1616                                 } else {
1617                                         mask[idx] = RTE_BE32(0xffffffff);
1618                                         width -= 32;
1619                                 }
1620                                 if (!width)
1621                                         break;
1622                                 ++idx;
1623                         }
1624                         info[idx] = (struct field_modify_info){4, 0,
1625                                                 MLX5_MODI_OUT_SIPV6_127_96};
1626                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1627                                                      (32 - width));
1628                 } else {
1629                         if (data->offset < 32)
1630                                 info[idx++] = (struct field_modify_info){4, 12,
1631                                                 MLX5_MODI_OUT_SIPV6_31_0};
1632                         if (data->offset < 64)
1633                                 info[idx++] = (struct field_modify_info){4, 8,
1634                                                 MLX5_MODI_OUT_SIPV6_63_32};
1635                         if (data->offset < 96)
1636                                 info[idx++] = (struct field_modify_info){4, 4,
1637                                                 MLX5_MODI_OUT_SIPV6_95_64};
1638                         if (data->offset < 128)
1639                                 info[idx++] = (struct field_modify_info){4, 0,
1640                                                 MLX5_MODI_OUT_SIPV6_127_96};
1641                 }
1642                 break;
1643         case RTE_FLOW_FIELD_IPV6_DST:
1644                 if (mask) {
1645                         if (data->offset < 32) {
1646                                 info[idx] = (struct field_modify_info){4, 12,
1647                                                 MLX5_MODI_OUT_DIPV6_31_0};
1648                                 if (width < 32) {
1649                                         mask[idx] =
1650                                                 rte_cpu_to_be_32(0xffffffff >>
1651                                                                  (32 - width));
1652                                         width = 0;
1653                                 } else {
1654                                         mask[idx] = RTE_BE32(0xffffffff);
1655                                         width -= 32;
1656                                 }
1657                                 if (!width)
1658                                         break;
1659                                 ++idx;
1660                         }
1661                         if (data->offset < 64) {
1662                                 info[idx] = (struct field_modify_info){4, 8,
1663                                                 MLX5_MODI_OUT_DIPV6_63_32};
1664                                 if (width < 32) {
1665                                         mask[idx] =
1666                                                 rte_cpu_to_be_32(0xffffffff >>
1667                                                                  (32 - width));
1668                                         width = 0;
1669                                 } else {
1670                                         mask[idx] = RTE_BE32(0xffffffff);
1671                                         width -= 32;
1672                                 }
1673                                 if (!width)
1674                                         break;
1675                                 ++idx;
1676                         }
1677                         if (data->offset < 96) {
1678                                 info[idx] = (struct field_modify_info){4, 4,
1679                                                 MLX5_MODI_OUT_DIPV6_95_64};
1680                                 if (width < 32) {
1681                                         mask[idx] =
1682                                                 rte_cpu_to_be_32(0xffffffff >>
1683                                                                  (32 - width));
1684                                         width = 0;
1685                                 } else {
1686                                         mask[idx] = RTE_BE32(0xffffffff);
1687                                         width -= 32;
1688                                 }
1689                                 if (!width)
1690                                         break;
1691                                 ++idx;
1692                         }
1693                         info[idx] = (struct field_modify_info){4, 0,
1694                                                 MLX5_MODI_OUT_DIPV6_127_96};
1695                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1696                                                      (32 - width));
1697                 } else {
1698                         if (data->offset < 32)
1699                                 info[idx++] = (struct field_modify_info){4, 12,
1700                                                 MLX5_MODI_OUT_DIPV6_31_0};
1701                         if (data->offset < 64)
1702                                 info[idx++] = (struct field_modify_info){4, 8,
1703                                                 MLX5_MODI_OUT_DIPV6_63_32};
1704                         if (data->offset < 96)
1705                                 info[idx++] = (struct field_modify_info){4, 4,
1706                                                 MLX5_MODI_OUT_DIPV6_95_64};
1707                         if (data->offset < 128)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_127_96};
1710                 }
1711                 break;
1712         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1713                 info[idx] = (struct field_modify_info){2, 0,
1714                                         MLX5_MODI_OUT_TCP_SPORT};
1715                 if (mask)
1716                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1717                 break;
1718         case RTE_FLOW_FIELD_TCP_PORT_DST:
1719                 info[idx] = (struct field_modify_info){2, 0,
1720                                         MLX5_MODI_OUT_TCP_DPORT};
1721                 if (mask)
1722                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1723                 break;
1724         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1725                 info[idx] = (struct field_modify_info){4, 0,
1726                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1727                 if (mask)
1728                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1729                                                      (32 - width));
1730                 break;
1731         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1732                 info[idx] = (struct field_modify_info){4, 0,
1733                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1734                 if (mask)
1735                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1736                                                      (32 - width));
1737                 break;
1738         case RTE_FLOW_FIELD_TCP_FLAGS:
1739                 info[idx] = (struct field_modify_info){2, 0,
1740                                         MLX5_MODI_OUT_TCP_FLAGS};
1741                 if (mask)
1742                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1743                 break;
1744         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1745                 info[idx] = (struct field_modify_info){2, 0,
1746                                         MLX5_MODI_OUT_UDP_SPORT};
1747                 if (mask)
1748                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1749                 break;
1750         case RTE_FLOW_FIELD_UDP_PORT_DST:
1751                 info[idx] = (struct field_modify_info){2, 0,
1752                                         MLX5_MODI_OUT_UDP_DPORT};
1753                 if (mask)
1754                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1755                 break;
1756         case RTE_FLOW_FIELD_VXLAN_VNI:
1757                 /* not supported yet */
1758                 break;
1759         case RTE_FLOW_FIELD_GENEVE_VNI:
1760                 /* not supported yet */
1761                 break;
1762         case RTE_FLOW_FIELD_GTP_TEID:
1763                 info[idx] = (struct field_modify_info){4, 0,
1764                                         MLX5_MODI_GTP_TEID};
1765                 if (mask)
1766                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1767                                                      (32 - width));
1768                 break;
1769         case RTE_FLOW_FIELD_TAG:
1770                 {
1771                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1772                                                    data->level, error);
1773                         if (reg < 0)
1774                                 return;
1775                         MLX5_ASSERT(reg != REG_NON);
1776                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1777                         info[idx] = (struct field_modify_info){4, 0,
1778                                                 reg_to_field[reg]};
1779                         if (mask)
1780                                 mask[idx] =
1781                                         rte_cpu_to_be_32(0xffffffff >>
1782                                                          (32 - width));
1783                 }
1784                 break;
1785         case RTE_FLOW_FIELD_MARK:
1786                 {
1787                         uint32_t mark_mask = priv->sh->dv_mark_mask;
1788                         uint32_t mark_count = __builtin_popcount(mark_mask);
1789                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1790                                                        0, error);
1791                         if (reg < 0)
1792                                 return;
1793                         MLX5_ASSERT(reg != REG_NON);
1794                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1795                         info[idx] = (struct field_modify_info){4, 0,
1796                                                 reg_to_field[reg]};
1797                         if (mask)
1798                                 mask[idx] = rte_cpu_to_be_32((mark_mask >>
1799                                          (mark_count - width)) & mark_mask);
1800                 }
1801                 break;
1802         case RTE_FLOW_FIELD_META:
1803                 {
1804                         uint32_t meta_mask = priv->sh->dv_meta_mask;
1805                         uint32_t meta_count = __builtin_popcount(meta_mask);
1806                         uint32_t msk_c0 =
1807                                 rte_cpu_to_be_32(priv->sh->dv_regc0_mask);
1808                         uint32_t shl_c0 = rte_bsf32(msk_c0);
1809                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1810                         if (reg < 0)
1811                                 return;
1812                         MLX5_ASSERT(reg != REG_NON);
1813                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1814                         if (reg == REG_C_0)
1815                                 *shift = shl_c0;
1816                         info[idx] = (struct field_modify_info){4, 0,
1817                                                 reg_to_field[reg]};
1818                         if (mask)
1819                                 mask[idx] = rte_cpu_to_be_32((meta_mask >>
1820                                         (meta_count - width)) & meta_mask);
1821                 }
1822                 break;
1823         case RTE_FLOW_FIELD_POINTER:
1824         case RTE_FLOW_FIELD_VALUE:
1825         default:
1826                 MLX5_ASSERT(false);
1827                 break;
1828         }
1829 }
1830
1831 /**
1832  * Convert modify_field action to DV specification.
1833  *
1834  * @param[in] dev
1835  *   Pointer to the rte_eth_dev structure.
1836  * @param[in,out] resource
1837  *   Pointer to the modify-header resource.
1838  * @param[in] action
1839  *   Pointer to action specification.
1840  * @param[in] attr
1841  *   Attributes of flow that includes this item.
1842  * @param[out] error
1843  *   Pointer to the error structure.
1844  *
1845  * @return
1846  *   0 on success, a negative errno value otherwise and rte_errno is set.
1847  */
1848 static int
1849 flow_dv_convert_action_modify_field
1850                         (struct rte_eth_dev *dev,
1851                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1852                          const struct rte_flow_action *action,
1853                          const struct rte_flow_attr *attr,
1854                          struct rte_flow_error *error)
1855 {
1856         const struct rte_flow_action_modify_field *conf =
1857                 (const struct rte_flow_action_modify_field *)(action->conf);
1858         struct rte_flow_item item = {
1859                 .spec = NULL,
1860                 .mask = NULL
1861         };
1862         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1863                                                                 {0, 0, 0} };
1864         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1865                                                                 {0, 0, 0} };
1866         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1867         uint32_t type;
1868         uint32_t shift = 0;
1869
1870         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1871             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1872                 type = MLX5_MODIFICATION_TYPE_SET;
1873                 /* For SET fill the destination field (field) first. */
1874                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1875                                                   conf->width, &shift, dev,
1876                                                   attr, error);
1877                 item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1878                                         (void *)(uintptr_t)conf->src.pvalue :
1879                                         (void *)(uintptr_t)&conf->src.value;
1880         } else {
1881                 type = MLX5_MODIFICATION_TYPE_COPY;
1882                 /* For COPY fill the destination field (dcopy) without mask. */
1883                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1884                                                   conf->width, &shift, dev,
1885                                                   attr, error);
1886                 /* Then construct the source field (field) with mask. */
1887                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1888                                                   conf->width, &shift,
1889                                                   dev, attr, error);
1890         }
1891         item.mask = &mask;
1892         return flow_dv_convert_modify_action(&item,
1893                         field, dcopy, resource, type, error);
1894 }
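
/*
 * A hedged sketch (informational, not driver code) of the COPY path above:
 * copy the 32-bit TCP sequence number into TAG register 0. The register
 * index and the action itself are arbitrary example choices.
 *
 *	const struct rte_flow_action_modify_field copy_conf = {
 *		.operation = RTE_FLOW_MODIFY_SET,
 *		.dst = { .field = RTE_FLOW_FIELD_TAG, .level = 0 },
 *		.src = { .field = RTE_FLOW_FIELD_TCP_SEQ_NUM },
 *		.width = 32,
 *	};
 *
 * Since src.field is neither POINTER nor VALUE, the function selects
 * MLX5_MODIFICATION_TYPE_COPY and fills both dcopy (dst) and field (src).
 */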
1895
1896 /**
1897  * Validate MARK item.
1898  *
1899  * @param[in] dev
1900  *   Pointer to the rte_eth_dev structure.
1901  * @param[in] item
1902  *   Item specification.
1903  * @param[in] attr
1904  *   Attributes of flow that includes this item.
1905  * @param[out] error
1906  *   Pointer to error structure.
1907  *
1908  * @return
1909  *   0 on success, a negative errno value otherwise and rte_errno is set.
1910  */
1911 static int
1912 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1913                            const struct rte_flow_item *item,
1914                            const struct rte_flow_attr *attr __rte_unused,
1915                            struct rte_flow_error *error)
1916 {
1917         struct mlx5_priv *priv = dev->data->dev_private;
1918         struct mlx5_dev_config *config = &priv->config;
1919         const struct rte_flow_item_mark *spec = item->spec;
1920         const struct rte_flow_item_mark *mask = item->mask;
1921         const struct rte_flow_item_mark nic_mask = {
1922                 .id = priv->sh->dv_mark_mask,
1923         };
1924         int ret;
1925
1926         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1927                 return rte_flow_error_set(error, ENOTSUP,
1928                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1929                                           "extended metadata feature"
1930                                           " isn't enabled");
1931         if (!mlx5_flow_ext_mreg_supported(dev))
1932                 return rte_flow_error_set(error, ENOTSUP,
1933                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1934                                           "extended metadata register"
1935                                           " isn't supported");
1936         if (!nic_mask.id)
1937                 return rte_flow_error_set(error, ENOTSUP,
1938                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1939                                           "extended metadata register"
1940                                           " isn't available");
1941         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1942         if (ret < 0)
1943                 return ret;
1944         if (!spec)
1945                 return rte_flow_error_set(error, EINVAL,
1946                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1947                                           item->spec,
1948                                           "data cannot be empty");
1949         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1950                 return rte_flow_error_set(error, EINVAL,
1951                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1952                                           &spec->id,
1953                                           "mark id exceeds the limit");
1954         if (!mask)
1955                 mask = &nic_mask;
1956         if (!mask->id)
1957                 return rte_flow_error_set(error, EINVAL,
1958                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1959                                         "mask cannot be zero");
1960
1961         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1962                                         (const uint8_t *)&nic_mask,
1963                                         sizeof(struct rte_flow_item_mark),
1964                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1965         if (ret < 0)
1966                 return ret;
1967         return 0;
1968 }
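
/*
 * A hedged usage sketch (informational): matching a previously set mark,
 * subject to the extended-metadata checks above; 0x2a is an arbitrary
 * example id.
 *
 *	static const struct rte_flow_item_mark mark_spec = { .id = 0x2a };
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_MARK, .spec = &mark_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * With a NULL mask, validation falls back to nic_mask built from
 * dv_mark_mask.
 */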
1969
1970 /**
1971  * Validate META item.
1972  *
1973  * @param[in] dev
1974  *   Pointer to the rte_eth_dev structure.
1975  * @param[in] item
1976  *   Item specification.
1977  * @param[in] attr
1978  *   Attributes of flow that includes this item.
1979  * @param[out] error
1980  *   Pointer to error structure.
1981  *
1982  * @return
1983  *   0 on success, a negative errno value otherwise and rte_errno is set.
1984  */
1985 static int
1986 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1987                            const struct rte_flow_item *item,
1988                            const struct rte_flow_attr *attr,
1989                            struct rte_flow_error *error)
1990 {
1991         struct mlx5_priv *priv = dev->data->dev_private;
1992         struct mlx5_dev_config *config = &priv->config;
1993         const struct rte_flow_item_meta *spec = item->spec;
1994         const struct rte_flow_item_meta *mask = item->mask;
1995         struct rte_flow_item_meta nic_mask = {
1996                 .data = UINT32_MAX
1997         };
1998         int reg;
1999         int ret;
2000
2001         if (!spec)
2002                 return rte_flow_error_set(error, EINVAL,
2003                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2004                                           item->spec,
2005                                           "data cannot be empty");
2006         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2007                 if (!mlx5_flow_ext_mreg_supported(dev))
2008                         return rte_flow_error_set(error, ENOTSUP,
2009                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2010                                           "extended metadata register"
2011                                           " isn't supported");
2012                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2013                 if (reg < 0)
2014                         return reg;
2015                 if (reg == REG_NON)
2016                         return rte_flow_error_set(error, ENOTSUP,
2017                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2018                                         "unavailable extended metadata register");
2019                 if (reg == REG_B)
2020                         return rte_flow_error_set(error, ENOTSUP,
2021                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2022                                           "match on reg_b "
2023                                           "isn't supported");
2024                 if (reg != REG_A)
2025                         nic_mask.data = priv->sh->dv_meta_mask;
2026         } else {
2027                 if (attr->transfer)
2028                         return rte_flow_error_set(error, ENOTSUP,
2029                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2030                                         "extended metadata feature "
2031                                         "should be enabled when "
2032                                         "meta item is requested "
2033                                         "with e-switch mode");
2034                 if (attr->ingress)
2035                         return rte_flow_error_set(error, ENOTSUP,
2036                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2037                                         "match on metadata for ingress "
2038                                         "is not supported in legacy "
2039                                         "metadata mode");
2040         }
2041         if (!mask)
2042                 mask = &rte_flow_item_meta_mask;
2043         if (!mask->data)
2044                 return rte_flow_error_set(error, EINVAL,
2045                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2046                                         "mask cannot be zero");
2047
2048         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2049                                         (const uint8_t *)&nic_mask,
2050                                         sizeof(struct rte_flow_item_meta),
2051                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2052         return ret;
2053 }
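
/*
 * Informational sketch: a META match mirrors the MARK case above, e.g.
 * with the arbitrary example value 0x1234:
 *
 *	static const struct rte_flow_item_meta meta_spec = {
 *		.data = 0x1234 };
 *	{ .type = RTE_FLOW_ITEM_TYPE_META, .spec = &meta_spec },
 *
 * Transfer and ingress matching require extended metadata (dv_xmeta_en),
 * as enforced above.
 */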
2054
2055 /**
2056  * Validate TAG item.
2057  *
2058  * @param[in] dev
2059  *   Pointer to the rte_eth_dev structure.
2060  * @param[in] item
2061  *   Item specification.
2062  * @param[in] attr
2063  *   Attributes of flow that includes this item.
2064  * @param[out] error
2065  *   Pointer to error structure.
2066  *
2067  * @return
2068  *   0 on success, a negative errno value otherwise and rte_errno is set.
2069  */
2070 static int
2071 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2072                           const struct rte_flow_item *item,
2073                           const struct rte_flow_attr *attr __rte_unused,
2074                           struct rte_flow_error *error)
2075 {
2076         const struct rte_flow_item_tag *spec = item->spec;
2077         const struct rte_flow_item_tag *mask = item->mask;
2078         const struct rte_flow_item_tag nic_mask = {
2079                 .data = RTE_BE32(UINT32_MAX),
2080                 .index = 0xff,
2081         };
2082         int ret;
2083
2084         if (!mlx5_flow_ext_mreg_supported(dev))
2085                 return rte_flow_error_set(error, ENOTSUP,
2086                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2087                                           "extended metadata register"
2088                                           " isn't supported");
2089         if (!spec)
2090                 return rte_flow_error_set(error, EINVAL,
2091                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2092                                           item->spec,
2093                                           "data cannot be empty");
2094         if (!mask)
2095                 mask = &rte_flow_item_tag_mask;
2096         if (!mask->data)
2097                 return rte_flow_error_set(error, EINVAL,
2098                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2099                                         "mask cannot be zero");
2100
2101         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2102                                         (const uint8_t *)&nic_mask,
2103                                         sizeof(struct rte_flow_item_tag),
2104                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2105         if (ret < 0)
2106                 return ret;
2107         if (mask->index != 0xff)
2108                 return rte_flow_error_set(error, EINVAL,
2109                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2110                                           "partial mask for tag index"
2111                                           " is not supported");
2112         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2113         if (ret < 0)
2114                 return ret;
2115         MLX5_ASSERT(ret != REG_NON);
2116         return 0;
2117 }
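
/*
 * Informational sketch: matching TAG register 3 with the arbitrary
 * example value 7; the index mask must be full per the check above.
 *
 *	static const struct rte_flow_item_tag tag_spec = {
 *		.data = 7, .index = 3 };
 *	{ .type = RTE_FLOW_ITEM_TYPE_TAG, .spec = &tag_spec },
 */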
2118
2119 /**
2120  * Validate port_id item.
2121  *
2122  * @param[in] dev
2123  *   Pointer to the rte_eth_dev structure.
2124  * @param[in] item
2125  *   Item specification.
2126  * @param[in] attr
2127  *   Attributes of flow that includes this item.
2128  * @param[in] item_flags
2129  *   Bit-fields that holds the items detected until now.
2130  * @param[out] error
2131  *   Pointer to error structure.
2132  *
2133  * @return
2134  *   0 on success, a negative errno value otherwise and rte_errno is set.
2135  */
2136 static int
2137 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2138                               const struct rte_flow_item *item,
2139                               const struct rte_flow_attr *attr,
2140                               uint64_t item_flags,
2141                               struct rte_flow_error *error)
2142 {
2143         const struct rte_flow_item_port_id *spec = item->spec;
2144         const struct rte_flow_item_port_id *mask = item->mask;
2145         const struct rte_flow_item_port_id switch_mask = {
2146                         .id = 0xffffffff,
2147         };
2148         struct mlx5_priv *esw_priv;
2149         struct mlx5_priv *dev_priv;
2150         int ret;
2151
2152         if (!attr->transfer)
2153                 return rte_flow_error_set(error, EINVAL,
2154                                           RTE_FLOW_ERROR_TYPE_ITEM,
2155                                           NULL,
2156                                           "match on port id is valid only"
2157                                           " when transfer flag is enabled");
2158         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2159                 return rte_flow_error_set(error, ENOTSUP,
2160                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2161                                           "multiple source ports are not"
2162                                           " supported");
2163         if (!mask)
2164                 mask = &switch_mask;
2165         if (mask->id != 0xffffffff)
2166                 return rte_flow_error_set(error, ENOTSUP,
2167                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2168                                            mask,
2169                                            "no support for partial mask on"
2170                                            " \"id\" field");
2171         ret = mlx5_flow_item_acceptable
2172                                 (item, (const uint8_t *)mask,
2173                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2174                                  sizeof(struct rte_flow_item_port_id),
2175                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2176         if (ret)
2177                 return ret;
2178         if (!spec)
2179                 return 0;
2180         if (spec->id == MLX5_PORT_ESW_MGR)
2181                 return 0;
2182         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2183         if (!esw_priv)
2184                 return rte_flow_error_set(error, rte_errno,
2185                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2186                                           "failed to obtain E-Switch info for"
2187                                           " port");
2188         dev_priv = mlx5_dev_to_eswitch_info(dev);
2189         if (!dev_priv)
2190                 return rte_flow_error_set(error, rte_errno,
2191                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2192                                           NULL,
2193                                           "failed to obtain E-Switch info");
2194         if (esw_priv->domain_id != dev_priv->domain_id)
2195                 return rte_flow_error_set(error, EINVAL,
2196                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2197                                           "cannot match on a port from a"
2198                                           " different E-Switch");
2199         return 0;
2200 }
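
/*
 * Informational sketch: PORT_ID matching is valid only on transfer flows
 * and within a single E-Switch domain, per the checks above; port 1 is an
 * arbitrary example.
 *
 *	const struct rte_flow_attr attr = { .transfer = 1, .group = 1 };
 *	static const struct rte_flow_item_port_id pid_spec = { .id = 1 };
 *	{ .type = RTE_FLOW_ITEM_TYPE_PORT_ID, .spec = &pid_spec },
 */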
2201
2202 /**
2203  * Validate VLAN item.
2204  *
2205  * @param[in] item
2206  *   Item specification.
2207  * @param[in] item_flags
2208  *   Bit-fields that holds the items detected until now.
2209  * @param[in] dev
2210  *   Ethernet device flow is being created on.
2211  * @param[out] error
2212  *   Pointer to error structure.
2213  *
2214  * @return
2215  *   0 on success, a negative errno value otherwise and rte_errno is set.
2216  */
2217 static int
2218 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2219                            uint64_t item_flags,
2220                            struct rte_eth_dev *dev,
2221                            struct rte_flow_error *error)
2222 {
2223         const struct rte_flow_item_vlan *mask = item->mask;
2224         const struct rte_flow_item_vlan nic_mask = {
2225                 .tci = RTE_BE16(UINT16_MAX),
2226                 .inner_type = RTE_BE16(UINT16_MAX),
2227                 .has_more_vlan = 1,
2228         };
2229         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2230         int ret;
2231         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2232                                         MLX5_FLOW_LAYER_INNER_L4) :
2233                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2234                                         MLX5_FLOW_LAYER_OUTER_L4);
2235         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2236                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2237
2238         if (item_flags & vlanm)
2239                 return rte_flow_error_set(error, EINVAL,
2240                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2241                                           "multiple VLAN layers not supported");
2242         else if ((item_flags & l34m) != 0)
2243                 return rte_flow_error_set(error, EINVAL,
2244                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2245                                           "VLAN cannot follow L3/L4 layer");
2246         if (!mask)
2247                 mask = &rte_flow_item_vlan_mask;
2248         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2249                                         (const uint8_t *)&nic_mask,
2250                                         sizeof(struct rte_flow_item_vlan),
2251                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2252         if (ret)
2253                 return ret;
2254         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2255                 struct mlx5_priv *priv = dev->data->dev_private;
2256
2257                 if (priv->vmwa_context) {
2258                         /*
2259                          * A non-NULL context means a virtual machine with
2260                          * SR-IOV is enabled, so we must create a VLAN
2261                          * interface to make the hypervisor set up the
2262                          * E-Switch vport context correctly. We avoid creating
2263                          * multiple VLAN interfaces, so a VLAN tag mask cannot be supported.
2264                          */
2265                         return rte_flow_error_set(error, EINVAL,
2266                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2267                                                   item,
2268                                                   "VLAN tag mask is not"
2269                                                   " supported in virtual"
2270                                                   " environment");
2271                 }
2272         }
2273         return 0;
2274 }
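
/*
 * Informational sketch: in virtualized (vmwa) setups only the exact VID
 * mask RTE_BE16(0x0fff) is accepted on the outer VLAN, so a VID-only
 * match is the safe form; 0x123 is an arbitrary example VID.
 *
 *	static const struct rte_flow_item_vlan vlan_spec = {
 *		.tci = RTE_BE16(0x0123) };
 *	static const struct rte_flow_item_vlan vlan_mask = {
 *		.tci = RTE_BE16(0x0fff) };
 *	{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &vlan_spec,
 *	  .mask = &vlan_mask },
 */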
2275
2276 /*
2277  * GTP flags are contained in 1 byte of the format:
2278  * -------------------------------------------
2279  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2280  * |-----------------------------------------|
2281  * | value | Version | PT | Res | E | S | PN |
2282  * -------------------------------------------
2283  *
2284  * Matching is supported only for GTP flags E, S, PN.
2285  */
2286 #define MLX5_GTP_FLAGS_MASK     0x07
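
/*
 * Worked example (informational): v_pt_rsv_flags = 0x34 decodes as
 * version 1, PT 1, E 1, S 0, PN 0; only the low three bits (E, S, PN)
 * can be matched, hence the 0x07 mask.
 */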
2287
2288 /**
2289  * Validate GTP item.
2290  *
2291  * @param[in] dev
2292  *   Pointer to the rte_eth_dev structure.
2293  * @param[in] item
2294  *   Item specification.
2295  * @param[in] item_flags
2296  *   Bit-fields that holds the items detected until now.
2297  * @param[out] error
2298  *   Pointer to error structure.
2299  *
2300  * @return
2301  *   0 on success, a negative errno value otherwise and rte_errno is set.
2302  */
2303 static int
2304 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2305                           const struct rte_flow_item *item,
2306                           uint64_t item_flags,
2307                           struct rte_flow_error *error)
2308 {
2309         struct mlx5_priv *priv = dev->data->dev_private;
2310         const struct rte_flow_item_gtp *spec = item->spec;
2311         const struct rte_flow_item_gtp *mask = item->mask;
2312         const struct rte_flow_item_gtp nic_mask = {
2313                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2314                 .msg_type = 0xff,
2315                 .teid = RTE_BE32(0xffffffff),
2316         };
2317
2318         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2319                 return rte_flow_error_set(error, ENOTSUP,
2320                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2321                                           "GTP support is not enabled");
2322         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2323                 return rte_flow_error_set(error, ENOTSUP,
2324                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2325                                           "multiple tunnel layers not"
2326                                           " supported");
2327         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2328                 return rte_flow_error_set(error, EINVAL,
2329                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2330                                           "no outer UDP layer found");
2331         if (!mask)
2332                 mask = &rte_flow_item_gtp_mask;
2333         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2334                 return rte_flow_error_set(error, ENOTSUP,
2335                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2336                                           "Match is supported for GTP"
2337                                           " flags only");
2338         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2339                                          (const uint8_t *)&nic_mask,
2340                                          sizeof(struct rte_flow_item_gtp),
2341                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2342 }
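
/*
 * A hedged usage sketch (informational): GTP requires an outer UDP layer,
 * so a typical pattern is eth / ipv4 / udp / gtp, with an arbitrary
 * example TEID:
 *
 *	static const struct rte_flow_item_gtp gtp_spec = {
 *		.teid = RTE_BE32(1234) };
 *	{ .type = RTE_FLOW_ITEM_TYPE_GTP, .spec = &gtp_spec },
 */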
2343
2344 /**
2345  * Validate GTP PSC item.
2346  *
2347  * @param[in] item
2348  *   Item specification.
2349  * @param[in] last_item
2350  *   Previous validated item in the pattern items.
2351  * @param[in] gtp_item
2352  *   Previous GTP item specification.
2353  * @param[in] attr
2354  *   Pointer to flow attributes.
2355  * @param[out] error
2356  *   Pointer to error structure.
2357  *
2358  * @return
2359  *   0 on success, a negative errno value otherwise and rte_errno is set.
2360  */
2361 static int
2362 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2363                               uint64_t last_item,
2364                               const struct rte_flow_item *gtp_item,
2365                               const struct rte_flow_attr *attr,
2366                               struct rte_flow_error *error)
2367 {
2368         const struct rte_flow_item_gtp *gtp_spec;
2369         const struct rte_flow_item_gtp *gtp_mask;
2370         const struct rte_flow_item_gtp_psc *mask;
2371         const struct rte_flow_item_gtp_psc nic_mask = {
2372                 .hdr.type = 0xF,
2373                 .hdr.qfi = 0x3F,
2374         };
2375
2376         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2377                 return rte_flow_error_set
2378                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2379                          "GTP PSC item must be preceded with GTP item");
2380         gtp_spec = gtp_item->spec;
2381         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2382         /* Reject a GTP spec whose mask requires the E flag to be zero. */
2383         if (gtp_spec &&
2384                 (gtp_mask->v_pt_rsv_flags &
2385                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2386                 return rte_flow_error_set
2387                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2388                          "GTP E flag must be 1 to match GTP PSC");
2389         /* Check the flow is not created in group zero. */
2390         if (!attr->transfer && !attr->group)
2391                 return rte_flow_error_set
2392                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2393                          "GTP PSC is not supported for group 0");
2394         /* Without a PSC spec there is nothing further to validate. */
2395         if (!item->spec)
2396                 return 0;
2397         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2398         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2399                                          (const uint8_t *)&nic_mask,
2400                                          sizeof(struct rte_flow_item_gtp_psc),
2401                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2402 }
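
/*
 * Informational sketch: GTP PSC must follow a GTP item whose E flag may
 * be 1, and the flow must not reside in group 0; QFI 9 is an arbitrary
 * example.
 *
 *	static const struct rte_flow_item_gtp gtp_spec = {
 *		.v_pt_rsv_flags = MLX5_GTP_EXT_HEADER_FLAG };
 *	static const struct rte_flow_item_gtp_psc psc_spec = {
 *		.hdr.qfi = 9 };
 */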
2403
2404 /**
2405  * Validate IPV4 item.
2406  * Use the existing validation function mlx5_flow_validate_item_ipv4() and
2407  * add specific validation of the fragment_offset field.
2408  *
2409  * @param[in] dev
2410  *   Pointer to the rte_eth_dev structure.
2411  * @param[in] item
2412  *   Item specification.
2413  * @param[in] item_flags
2414  *   Bit-fields that holds the items detected until now.
2415  * @param[in] last_item
2416  *   Previous validated item in the pattern items.
2417  * @param[in] ether_type
2418  *   Type in the ethernet layer header (including dot1q).
2419  * @param[out] error
2420  *   Pointer to error structure.
2415  *
2416  * @return
2417  *   0 on success, a negative errno value otherwise and rte_errno is set.
2418  */
2419 static int
2420 flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2421                            const struct rte_flow_item *item,
2422                            uint64_t item_flags, uint64_t last_item,
2423                            uint16_t ether_type, struct rte_flow_error *error)
2424 {
2425         int ret;
2426         struct mlx5_priv *priv = dev->data->dev_private;
2427         const struct rte_flow_item_ipv4 *spec = item->spec;
2428         const struct rte_flow_item_ipv4 *last = item->last;
2429         const struct rte_flow_item_ipv4 *mask = item->mask;
2430         rte_be16_t fragment_offset_spec = 0;
2431         rte_be16_t fragment_offset_last = 0;
2432         struct rte_flow_item_ipv4 nic_ipv4_mask = {
2433                 .hdr = {
2434                         .src_addr = RTE_BE32(0xffffffff),
2435                         .dst_addr = RTE_BE32(0xffffffff),
2436                         .type_of_service = 0xff,
2437                         .fragment_offset = RTE_BE16(0xffff),
2438                         .next_proto_id = 0xff,
2439                         .time_to_live = 0xff,
2440                 },
2441         };
2442
2443         if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2444                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2445                 bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
2446                                priv->config.hca_attr.inner_ipv4_ihl;
2447                 if (!ihl_cap)
2448                         return rte_flow_error_set(error, ENOTSUP,
2449                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2450                                                   item,
2451                                                   "IPV4 ihl offload not supported");
2452                 nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2453         }
2454         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2455                                            ether_type, &nic_ipv4_mask,
2456                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2457         if (ret < 0)
2458                 return ret;
2459         if (spec && mask)
2460                 fragment_offset_spec = spec->hdr.fragment_offset &
2461                                        mask->hdr.fragment_offset;
2462         if (!fragment_offset_spec)
2463                 return 0;
2464         /*
2465          * spec and mask are valid, enforce using full mask to make sure the
2466          * complete value is used correctly.
2467          */
2468         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2469                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2470                 return rte_flow_error_set(error, EINVAL,
2471                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2472                                           item, "must use full mask for"
2473                                           " fragment_offset");
2474         /*
2475          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2476          * indicating this is 1st fragment of fragmented packet.
2477          * This is not yet supported in MLX5, return appropriate error message.
2478          */
2479         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2480                 return rte_flow_error_set(error, ENOTSUP,
2481                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2482                                           "match on first fragment not "
2483                                           "supported");
2484         if (fragment_offset_spec && !last)
2485                 return rte_flow_error_set(error, ENOTSUP,
2486                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2487                                           "specified value not supported");
2488         /* spec and last are valid, validate the specified range. */
2489         fragment_offset_last = last->hdr.fragment_offset &
2490                                mask->hdr.fragment_offset;
2491         /*
2492          * Match on fragment_offset spec 0x2001 and last 0x3fff
2493          * means MF is 1 and frag-offset is > 0.
2494          * This matches the 2nd fragment and onward, excluding the last.
2495          * This is not yet supported in MLX5; return an appropriate
2496          * error message.
2497          */
2498         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2499             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2500                 return rte_flow_error_set(error, ENOTSUP,
2501                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2502                                           last, "match on following "
2503                                           "fragments not supported");
2504         /*
2505          * Match on fragment_offset spec 0x0001 and last 0x1fff
2506          * means MF is 0 and frag-offset is > 0.
2507          * This matches the last fragment of a fragmented packet.
2508          * This is not yet supported in MLX5; return an appropriate
2509          * error message.
2510          */
2511         if (fragment_offset_spec == RTE_BE16(1) &&
2512             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2513                 return rte_flow_error_set(error, ENOTSUP,
2514                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2515                                           last, "match on last "
2516                                           "fragment not supported");
2517         /*
2518          * Match on fragment_offset spec 0x0001 and last 0x3fff
2519          * means MF and/or frag-offset is not 0.
2520          * This is a fragmented packet.
2521          * Other range values are invalid and rejected.
2522          */
2523         if (!(fragment_offset_spec == RTE_BE16(1) &&
2524               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2525                 return rte_flow_error_set(error, ENOTSUP,
2526                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2527                                           "specified range not supported");
2528         return 0;
2529 }
2530
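/*
 * Editorial sketch, not part of the driver: per the validation above, the
 * only spec/last range accepted on fragment_offset is spec 0x0001 with
 * last 0x3fff under a full 0x3fff mask, i.e. "match any fragment of a
 * fragmented packet". A hypothetical application-side item for that case
 * could look as follows (all names here are illustrative only).
 */
static const struct rte_flow_item_ipv4 ipv4_frag_any_spec = {
        .hdr = { .fragment_offset = RTE_BE16(1) },
};
static const struct rte_flow_item_ipv4 ipv4_frag_any_last = {
        /* MF flag plus the 13-bit offset field: 0x3fff. */
        .hdr = { .fragment_offset = RTE_BE16(RTE_IPV4_HDR_MF_FLAG |
                                             RTE_IPV4_HDR_OFFSET_MASK) },
};
static const struct rte_flow_item_ipv4 ipv4_frag_any_mask = {
        .hdr = { .fragment_offset = RTE_BE16(RTE_IPV4_HDR_MF_FLAG |
                                             RTE_IPV4_HDR_OFFSET_MASK) },
};
static const struct rte_flow_item ipv4_frag_any_item = {
        .type = RTE_FLOW_ITEM_TYPE_IPV4,
        .spec = &ipv4_frag_any_spec,
        .last = &ipv4_frag_any_last,
        .mask = &ipv4_frag_any_mask,
};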
2531 /**
2532  * Validate IPV6 fragment extension item.
2533  *
2534  * @param[in] item
2535  *   Item specification.
2536  * @param[in] item_flags
2537  *   Bit-fields that hold the items detected until now.
2538  * @param[out] error
2539  *   Pointer to error structure.
2540  *
2541  * @return
2542  *   0 on success, a negative errno value otherwise and rte_errno is set.
2543  */
2544 static int
2545 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2546                                     uint64_t item_flags,
2547                                     struct rte_flow_error *error)
2548 {
2549         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2550         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2551         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2552         rte_be16_t frag_data_spec = 0;
2553         rte_be16_t frag_data_last = 0;
2554         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2555         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2556                                       MLX5_FLOW_LAYER_OUTER_L4;
2557         int ret = 0;
2558         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2559                 .hdr = {
2560                         .next_header = 0xff,
2561                         .frag_data = RTE_BE16(0xffff),
2562                 },
2563         };
2564
2565         if (item_flags & l4m)
2566                 return rte_flow_error_set(error, EINVAL,
2567                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2568                                           "ipv6 fragment extension item cannot "
2569                                           "follow L4 item.");
2570         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2571             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2572                 return rte_flow_error_set(error, EINVAL,
2573                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2574                                           "ipv6 fragment extension item must "
2575                                           "follow ipv6 item");
2576         if (spec && mask)
2577                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2578         if (!frag_data_spec)
2579                 return 0;
2580         /*
2581          * spec and mask are valid, enforce using full mask to make sure the
2582          * complete value is used correctly.
2583          */
2584         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2585                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2586                 return rte_flow_error_set(error, EINVAL,
2587                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2588                                           item, "must use full mask for"
2589                                           " frag_data");
2590         /*
2591          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0,
2592          * i.e. the 1st fragment of a fragmented packet.
2593          */
2594         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2595                 return rte_flow_error_set(error, ENOTSUP,
2596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2597                                           "match on first fragment not "
2598                                           "supported");
2599         if (frag_data_spec && !last)
2600                 return rte_flow_error_set(error, EINVAL,
2601                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2602                                           "specified value not supported");
2603         ret = mlx5_flow_item_acceptable
2604                                 (item, (const uint8_t *)mask,
2605                                  (const uint8_t *)&nic_mask,
2606                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2607                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2608         if (ret)
2609                 return ret;
2610         /* spec and last are valid, validate the specified range. */
2611         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2612         /*
2613          * Match on frag_data spec 0x0009 and last 0xfff9
2614          * means M is 1 and frag-offset is > 0.
2615          * This matches the 2nd fragment and onward, excluding the last.
2616          * This is not yet supported in MLX5; return an appropriate
2617          * error message.
2618          */
2619         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2620                                        RTE_IPV6_EHDR_MF_MASK) &&
2621             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2622                 return rte_flow_error_set(error, ENOTSUP,
2623                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2624                                           last, "match on following "
2625                                           "fragments not supported");
2626         /*
2627          * Match on frag_data spec 0x0008 and last 0xfff8
2628          * means M is 0 and frag-offset is > 0.
2629          * This matches the last fragment of a fragmented packet.
2630          * This is not yet supported in MLX5; return an appropriate
2631          * error message.
2632          */
2633         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2634             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2635                 return rte_flow_error_set(error, ENOTSUP,
2636                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2637                                           last, "match on last "
2638                                           "fragment not supported");
2639         /* Other range values are invalid and rejected. */
2640         return rte_flow_error_set(error, EINVAL,
2641                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2642                                   "specified range not supported");
2643 }
2644
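/*
 * Editorial sketch, not part of the driver: every non-zero frag_data
 * spec/last range ends in one of the rejections above, so this item
 * currently passes validation only with frag_data unmasked. A
 * hypothetical item matching any packet that carries the fragment
 * extension header with a UDP payload (illustrative names only):
 */
static const struct rte_flow_item_ipv6_frag_ext ipv6_frag_udp_spec = {
        .hdr = { .next_header = 17 /* UDP */ },
};
static const struct rte_flow_item_ipv6_frag_ext ipv6_frag_udp_mask = {
        .hdr = { .next_header = 0xff }, /* frag_data left unmasked. */
};
static const struct rte_flow_item ipv6_frag_udp_item = {
        .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
        .spec = &ipv6_frag_udp_spec,
        .mask = &ipv6_frag_udp_mask,
};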
2645 /**
2646  * Validate ASO CT item.
2647  *
2648  * @param[in] dev
2649  *   Pointer to the rte_eth_dev structure.
2650  * @param[in] item
2651  *   Item specification.
2652  * @param[in] item_flags
2653  *   Pointer to bit-fields that hold the items detected until now.
2654  * @param[out] error
2655  *   Pointer to error structure.
2656  *
2657  * @return
2658  *   0 on success, a negative errno value otherwise and rte_errno is set.
2659  */
2660 static int
2661 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2662                              const struct rte_flow_item *item,
2663                              uint64_t *item_flags,
2664                              struct rte_flow_error *error)
2665 {
2666         const struct rte_flow_item_conntrack *spec = item->spec;
2667         const struct rte_flow_item_conntrack *mask = item->mask;
2668         RTE_SET_USED(dev);
2669         uint32_t flags;
2670
2671         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2672                 return rte_flow_error_set(error, EINVAL,
2673                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2674                                           "Only one CT is supported");
2675         if (!mask)
2676                 mask = &rte_flow_item_conntrack_mask;
2677         flags = spec ? spec->flags & mask->flags : 0;
2678         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2679             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2680              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2681              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2682                 return rte_flow_error_set(error, EINVAL,
2683                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2684                                           "Conflicting status bits");
2685         /* State change also needs to be considered. */
2686         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2687         return 0;
2688 }
2689
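/*
 * Editorial sketch, not part of the driver: a conntrack item the check
 * above accepts - VALID is set while INVALID/BAD stay clear in the
 * masked spec, so no status bits conflict. Illustrative names only.
 */
static const struct rte_flow_item_conntrack ct_item_spec = {
        .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
};
static const struct rte_flow_item_conntrack ct_item_mask = {
        .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
                 RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
                 RTE_FLOW_CONNTRACK_PKT_STATE_BAD,
};
static const struct rte_flow_item ct_state_item = {
        .type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
        .spec = &ct_item_spec,
        .mask = &ct_item_mask,
};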
2690 /**
2691  * Validate the pop VLAN action.
2692  *
2693  * @param[in] dev
2694  *   Pointer to the rte_eth_dev structure.
2695  * @param[in] action_flags
2696  *   Holds the actions detected until now.
2697  * @param[in] action
2698  *   Pointer to the pop vlan action.
2699  * @param[in] item_flags
2700  *   The items found in this flow rule.
2701  * @param[in] attr
2702  *   Pointer to flow attributes.
2703  * @param[out] error
2704  *   Pointer to error structure.
2705  *
2706  * @return
2707  *   0 on success, a negative errno value otherwise and rte_errno is set.
2708  */
2709 static int
2710 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2711                                  uint64_t action_flags,
2712                                  const struct rte_flow_action *action,
2713                                  uint64_t item_flags,
2714                                  const struct rte_flow_attr *attr,
2715                                  struct rte_flow_error *error)
2716 {
2717         const struct mlx5_priv *priv = dev->data->dev_private;
2718         struct mlx5_dev_ctx_shared *sh = priv->sh;
2719         bool direction_error = false;
2720
2721         if (!priv->sh->pop_vlan_action)
2722                 return rte_flow_error_set(error, ENOTSUP,
2723                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2724                                           NULL,
2725                                           "pop vlan action is not supported");
2726         /* Pop VLAN is not supported in egress except for CX6 FDB mode. */
2727         if (attr->transfer) {
2728                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2729                 bool is_cx5 = sh->steering_format_version ==
2730                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2731
2732                 if (fdb_tx && is_cx5)
2733                         direction_error = true;
2734         } else if (attr->egress) {
2735                 direction_error = true;
2736         }
2737         if (direction_error)
2738                 return rte_flow_error_set(error, ENOTSUP,
2739                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2740                                           NULL,
2741                                           "pop vlan action not supported for egress");
2742         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2743                 return rte_flow_error_set(error, ENOTSUP,
2744                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2745                                           "no support for multiple VLAN "
2746                                           "actions");
2747         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2748         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2749             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2750                 return rte_flow_error_set(error, ENOTSUP,
2751                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2752                                           NULL,
2753                                           "cannot pop vlan after decap without "
2754                                           "match on inner vlan in the flow");
2755         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2756         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2757             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2758                 return rte_flow_error_set(error, ENOTSUP,
2759                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2760                                           NULL,
2761                                           "cannot pop vlan without a "
2762                                           "match on (outer) vlan in the flow");
2763         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2764                 return rte_flow_error_set(error, EINVAL,
2765                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2766                                           "wrong action order, port_id should "
2767                                           "be after pop VLAN action");
2768         if (!attr->transfer && priv->representor)
2769                 return rte_flow_error_set(error, ENOTSUP,
2770                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2771                                           "pop vlan action for VF representor "
2772                                           "not supported on NIC table");
2773         return 0;
2774 }
2775
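/*
 * Editorial sketch, not part of the driver: a minimal ingress rule that
 * satisfies the pop-VLAN constraints above - the pattern matches an
 * (outer) VLAN tag, no decap precedes the pop, and no second VLAN
 * action is present. Illustrative names only.
 */
static const struct rte_flow_item pop_vlan_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_VLAN },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue pop_vlan_queue = { .index = 0 };
static const struct rte_flow_action pop_vlan_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &pop_vlan_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};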
2776 /**
2777  * Get VLAN default info from vlan match info.
2778  *
2779  * @param[in] items
2780  *   The list of item specifications.
2781  * @param[out] vlan
2782  *   Pointer to the VLAN info to fill.
2783  *
2784  * @return
2785  *   None. @p vlan is left unchanged if no VLAN item is found.
2786  */
2787 static void
2788 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2789                                   struct rte_vlan_hdr *vlan)
2790 {
2791         const struct rte_flow_item_vlan nic_mask = {
2792                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2793                                 MLX5DV_FLOW_VLAN_VID_MASK),
2794                 .inner_type = RTE_BE16(0xffff),
2795         };
2796
2797         if (items == NULL)
2798                 return;
2799         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2800                 int type = items->type;
2801
2802                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2803                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2804                         break;
2805         }
2806         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2807                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2808                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2809
2810                 /* If VLAN item in pattern doesn't contain data, return here. */
2811                 if (!vlan_v)
2812                         return;
2813                 if (!vlan_m)
2814                         vlan_m = &nic_mask;
2815                 /* Only full match values are accepted */
2816                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2817                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2818                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2819                         vlan->vlan_tci |=
2820                                 rte_be_to_cpu_16(vlan_v->tci &
2821                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2822                 }
2823                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2824                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2825                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2826                         vlan->vlan_tci |=
2827                                 rte_be_to_cpu_16(vlan_v->tci &
2828                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2829                 }
2830                 if (vlan_m->inner_type == nic_mask.inner_type)
2831                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2832                                                            vlan_m->inner_type);
2833         }
2834 }
2835
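/*
 * Editorial sketch, not part of the driver: the helper above rebuilds a
 * host-order TCI from fully-masked PCP and VID match fields. For
 * reference, the TCI layout it relies on is PCP(3) | DEI(1) | VID(12);
 * a hypothetical constructor for test values could be:
 */
static inline uint16_t
example_make_vlan_tci(uint8_t pcp, uint16_t vid)
{
        return (uint16_t)(((pcp << MLX5DV_FLOW_VLAN_PCP_SHIFT) &
                           MLX5DV_FLOW_VLAN_PCP_MASK) |
                          (vid & MLX5DV_FLOW_VLAN_VID_MASK));
}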
2836 /**
2837  * Validate the push VLAN action.
2838  *
2839  * @param[in] dev
2840  *   Pointer to the rte_eth_dev structure.
2841  * @param[in] action_flags
2842  *   Holds the actions detected until now.
2843  * @param[in] item_flags
2844  *   The items found in this flow rule.
2845  * @param[in] action
2846  *   Pointer to the action structure.
2847  * @param[in] attr
2848  *   Pointer to flow attributes
2849  * @param[out] error
2850  *   Pointer to error structure.
2851  *
2852  * @return
2853  *   0 on success, a negative errno value otherwise and rte_errno is set.
2854  */
2855 static int
2856 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2857                                   uint64_t action_flags,
2858                                   const struct rte_flow_item_vlan *vlan_m,
2859                                   const struct rte_flow_action *action,
2860                                   const struct rte_flow_attr *attr,
2861                                   struct rte_flow_error *error)
2862 {
2863         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2864         const struct mlx5_priv *priv = dev->data->dev_private;
2865         struct mlx5_dev_ctx_shared *sh = priv->sh;
2866         bool direction_error = false;
2867
2868         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2869             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2870                 return rte_flow_error_set(error, EINVAL,
2871                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2872                                           "invalid vlan ethertype");
2873         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2874                 return rte_flow_error_set(error, EINVAL,
2875                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2876                                           "wrong action order, port_id should "
2877                                           "be after push VLAN");
2878         /* Push VLAN is not supported in ingress except for CX6 FDB mode. */
2879         if (attr->transfer) {
2880                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2881                 bool is_cx5 = sh->steering_format_version ==
2882                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2883
2884                 if (!fdb_tx && is_cx5)
2885                         direction_error = true;
2886         } else if (attr->ingress) {
2887                 direction_error = true;
2888         }
2889         if (direction_error)
2890                 return rte_flow_error_set(error, ENOTSUP,
2891                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2892                                           NULL,
2893                                           "push vlan action not supported for ingress");
2894         if (!attr->transfer && priv->representor)
2895                 return rte_flow_error_set(error, ENOTSUP,
2896                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2897                                           "push vlan action for VF representor "
2898                                           "not supported on NIC table");
2899         if (vlan_m &&
2900             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2901             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2902                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2903             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2904             !(mlx5_flow_find_action
2905                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2906                 return rte_flow_error_set(error, EINVAL,
2907                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2908                                           "not full match mask on VLAN PCP and "
2909                                           "there is no of_set_vlan_pcp action, "
2910                                           "push VLAN action cannot figure out "
2911                                           "PCP value");
2912         if (vlan_m &&
2913             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2914             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2915                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2916             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2917             !(mlx5_flow_find_action
2918                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2919                 return rte_flow_error_set(error, EINVAL,
2920                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2921                                           "not full match mask on VLAN VID and "
2922                                           "there is no of_set_vlan_vid action, "
2923                                           "push VLAN action cannot figure out "
2924                                           "VID value");
2926         return 0;
2927 }
2928
2929 /**
2930  * Validate the set VLAN PCP.
2931  *
2932  * @param[in] action_flags
2933  *   Holds the actions detected until now.
2934  * @param[in] actions
2935  *   Pointer to the list of actions remaining in the flow rule.
2936  * @param[out] error
2937  *   Pointer to error structure.
2938  *
2939  * @return
2940  *   0 on success, a negative errno value otherwise and rte_errno is set.
2941  */
2942 static int
2943 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2944                                      const struct rte_flow_action actions[],
2945                                      struct rte_flow_error *error)
2946 {
2947         const struct rte_flow_action *action = actions;
2948         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2949
2950         if (conf->vlan_pcp > 7)
2951                 return rte_flow_error_set(error, EINVAL,
2952                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2953                                           "VLAN PCP value is too big");
2954         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2955                 return rte_flow_error_set(error, ENOTSUP,
2956                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2957                                           "set VLAN PCP action must follow "
2958                                           "the push VLAN action");
2959         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2960                 return rte_flow_error_set(error, ENOTSUP,
2961                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2962                                           "Multiple VLAN PCP modifications are "
2963                                           "not supported");
2964         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2965                 return rte_flow_error_set(error, EINVAL,
2966                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2967                                           "wrong action order, port_id should "
2968                                           "be after set VLAN PCP");
2969         return 0;
2970 }
2971
2972 /**
2973  * Validate the set VLAN VID.
2974  *
2975  * @param[in] item_flags
2976  *   Holds the items detected in this rule.
2977  * @param[in] action_flags
2978  *   Holds the actions detected until now.
2979  * @param[in] actions
2980  *   Pointer to the list of actions remaining in the flow rule.
2981  * @param[out] error
2982  *   Pointer to error structure.
2983  *
2984  * @return
2985  *   0 on success, a negative errno value otherwise and rte_errno is set.
2986  */
2987 static int
2988 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2989                                      uint64_t action_flags,
2990                                      const struct rte_flow_action actions[],
2991                                      struct rte_flow_error *error)
2992 {
2993         const struct rte_flow_action *action = actions;
2994         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2995
2996         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2997                 return rte_flow_error_set(error, EINVAL,
2998                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2999                                           "VLAN VID value is too big");
3000         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3001             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3002                 return rte_flow_error_set(error, ENOTSUP,
3003                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3004                                           "set VLAN VID action must follow push"
3005                                           " VLAN action or match on VLAN item");
3006         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3007                 return rte_flow_error_set(error, ENOTSUP,
3008                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3009                                           "Multiple VLAN VID modifications are "
3010                                           "not supported");
3011         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3012                 return rte_flow_error_set(error, EINVAL,
3013                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3014                                           "wrong action order, port_id should "
3015                                           "be after set VLAN VID");
3016         return 0;
3017 }
3018
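/*
 * Editorial sketch, not part of the driver, tying the three validators
 * above together: when the rule does not fully match VLAN PCP/VID, the
 * push-VLAN action must be followed by explicit of_set_vlan_vid and
 * of_set_vlan_pcp actions so the pushed tag is fully determined, and
 * any port_id fate action must come after them. Illustrative only.
 */
static const struct rte_flow_action_of_push_vlan push_vlan_conf = {
        .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
static const struct rte_flow_action_of_set_vlan_vid push_vlan_vid = {
        .vlan_vid = RTE_BE16(100), /* Host value must be <= 0xFFE. */
};
static const struct rte_flow_action_of_set_vlan_pcp push_vlan_pcp = {
        .vlan_pcp = 3, /* Must be <= 7. */
};
static const struct rte_flow_action push_vlan_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
          .conf = &push_vlan_conf },
        { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
          .conf = &push_vlan_vid },
        { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
          .conf = &push_vlan_pcp },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};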
3019 /**
3020  * Validate the FLAG action.
3021  *
3022  * @param[in] dev
3023  *   Pointer to the rte_eth_dev structure.
3024  * @param[in] action_flags
3025  *   Holds the actions detected until now.
3026  * @param[in] attr
3027  *   Pointer to flow attributes
3028  * @param[out] error
3029  *   Pointer to error structure.
3030  *
3031  * @return
3032  *   0 on success, a negative errno value otherwise and rte_errno is set.
3033  */
3034 static int
3035 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3036                              uint64_t action_flags,
3037                              const struct rte_flow_attr *attr,
3038                              struct rte_flow_error *error)
3039 {
3040         struct mlx5_priv *priv = dev->data->dev_private;
3041         struct mlx5_dev_config *config = &priv->config;
3042         int ret;
3043
3044         /* Fall back if no extended metadata register support. */
3045         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3046                 return mlx5_flow_validate_action_flag(action_flags, attr,
3047                                                       error);
3048         /* Extensive metadata mode requires registers. */
3049         if (!mlx5_flow_ext_mreg_supported(dev))
3050                 return rte_flow_error_set(error, ENOTSUP,
3051                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3052                                           "no metadata registers "
3053                                           "to support flag action");
3054         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3055                 return rte_flow_error_set(error, ENOTSUP,
3056                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3057                                           "extended metadata register"
3058                                           " isn't available");
3059         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3060         if (ret < 0)
3061                 return ret;
3062         MLX5_ASSERT(ret > 0);
3063         if (action_flags & MLX5_FLOW_ACTION_MARK)
3064                 return rte_flow_error_set(error, EINVAL,
3065                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3066                                           "can't mark and flag in same flow");
3067         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3068                 return rte_flow_error_set(error, EINVAL,
3069                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3070                                           "can't have 2 flag"
3071                                           " actions in same flow");
3072         return 0;
3073 }
3074
3075 /**
3076  * Validate MARK action.
3077  *
3078  * @param[in] dev
3079  *   Pointer to the rte_eth_dev structure.
3080  * @param[in] action
3081  *   Pointer to action.
3082  * @param[in] action_flags
3083  *   Holds the actions detected until now.
3084  * @param[in] attr
3085  *   Pointer to flow attributes
3086  * @param[out] error
3087  *   Pointer to error structure.
3088  *
3089  * @return
3090  *   0 on success, a negative errno value otherwise and rte_errno is set.
3091  */
3092 static int
3093 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3094                              const struct rte_flow_action *action,
3095                              uint64_t action_flags,
3096                              const struct rte_flow_attr *attr,
3097                              struct rte_flow_error *error)
3098 {
3099         struct mlx5_priv *priv = dev->data->dev_private;
3100         struct mlx5_dev_config *config = &priv->config;
3101         const struct rte_flow_action_mark *mark = action->conf;
3102         int ret;
3103
3104         if (is_tunnel_offload_active(dev))
3105                 return rte_flow_error_set(error, ENOTSUP,
3106                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3107                                           "no mark action "
3108                                           "if tunnel offload active");
3109         /* Fall back if no extended metadata register support. */
3110         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3111                 return mlx5_flow_validate_action_mark(action, action_flags,
3112                                                       attr, error);
3113         /* Extensive metadata mode requires registers. */
3114         if (!mlx5_flow_ext_mreg_supported(dev))
3115                 return rte_flow_error_set(error, ENOTSUP,
3116                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3117                                           "no metadata registers "
3118                                           "to support mark action");
3119         if (!priv->sh->dv_mark_mask)
3120                 return rte_flow_error_set(error, ENOTSUP,
3121                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3122                                           "extended metadata register"
3123                                           " isn't available");
3124         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3125         if (ret < 0)
3126                 return ret;
3127         MLX5_ASSERT(ret > 0);
3128         if (!mark)
3129                 return rte_flow_error_set(error, EINVAL,
3130                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3131                                           "configuration cannot be null");
3132         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3133                 return rte_flow_error_set(error, EINVAL,
3134                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3135                                           &mark->id,
3136                                           "mark id exceeds the limit");
3137         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3138                 return rte_flow_error_set(error, EINVAL,
3139                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3140                                           "can't flag and mark in same flow");
3141         if (action_flags & MLX5_FLOW_ACTION_MARK)
3142                 return rte_flow_error_set(error, EINVAL,
3143                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3144                                           "can't have 2 mark actions in same"
3145                                           " flow");
3146         return 0;
3147 }
3148
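/*
 * Editorial sketch, not part of the driver: FLAG and MARK are mutually
 * exclusive within one flow, so a rule tags packets either with a mark
 * ID (delivered to the application in the Rx mbuf) or with the single
 * flag bit, never both. A hypothetical mark usage (illustrative only):
 */
static const struct rte_flow_action_mark mark_conf = {
        .id = 0xbeef, /* Must stay below MLX5_FLOW_MARK_MAX & dv_mark_mask. */
};
static const struct rte_flow_action_queue mark_queue = { .index = 0 };
static const struct rte_flow_action mark_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_conf },
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &mark_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};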
3149 /**
3150  * Validate SET_META action.
3151  *
3152  * @param[in] dev
3153  *   Pointer to the rte_eth_dev structure.
3154  * @param[in] action
3155  *   Pointer to the action structure.
3156  * @param[in] action_flags
3157  *   Holds the actions detected until now.
3158  * @param[in] attr
3159  *   Pointer to flow attributes
3160  * @param[out] error
3161  *   Pointer to error structure.
3162  *
3163  * @return
3164  *   0 on success, a negative errno value otherwise and rte_errno is set.
3165  */
3166 static int
3167 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3168                                  const struct rte_flow_action *action,
3169                                  uint64_t action_flags __rte_unused,
3170                                  const struct rte_flow_attr *attr,
3171                                  struct rte_flow_error *error)
3172 {
3173         const struct rte_flow_action_set_meta *conf;
3174         uint32_t nic_mask = UINT32_MAX;
3175         int reg;
3176
3177         if (!mlx5_flow_ext_mreg_supported(dev))
3178                 return rte_flow_error_set(error, ENOTSUP,
3179                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3180                                           "extended metadata register"
3181                                           " isn't supported");
3182         reg = flow_dv_get_metadata_reg(dev, attr, error);
3183         if (reg < 0)
3184                 return reg;
3185         if (reg == REG_NON)
3186                 return rte_flow_error_set(error, ENOTSUP,
3187                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3188                                           "unavailable extended metadata register");
3189         if (reg != REG_A && reg != REG_B) {
3190                 struct mlx5_priv *priv = dev->data->dev_private;
3191
3192                 nic_mask = priv->sh->dv_meta_mask;
3193         }
3194         if (!(action->conf))
3195                 return rte_flow_error_set(error, EINVAL,
3196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3197                                           "configuration cannot be null");
3198         conf = (const struct rte_flow_action_set_meta *)action->conf;
3199         if (!conf->mask)
3200                 return rte_flow_error_set(error, EINVAL,
3201                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3202                                           "zero mask doesn't have any effect");
3203         if (conf->mask & ~nic_mask)
3204                 return rte_flow_error_set(error, EINVAL,
3205                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3206                                           "metadata must be within reg C0");
3207         return 0;
3208 }
3209
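/*
 * Editorial sketch, not part of the driver: SET_META needs a non-zero
 * mask, and when the metadata register is neither REG_A nor REG_B the
 * mask must fit the bits backed by reg C0. Illustrative values only.
 */
static const struct rte_flow_action_set_meta set_meta_conf = {
        .data = 0x1234,
        .mask = 0xffff, /* Non-zero and within the supported register bits. */
};
static const struct rte_flow_action set_meta_action = {
        .type = RTE_FLOW_ACTION_TYPE_SET_META,
        .conf = &set_meta_conf,
};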
3210 /**
3211  * Validate SET_TAG action.
3212  *
3213  * @param[in] dev
3214  *   Pointer to the rte_eth_dev structure.
3215  * @param[in] action
3216  *   Pointer to the action structure.
3217  * @param[in] action_flags
3218  *   Holds the actions detected until now.
3219  * @param[in] attr
3220  *   Pointer to flow attributes
3221  * @param[out] error
3222  *   Pointer to error structure.
3223  *
3224  * @return
3225  *   0 on success, a negative errno value otherwise and rte_errno is set.
3226  */
3227 static int
3228 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3229                                 const struct rte_flow_action *action,
3230                                 uint64_t action_flags,
3231                                 const struct rte_flow_attr *attr,
3232                                 struct rte_flow_error *error)
3233 {
3234         const struct rte_flow_action_set_tag *conf;
3235         const uint64_t terminal_action_flags =
3236                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3237                 MLX5_FLOW_ACTION_RSS;
3238         int ret;
3239
3240         if (!mlx5_flow_ext_mreg_supported(dev))
3241                 return rte_flow_error_set(error, ENOTSUP,
3242                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3243                                           "extensive metadata register"
3244                                           " isn't supported");
3245         if (!(action->conf))
3246                 return rte_flow_error_set(error, EINVAL,
3247                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3248                                           "configuration cannot be null");
3249         conf = (const struct rte_flow_action_set_tag *)action->conf;
3250         if (!conf->mask)
3251                 return rte_flow_error_set(error, EINVAL,
3252                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3253                                           "zero mask doesn't have any effect");
3254         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3255         if (ret < 0)
3256                 return ret;
3257         if (!attr->transfer && attr->ingress &&
3258             (action_flags & terminal_action_flags))
3259                 return rte_flow_error_set(error, EINVAL,
3260                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3261                                           "set_tag has no effect"
3262                                           " with terminal actions");
3263         return 0;
3264 }
3265
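/*
 * Editorial sketch, not part of the driver: SET_TAG writes one of the
 * application tag registers selected by index, and a zero mask is
 * rejected above. On an ingress NIC rule it is only useful with a
 * non-terminal continuation such as jump, since with terminal actions
 * alone the written tag has no consumer. Illustrative only.
 */
static const struct rte_flow_action_set_tag set_tag_conf = {
        .data = 0xa5a5,
        .mask = 0xffff,
        .index = 0, /* First application tag register. */
};
static const struct rte_flow_action_jump set_tag_jump = { .group = 1 };
static const struct rte_flow_action set_tag_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_SET_TAG, .conf = &set_tag_conf },
        { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &set_tag_jump },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};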
3266 /**
3267  * Validate count action.
3268  *
3269  * @param[in] dev
3270  *   Pointer to rte_eth_dev structure.
3271  * @param[in] shared
3272  *   Indicator if action is shared.
3273  * @param[in] action_flags
3274  *   Holds the actions detected until now.
3275  * @param[out] error
3276  *   Pointer to error structure.
3277  *
3278  * @return
3279  *   0 on success, a negative errno value otherwise and rte_errno is set.
3280  */
3281 static int
3282 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3283                               uint64_t action_flags,
3284                               struct rte_flow_error *error)
3285 {
3286         struct mlx5_priv *priv = dev->data->dev_private;
3287
3288         if (!priv->sh->devx)
3289                 goto notsup_err;
3290         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3291                 return rte_flow_error_set(error, EINVAL,
3292                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3293                                           "duplicate count actions set");
3294         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3295             !priv->sh->flow_hit_aso_en)
3296                 return rte_flow_error_set(error, EINVAL,
3297                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3298                                           "old age and shared count combination is not supported");
3299 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3300         return 0;
3301 #endif
3302 notsup_err:
3303         return rte_flow_error_set
3304                       (error, ENOTSUP,
3305                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3306                        NULL,
3307                        "count action not supported");
3308 }
3309
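/*
 * Editorial sketch, not part of the driver: a flow may carry at most
 * one COUNT action; hit and byte counters are then read back through
 * rte_flow_query(). A hypothetical read-out helper (illustrative only):
 */
static inline int
example_read_flow_counter(uint16_t port_id, struct rte_flow *flow,
                          uint64_t *hits, uint64_t *bytes,
                          struct rte_flow_error *error)
{
        const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count query = { .reset = 0 };
        int ret;

        ret = rte_flow_query(port_id, flow, &count_action, &query, error);
        if (ret)
                return ret;
        *hits = query.hits;
        *bytes = query.bytes;
        return 0;
}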
3310 /**
3311  * Validate the L2 encap action.
3312  *
3313  * @param[in] dev
3314  *   Pointer to the rte_eth_dev structure.
3315  * @param[in] action_flags
3316  *   Holds the actions detected until now.
3317  * @param[in] action
3318  *   Pointer to the action structure.
3319  * @param[in] attr
3320  *   Pointer to flow attributes.
3321  * @param[out] error
3322  *   Pointer to error structure.
3323  *
3324  * @return
3325  *   0 on success, a negative errno value otherwise and rte_errno is set.
3326  */
3327 static int
3328 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3329                                  uint64_t action_flags,
3330                                  const struct rte_flow_action *action,
3331                                  const struct rte_flow_attr *attr,
3332                                  struct rte_flow_error *error)
3333 {
3334         const struct mlx5_priv *priv = dev->data->dev_private;
3335
3336         if (!(action->conf))
3337                 return rte_flow_error_set(error, EINVAL,
3338                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3339                                           "configuration cannot be null");
3340         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3341                 return rte_flow_error_set(error, EINVAL,
3342                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3343                                           "can only have a single encap action "
3344                                           "in a flow");
3345         if (!attr->transfer && priv->representor)
3346                 return rte_flow_error_set(error, ENOTSUP,
3347                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3348                                           "encap action for VF representor "
3349                                           "not supported on NIC table");
3350         return 0;
3351 }
3352
3353 /**
3354  * Validate a decap action.
3355  *
3356  * @param[in] dev
3357  *   Pointer to the rte_eth_dev structure.
3358  * @param[in] action_flags
3359  *   Holds the actions detected until now.
3360  * @param[in] action
3361  *   Pointer to the action structure.
3362  * @param[in] item_flags
3363  *   Holds the items detected.
3364  * @param[in] attr
3365  *   Pointer to flow attributes
3366  * @param[out] error
3367  *   Pointer to error structure.
3368  *
3369  * @return
3370  *   0 on success, a negative errno value otherwise and rte_errno is set.
3371  */
3372 static int
3373 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3374                               uint64_t action_flags,
3375                               const struct rte_flow_action *action,
3376                               const uint64_t item_flags,
3377                               const struct rte_flow_attr *attr,
3378                               struct rte_flow_error *error)
3379 {
3380         const struct mlx5_priv *priv = dev->data->dev_private;
3381
3382         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3383             !priv->config.decap_en)
3384                 return rte_flow_error_set(error, ENOTSUP,
3385                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3386                                           "decap is not enabled");
3387         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3388                 return rte_flow_error_set(error, ENOTSUP,
3389                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3390                                           action_flags &
3391                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3392                                           "have a single decap action" : "decap "
3393                                           "after encap is not supported");
3394         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3395                 return rte_flow_error_set(error, EINVAL,
3396                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3397                                           "can't have decap action after"
3398                                           " modify action");
3399         if (attr->egress)
3400                 return rte_flow_error_set(error, ENOTSUP,
3401                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3402                                           NULL,
3403                                           "decap action not supported for "
3404                                           "egress");
3405         if (!attr->transfer && priv->representor)
3406                 return rte_flow_error_set(error, ENOTSUP,
3407                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3408                                           "decap action for VF representor "
3409                                           "not supported on NIC table");
3410         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3411             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3412                 return rte_flow_error_set(error, ENOTSUP,
3413                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3414                                 "VXLAN item should be present for VXLAN decap");
3415         return 0;
3416 }
3417
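/*
 * Editorial sketch, not part of the driver: vxlan_decap passes the last
 * check above only when the pattern itself matches on VXLAN, e.g.
 * (illustrative names only):
 */
static const struct rte_flow_item vxlan_decap_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue vxlan_decap_queue = { .index = 0 };
static const struct rte_flow_action vxlan_decap_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &vxlan_decap_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};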
3418 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3419
3420 /**
3421  * Validate the raw encap and decap actions.
3422  *
3423  * @param[in] dev
3424  *   Pointer to the rte_eth_dev structure.
3425  * @param[in] decap
3426  *   Pointer to the decap action.
3427  * @param[in] encap
3428  *   Pointer to the encap action.
3429  * @param[in] attr
3430  *   Pointer to flow attributes
3431  * @param[in/out] action_flags
3432  *   Holds the actions detected until now.
3433  * @param[out] actions_n
3434  *   pointer to the number of actions counter.
3435  * @param[in] action
3436  *   Pointer to the action structure.
3437  * @param[in] item_flags
3438  *   Holds the items detected.
3439  * @param[out] error
3440  *   Pointer to error structure.
3441  *
3442  * @return
3443  *   0 on success, a negative errno value otherwise and rte_errno is set.
3444  */
3445 static int
3446 flow_dv_validate_action_raw_encap_decap
3447         (struct rte_eth_dev *dev,
3448          const struct rte_flow_action_raw_decap *decap,
3449          const struct rte_flow_action_raw_encap *encap,
3450          const struct rte_flow_attr *attr, uint64_t *action_flags,
3451          int *actions_n, const struct rte_flow_action *action,
3452          uint64_t item_flags, struct rte_flow_error *error)
3453 {
3454         const struct mlx5_priv *priv = dev->data->dev_private;
3455         int ret;
3456
3457         if (encap && (!encap->size || !encap->data))
3458                 return rte_flow_error_set(error, EINVAL,
3459                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3460                                           "raw encap data cannot be empty");
3461         if (decap && encap) {
3462                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3463                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3464                         /* L3 encap. */
3465                         decap = NULL;
3466                 else if (encap->size <=
3467                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3468                            decap->size >
3469                            MLX5_ENCAPSULATION_DECISION_SIZE)
3470                         /* L3 decap. */
3471                         encap = NULL;
3472                 else if (encap->size >
3473                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3474                            decap->size >
3475                            MLX5_ENCAPSULATION_DECISION_SIZE)
3476                         /* 2 L2 actions: encap and decap. */
3477                         ;
3478                 else
3479                         return rte_flow_error_set(error,
3480                                 ENOTSUP,
3481                                 RTE_FLOW_ERROR_TYPE_ACTION,
3482                                 NULL, "unsupported combination: "
3483                                 "both raw decap and raw encap "
3484                                 "sizes are too small");
3485         }
3486         if (decap) {
3487                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3488                                                     item_flags, attr, error);
3489                 if (ret < 0)
3490                         return ret;
3491                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3492                 ++(*actions_n);
3493         }
3494         if (encap) {
3495                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3496                         return rte_flow_error_set(error, ENOTSUP,
3497                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3498                                                   NULL,
3499                                                   "small raw encap size");
3500                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3501                         return rte_flow_error_set(error, EINVAL,
3502                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3503                                                   NULL,
3504                                                   "more than one encap action");
3505                 if (!attr->transfer && priv->representor)
3506                         return rte_flow_error_set
3507                                         (error, ENOTSUP,
3508                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3509                                          "encap action for VF representor "
3510                                          "not supported on NIC table");
3511                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3512                 ++(*actions_n);
3513         }
3514         return 0;
3515 }
3516
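/*
 * Editorial sketch, not part of the driver: how the decision size above
 * classifies a raw decap/encap pair, assuming the decision size sits
 * between a bare L2 header and a full tunnel stack. Stripping just the
 * inner Ethernet header (14 B) while pushing a complete
 * Ethernet+IPv6+UDP+VXLAN stack (70 B) takes the "L3 encap" branch;
 * the mirrored pair takes "L3 decap"; two large buffers request plain
 * L2 decap plus encap. Buffer contents and names are illustrative.
 */
static uint8_t raw_decap_hdr[sizeof(struct rte_ether_hdr)];
static uint8_t raw_encap_hdr[70]; /* Filled by the application. */
static const struct rte_flow_action_raw_decap raw_l3_decap = {
        .data = raw_decap_hdr,
        .size = sizeof(raw_decap_hdr),
};
static const struct rte_flow_action_raw_encap raw_l3_encap = {
        .data = raw_encap_hdr,
        .size = sizeof(raw_encap_hdr),
};
static const struct rte_flow_action raw_l3_encap_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &raw_l3_decap },
        { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &raw_l3_encap },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};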
3517 /**
3518  * Validate the ASO CT action.
3519  *
3520  * @param[in] dev
3521  *   Pointer to the rte_eth_dev structure.
3522  * @param[in] action_flags
3523  *   Holds the actions detected until now.
3524  * @param[in] item_flags
3525  *   The items found in this flow rule.
3526  * @param[in] attr
3527  *   Pointer to flow attributes.
3528  * @param[out] error
3529  *   Pointer to error structure.
3530  *
3531  * @return
3532  *   0 on success, a negative errno value otherwise and rte_errno is set.
3533  */
3534 static int
3535 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3536                                uint64_t action_flags,
3537                                uint64_t item_flags,
3538                                const struct rte_flow_attr *attr,
3539                                struct rte_flow_error *error)
3540 {
3541         RTE_SET_USED(dev);
3542
3543         if (attr->group == 0 && !attr->transfer)
3544                 return rte_flow_error_set(error, ENOTSUP,
3545                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3546                                           NULL,
3547                                           "Only non-root tables are supported");
3548         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3549                 return rte_flow_error_set(error, ENOTSUP,
3550                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3551                                           "CT cannot follow a fate action");
3552         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3553             (action_flags & MLX5_FLOW_ACTION_AGE))
3554                 return rte_flow_error_set(error, EINVAL,
3555                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3556                                           "Only one ASO action is supported");
3557         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3558                 return rte_flow_error_set(error, EINVAL,
3559                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3560                                           "Encap cannot exist before CT");
3561         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3562                 return rte_flow_error_set(error, EINVAL,
3563                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3564                                           "Not an outer TCP packet");
3565         return 0;
3566 }
3567
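/*
 * Editorial sketch, not part of the driver: a conntrack rule meeting
 * the constraints above - non-root group, outer TCP in the pattern,
 * and CT placed before the fate action. The CT context itself would
 * come from rte_flow_action_handle_create() and is passed here as an
 * indirect action. Illustrative names only.
 */
static inline int
example_validate_ct_rule(uint16_t port_id,
                         struct rte_flow_action_handle *ct_handle,
                         struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_jump jump = { .group = 2 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = ct_handle },
                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}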
3568 int
3569 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3570                              struct mlx5_list_entry *entry, void *cb_ctx)
3571 {
3572         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3573         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3574         struct mlx5_flow_dv_encap_decap_resource *resource;
3575
3576         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3577                                 entry);
3578         if (resource->reformat_type == ctx_resource->reformat_type &&
3579             resource->ft_type == ctx_resource->ft_type &&
3580             resource->flags == ctx_resource->flags &&
3581             resource->size == ctx_resource->size &&
3582             !memcmp((const void *)resource->buf,
3583                     (const void *)ctx_resource->buf,
3584                     resource->size))
3585                 return 0;
3586         return -1;
3587 }
3588
3589 struct mlx5_list_entry *
3590 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3591 {
3592         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3593         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3594         struct mlx5dv_dr_domain *domain;
3595         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3596         struct mlx5_flow_dv_encap_decap_resource *resource;
3597         uint32_t idx;
3598         int ret;
3599
3600         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3601                 domain = sh->fdb_domain;
3602         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3603                 domain = sh->rx_domain;
3604         else
3605                 domain = sh->tx_domain;
3606         /* Register new encap/decap resource. */
3607         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3608         if (!resource) {
3609                 rte_flow_error_set(ctx->error, ENOMEM,
3610                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3611                                    "cannot allocate resource memory");
3612                 return NULL;
3613         }
3614         *resource = *ctx_resource;
3615         resource->idx = idx;
3616         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3617                                                               domain, resource,
3618                                                              &resource->action);
3619         if (ret) {
3620                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3621                 rte_flow_error_set(ctx->error, ENOMEM,
3622                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3623                                    NULL, "cannot create action");
3624                 return NULL;
3625         }
3626
3627         return &resource->entry;
3628 }
3629
3630 struct mlx5_list_entry *
3631 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3632                              void *cb_ctx)
3633 {
3634         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3635         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3636         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3637         uint32_t idx;
3638
3639         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3640                                            &idx);
3641         if (!cache_resource) {
3642                 rte_flow_error_set(ctx->error, ENOMEM,
3643                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3644                                    "cannot allocate resource memory");
3645                 return NULL;
3646         }
3647         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3648         cache_resource->idx = idx;
3649         return &cache_resource->entry;
3650 }
3651
3652 void
3653 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3654 {
3655         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3656         struct mlx5_flow_dv_encap_decap_resource *res =
3657                                        container_of(entry, typeof(*res), entry);
3658
3659         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3660 }
3661
3662 /**
3663  * Find existing encap/decap resource or create and register a new one.
3664  *
3665  * @param[in, out] dev
3666  *   Pointer to rte_eth_dev structure.
3667  * @param[in, out] resource
3668  *   Pointer to encap/decap resource.
3669  * @param[in, out] dev_flow
3670  *   Pointer to the dev_flow.
3671  * @param[out] error
3672  *   Pointer to error structure.
3673  *
3674  * @return
3675  *   0 on success, otherwise -errno and errno is set.
3676  */
3677 static int
3678 flow_dv_encap_decap_resource_register
3679                         (struct rte_eth_dev *dev,
3680                          struct mlx5_flow_dv_encap_decap_resource *resource,
3681                          struct mlx5_flow *dev_flow,
3682                          struct rte_flow_error *error)
3683 {
3684         struct mlx5_priv *priv = dev->data->dev_private;
3685         struct mlx5_dev_ctx_shared *sh = priv->sh;
3686         struct mlx5_list_entry *entry;
3687         union {
3688                 struct {
3689                         uint32_t ft_type:8;
3690                         uint32_t refmt_type:8;
3691                         /*
3692                          * Header reformat actions can be shared between
3693                          * non-root tables. One bit indicates whether the
3694                          * table is non-root.
3695                          */
3696                         uint32_t is_root:1;
3697                         uint32_t reserve:15;
3698                 };
3699                 uint32_t v32;
3700         } encap_decap_key = {
3701                 {
3702                         .ft_type = resource->ft_type,
3703                         .refmt_type = resource->reformat_type,
3704                         .is_root = !!dev_flow->dv.group,
3705                         .reserve = 0,
3706                 }
3707         };
3708         struct mlx5_flow_cb_ctx ctx = {
3709                 .error = error,
3710                 .data = resource,
3711         };
3712         struct mlx5_hlist *encaps_decaps;
3713         uint64_t key64;
3714
3715         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3716                                 "encaps_decaps",
3717                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3718                                 true, true, sh,
3719                                 flow_dv_encap_decap_create_cb,
3720                                 flow_dv_encap_decap_match_cb,
3721                                 flow_dv_encap_decap_remove_cb,
3722                                 flow_dv_encap_decap_clone_cb,
3723                                 flow_dv_encap_decap_clone_free_cb);
3724         if (unlikely(!encaps_decaps))
3725                 return -rte_errno;
3726         resource->flags = dev_flow->dv.group ? 0 : 1;
3727         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3728                                 sizeof(encap_decap_key.v32), 0);
3729         if (resource->reformat_type !=
3730             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3731             resource->size)
3732                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3733         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3734         if (!entry)
3735                 return -rte_errno;
3736         resource = container_of(entry, typeof(*resource), entry);
3737         dev_flow->dv.encap_decap = resource;
3738         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3739         return 0;
3740 }
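
/*
 * A sketch of the key derivation above (illustrative only): the 64-bit
 * hash key chains raw checksums, first over the packed 32-bit
 * {ft_type, refmt_type, is_root} descriptor and then, for reformats that
 * carry header data, over the buffer itself:
 *
 *     uint64_t key64 = __rte_raw_cksum(&key.v32, sizeof(key.v32), 0);
 *     if (reformat_type != L2_TUNNEL_TO_L2 && size)
 *             key64 = __rte_raw_cksum(buf, size, key64);
 *
 * Plain L2 decap entries carry no buffer, so they hash (and thus can be
 * shared) on the descriptor alone.
 */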
3741
3742 /**
3743  * Find existing table jump resource or create and register a new one.
3744  *
3745  * @param[in, out] dev
3746  *   Pointer to rte_eth_dev structure.
3747  * @param[in, out] tbl
3748  *   Pointer to flow table resource.
3749  * @param[in, out] dev_flow
3750  *   Pointer to the dev_flow.
3751  * @param[out] error
3752  *   Pointer to error structure.
3753  *
3754  * @return
3755  *   0 on success, otherwise -errno and errno is set.
3756  */
3757 static int
3758 flow_dv_jump_tbl_resource_register
3759                         (struct rte_eth_dev *dev __rte_unused,
3760                          struct mlx5_flow_tbl_resource *tbl,
3761                          struct mlx5_flow *dev_flow,
3762                          struct rte_flow_error *error __rte_unused)
3763 {
3764         struct mlx5_flow_tbl_data_entry *tbl_data =
3765                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3766
3767         MLX5_ASSERT(tbl);
3768         MLX5_ASSERT(tbl_data->jump.action);
3769         dev_flow->handle->rix_jump = tbl_data->idx;
3770         dev_flow->dv.jump = &tbl_data->jump;
3771         return 0;
3772 }
3773
3774 int
3775 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3776                          struct mlx5_list_entry *entry, void *cb_ctx)
3777 {
3778         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3779         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3780         struct mlx5_flow_dv_port_id_action_resource *res =
3781                                        container_of(entry, typeof(*res), entry);
3782
3783         return ref->port_id != res->port_id;
3784 }
3785
3786 struct mlx5_list_entry *
3787 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3788 {
3789         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3790         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3791         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3792         struct mlx5_flow_dv_port_id_action_resource *resource;
3793         uint32_t idx;
3794         int ret;
3795
3796         /* Register new port id action resource. */
3797         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3798         if (!resource) {
3799                 rte_flow_error_set(ctx->error, ENOMEM,
3800                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3801                                    "cannot allocate port_id action memory");
3802                 return NULL;
3803         }
3804         *resource = *ref;
3805         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3806                                                         ref->port_id,
3807                                                         &resource->action);
3808         if (ret) {
3809                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3810                 rte_flow_error_set(ctx->error, ENOMEM,
3811                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3812                                    "cannot create action");
3813                 return NULL;
3814         }
3815         resource->idx = idx;
3816         return &resource->entry;
3817 }
3818
3819 struct mlx5_list_entry *
3820 flow_dv_port_id_clone_cb(void *tool_ctx,
3821                          struct mlx5_list_entry *entry __rte_unused,
3822                          void *cb_ctx)
3823 {
3824         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3825         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3826         struct mlx5_flow_dv_port_id_action_resource *resource;
3827         uint32_t idx;
3828
3829         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3830         if (!resource) {
3831                 rte_flow_error_set(ctx->error, ENOMEM,
3832                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3833                                    "cannot allocate port_id action memory");
3834                 return NULL;
3835         }
3836         memcpy(resource, entry, sizeof(*resource));
3837         resource->idx = idx;
3838         return &resource->entry;
3839 }
3840
3841 void
3842 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3843 {
3844         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3845         struct mlx5_flow_dv_port_id_action_resource *resource =
3846                                   container_of(entry, typeof(*resource), entry);
3847
3848         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3849 }
3850
3851 /**
3852  * Find existing port ID action resource or create and register a new one.
3853  *
3854  * @param[in, out] dev
3855  *   Pointer to rte_eth_dev structure.
3856  * @param[in, out] ref
3857  *   Pointer to port ID action resource reference.
3858  * @param[in, out] dev_flow
3859  *   Pointer to the dev_flow.
3860  * @param[out] error
3861  *   Pointer to error structure.
3862  *
3863  * @return
3864  *   0 on success, otherwise -errno and errno is set.
3865  */
3866 static int
3867 flow_dv_port_id_action_resource_register
3868                         (struct rte_eth_dev *dev,
3869                          struct mlx5_flow_dv_port_id_action_resource *ref,
3870                          struct mlx5_flow *dev_flow,
3871                          struct rte_flow_error *error)
3872 {
3873         struct mlx5_priv *priv = dev->data->dev_private;
3874         struct mlx5_list_entry *entry;
3875         struct mlx5_flow_dv_port_id_action_resource *resource;
3876         struct mlx5_flow_cb_ctx ctx = {
3877                 .error = error,
3878                 .data = ref,
3879         };
3880
3881         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3882         if (!entry)
3883                 return -rte_errno;
3884         resource = container_of(entry, typeof(*resource), entry);
3885         dev_flow->dv.port_id_action = resource;
3886         dev_flow->handle->rix_port_id_action = resource->idx;
3887         return 0;
3888 }
3889
3890 int
3891 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3892                            struct mlx5_list_entry *entry, void *cb_ctx)
3893 {
3894         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3895         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3896         struct mlx5_flow_dv_push_vlan_action_resource *res =
3897                                        container_of(entry, typeof(*res), entry);
3898
3899         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3900 }
3901
3902 struct mlx5_list_entry *
3903 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3904 {
3905         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3906         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3907         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3908         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3909         struct mlx5dv_dr_domain *domain;
3910         uint32_t idx;
3911         int ret;
3912
3913         /* Register new push VLAN action resource. */
3914         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3915         if (!resource) {
3916                 rte_flow_error_set(ctx->error, ENOMEM,
3917                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3918                                    "cannot allocate push_vlan action memory");
3919                 return NULL;
3920         }
3921         *resource = *ref;
3922         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3923                 domain = sh->fdb_domain;
3924         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3925                 domain = sh->rx_domain;
3926         else
3927                 domain = sh->tx_domain;
3928         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3929                                                         &resource->action);
3930         if (ret) {
3931                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3932                 rte_flow_error_set(ctx->error, ENOMEM,
3933                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3934                                    "cannot create push vlan action");
3935                 return NULL;
3936         }
3937         resource->idx = idx;
3938         return &resource->entry;
3939 }
3940
3941 struct mlx5_list_entry *
3942 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3943                            struct mlx5_list_entry *entry __rte_unused,
3944                            void *cb_ctx)
3945 {
3946         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3947         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3948         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3949         uint32_t idx;
3950
3951         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3952         if (!resource) {
3953                 rte_flow_error_set(ctx->error, ENOMEM,
3954                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3955                                    "cannot allocate push_vlan action memory");
3956                 return NULL;
3957         }
3958         memcpy(resource, entry, sizeof(*resource));
3959         resource->idx = idx;
3960         return &resource->entry;
3961 }
3962
3963 void
3964 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3965 {
3966         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3967         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3968                                   container_of(entry, typeof(*resource), entry);
3969
3970         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3971 }
3972
3973 /**
3974  * Find existing push VLAN action resource or create and register a new one.
3975  *
3976  * @param[in, out] dev
3977  *   Pointer to rte_eth_dev structure.
3978  * @param[in, out] ref
3979  *   Pointer to push VLAN action resource reference.
3980  * @param[in, out] dev_flow
3981  *   Pointer to the dev_flow.
3982  * @param[out] error
3983  *   Pointer to error structure.
3984  *
3985  * @return
3986  *   0 on success, otherwise -errno and errno is set.
3987  */
3988 static int
3989 flow_dv_push_vlan_action_resource_register
3990                        (struct rte_eth_dev *dev,
3991                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
3992                         struct mlx5_flow *dev_flow,
3993                         struct rte_flow_error *error)
3994 {
3995         struct mlx5_priv *priv = dev->data->dev_private;
3996         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3997         struct mlx5_list_entry *entry;
3998         struct mlx5_flow_cb_ctx ctx = {
3999                 .error = error,
4000                 .data = ref,
4001         };
4002
4003         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4004         if (!entry)
4005                 return -rte_errno;
4006         resource = container_of(entry, typeof(*resource), entry);
4007
4008         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4009         dev_flow->dv.push_vlan_res = resource;
4010         return 0;
4011 }
4012
4013 /**
4014  * Get the header size of the specified rte_flow_item_type.
4015  *
4016  * @param[in] item_type
4017  *   Tested rte_flow_item_type.
4018  *
4019  * @return
4020  *   Size of the item type's header, 0 if void or irrelevant.
4021  */
4022 static size_t
4023 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4024 {
4025         size_t retval;
4026
4027         switch (item_type) {
4028         case RTE_FLOW_ITEM_TYPE_ETH:
4029                 retval = sizeof(struct rte_ether_hdr);
4030                 break;
4031         case RTE_FLOW_ITEM_TYPE_VLAN:
4032                 retval = sizeof(struct rte_vlan_hdr);
4033                 break;
4034         case RTE_FLOW_ITEM_TYPE_IPV4:
4035                 retval = sizeof(struct rte_ipv4_hdr);
4036                 break;
4037         case RTE_FLOW_ITEM_TYPE_IPV6:
4038                 retval = sizeof(struct rte_ipv6_hdr);
4039                 break;
4040         case RTE_FLOW_ITEM_TYPE_UDP:
4041                 retval = sizeof(struct rte_udp_hdr);
4042                 break;
4043         case RTE_FLOW_ITEM_TYPE_TCP:
4044                 retval = sizeof(struct rte_tcp_hdr);
4045                 break;
4046         case RTE_FLOW_ITEM_TYPE_VXLAN:
4047         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4048                 retval = sizeof(struct rte_vxlan_hdr);
4049                 break;
4050         case RTE_FLOW_ITEM_TYPE_GRE:
4051         case RTE_FLOW_ITEM_TYPE_NVGRE:
4052                 retval = sizeof(struct rte_gre_hdr);
4053                 break;
4054         case RTE_FLOW_ITEM_TYPE_MPLS:
4055                 retval = sizeof(struct rte_mpls_hdr);
4056                 break;
4057         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4058         default:
4059                 retval = 0;
4060                 break;
4061         }
4062         return retval;
4063 }
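
/*
 * Usage sketch (illustrative, not part of the driver): the total length
 * of an encap header is just the sum of the per-item header sizes, which
 * is how the converter below sizes its output:
 *
 *     size_t total = 0;
 *     const struct rte_flow_item *it;
 *
 *     for (it = items; it->type != RTE_FLOW_ITEM_TYPE_END; it++)
 *             total += flow_dv_get_item_hdr_len(it->type);
 *
 * e.g. ETH / IPV4 / UDP / VXLAN = 14 + 20 + 8 + 8 = 50 bytes.
 */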
4064
4065 #define MLX5_ENCAP_IPV4_VERSION         0x40
4066 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4067 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4068 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4069 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4070 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4071 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4072
4073 /**
4074  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
4075  *
4076  * @param[in] items
4077  *   Pointer to rte_flow_item objects list.
4078  * @param[out] buf
4079  *   Pointer to the output buffer.
4080  * @param[out] size
4081  *   Pointer to the output buffer size.
4082  * @param[out] error
4083  *   Pointer to the error structure.
4084  *
4085  * @return
4086  *   0 on success, a negative errno value otherwise and rte_errno is set.
4087  */
4088 static int
4089 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4090                            size_t *size, struct rte_flow_error *error)
4091 {
4092         struct rte_ether_hdr *eth = NULL;
4093         struct rte_vlan_hdr *vlan = NULL;
4094         struct rte_ipv4_hdr *ipv4 = NULL;
4095         struct rte_ipv6_hdr *ipv6 = NULL;
4096         struct rte_udp_hdr *udp = NULL;
4097         struct rte_vxlan_hdr *vxlan = NULL;
4098         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4099         struct rte_gre_hdr *gre = NULL;
4100         size_t len;
4101         size_t temp_size = 0;
4102
4103         if (!items)
4104                 return rte_flow_error_set(error, EINVAL,
4105                                           RTE_FLOW_ERROR_TYPE_ACTION,
4106                                           NULL, "invalid empty data");
4107         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4108                 len = flow_dv_get_item_hdr_len(items->type);
4109                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4110                         return rte_flow_error_set(error, EINVAL,
4111                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4112                                                   (void *)items->type,
4113                                                   "items total size is too big"
4114                                                   " for encap action");
4115                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4116                 switch (items->type) {
4117                 case RTE_FLOW_ITEM_TYPE_ETH:
4118                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4119                         break;
4120                 case RTE_FLOW_ITEM_TYPE_VLAN:
4121                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4122                         if (!eth)
4123                                 return rte_flow_error_set(error, EINVAL,
4124                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4125                                                 (void *)items->type,
4126                                                 "eth header not found");
4127                         if (!eth->ether_type)
4128                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4129                         break;
4130                 case RTE_FLOW_ITEM_TYPE_IPV4:
4131                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4132                         if (!vlan && !eth)
4133                                 return rte_flow_error_set(error, EINVAL,
4134                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4135                                                 (void *)items->type,
4136                                                 "neither eth nor vlan"
4137                                                 " header found");
4138                         if (vlan && !vlan->eth_proto)
4139                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4140                         else if (eth && !eth->ether_type)
4141                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4142                         if (!ipv4->version_ihl)
4143                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4144                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4145                         if (!ipv4->time_to_live)
4146                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4147                         break;
4148                 case RTE_FLOW_ITEM_TYPE_IPV6:
4149                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4150                         if (!vlan && !eth)
4151                                 return rte_flow_error_set(error, EINVAL,
4152                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4153                                                 (void *)items->type,
4154                                                 "neither eth nor vlan"
4155                                                 " header found");
4156                         if (vlan && !vlan->eth_proto)
4157                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4158                         else if (eth && !eth->ether_type)
4159                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4160                         if (!ipv6->vtc_flow)
4161                                 ipv6->vtc_flow =
4162                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4163                         if (!ipv6->hop_limits)
4164                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4165                         break;
4166                 case RTE_FLOW_ITEM_TYPE_UDP:
4167                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4168                         if (!ipv4 && !ipv6)
4169                                 return rte_flow_error_set(error, EINVAL,
4170                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4171                                                 (void *)items->type,
4172                                                 "ip header not found");
4173                         if (ipv4 && !ipv4->next_proto_id)
4174                                 ipv4->next_proto_id = IPPROTO_UDP;
4175                         else if (ipv6 && !ipv6->proto)
4176                                 ipv6->proto = IPPROTO_UDP;
4177                         break;
4178                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4179                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4180                         if (!udp)
4181                                 return rte_flow_error_set(error, EINVAL,
4182                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4183                                                 (void *)items->type,
4184                                                 "udp header not found");
4185                         if (!udp->dst_port)
4186                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4187                         if (!vxlan->vx_flags)
4188                                 vxlan->vx_flags =
4189                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4190                         break;
4191                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4192                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4193                         if (!udp)
4194                                 return rte_flow_error_set(error, EINVAL,
4195                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4196                                                 (void *)items->type,
4197                                                 "udp header not found");
4198                         if (!vxlan_gpe->proto)
4199                                 return rte_flow_error_set(error, EINVAL,
4200                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4201                                                 (void *)items->type,
4202                                                 "next protocol not found");
4203                         if (!udp->dst_port)
4204                                 udp->dst_port =
4205                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4206                         if (!vxlan_gpe->vx_flags)
4207                                 vxlan_gpe->vx_flags =
4208                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4209                         break;
4210                 case RTE_FLOW_ITEM_TYPE_GRE:
4211                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4212                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4213                         if (!gre->proto)
4214                                 return rte_flow_error_set(error, EINVAL,
4215                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4216                                                 (void *)items->type,
4217                                                 "next protocol not found");
4218                         if (!ipv4 && !ipv6)
4219                                 return rte_flow_error_set(error, EINVAL,
4220                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4221                                                 (void *)items->type,
4222                                                 "ip header not found");
4223                         if (ipv4 && !ipv4->next_proto_id)
4224                                 ipv4->next_proto_id = IPPROTO_GRE;
4225                         else if (ipv6 && !ipv6->proto)
4226                                 ipv6->proto = IPPROTO_GRE;
4227                         break;
4228                 case RTE_FLOW_ITEM_TYPE_VOID:
4229                         break;
4230                 default:
4231                         return rte_flow_error_set(error, EINVAL,
4232                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4233                                                   (void *)items->type,
4234                                                   "unsupported item type");
4235                         break;
4236                 }
4237                 temp_size += len;
4238         }
4239         *size = temp_size;
4240         return 0;
4241 }
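
/*
 * For example (a hedged sketch with assumed *_spec variables): converting
 * a VXLAN encap definition of ETH / IPV4 / UDP / VXLAN / END yields a
 * 50-byte raw header, with the defaults above filled into any zero field:
 *
 *     struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_END, },
 *     };
 *     uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *     size_t size;
 *
 *     flow_dv_convert_encap_data(items, buf, &size, error);
 *
 * On success size is 50 here, and a zero UDP destination port defaults to
 * 4789 (MLX5_UDP_PORT_VXLAN).
 */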
4242
4243 static int
4244 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4245 {
4246         struct rte_ether_hdr *eth = NULL;
4247         struct rte_vlan_hdr *vlan = NULL;
4248         struct rte_ipv6_hdr *ipv6 = NULL;
4249         struct rte_udp_hdr *udp = NULL;
4250         char *next_hdr;
4251         uint16_t proto;
4252
4253         eth = (struct rte_ether_hdr *)data;
4254         next_hdr = (char *)(eth + 1);
4255         proto = RTE_BE16(eth->ether_type);
4256
4257         /* VLAN skipping */
4258         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4259                 vlan = (struct rte_vlan_hdr *)next_hdr;
4260                 proto = RTE_BE16(vlan->eth_proto);
4261                 next_hdr += sizeof(struct rte_vlan_hdr);
4262         }
4263
4264         /* HW calculates the IPv4 csum. No need to proceed. */
4265         if (proto == RTE_ETHER_TYPE_IPV4)
4266                 return 0;
4267
4268         /* Non-IPv4/IPv6 header. Not supported. */
4269         if (proto != RTE_ETHER_TYPE_IPV6) {
4270                 return rte_flow_error_set(error, ENOTSUP,
4271                                           RTE_FLOW_ERROR_TYPE_ACTION,
4272                                           NULL, "Cannot offload non IPv4/IPv6");
4273         }
4274
4275         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4276
4277         /* Ignore non-UDP. */
4278         if (ipv6->proto != IPPROTO_UDP)
4279                 return 0;
4280
4281         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4282         udp->dgram_cksum = 0;
4283
4284         return 0;
4285 }
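
/*
 * Rationale (hedged summary): the device recomputes the IPv4 header
 * checksum on encap but does not provide a UDP checksum for an IPv6
 * encap header, so the checksum field is forced to zero; a zero UDP
 * checksum is acceptable for tunnels over IPv6 per RFC 6935/6936.
 * The walk above mirrors the encap buffer layout:
 *
 *     eth [/ vlan ...] / ipv6 / udp  ->  udp->dgram_cksum = 0
 */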
4286
4287 /**
4288  * Convert L2 encap action to DV specification.
4289  *
4290  * @param[in] dev
4291  *   Pointer to rte_eth_dev structure.
4292  * @param[in] action
4293  *   Pointer to action structure.
4294  * @param[in, out] dev_flow
4295  *   Pointer to the mlx5_flow.
4296  * @param[in] transfer
4297  *   Mark if the flow is E-Switch flow.
4298  * @param[out] error
4299  *   Pointer to the error structure.
4300  *
4301  * @return
4302  *   0 on success, a negative errno value otherwise and rte_errno is set.
4303  */
4304 static int
4305 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4306                                const struct rte_flow_action *action,
4307                                struct mlx5_flow *dev_flow,
4308                                uint8_t transfer,
4309                                struct rte_flow_error *error)
4310 {
4311         const struct rte_flow_item *encap_data;
4312         const struct rte_flow_action_raw_encap *raw_encap_data;
4313         struct mlx5_flow_dv_encap_decap_resource res = {
4314                 .reformat_type =
4315                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4316                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4317                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4318         };
4319
4320         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4321                 raw_encap_data =
4322                         (const struct rte_flow_action_raw_encap *)action->conf;
4323                 res.size = raw_encap_data->size;
4324                 memcpy(res.buf, raw_encap_data->data, res.size);
4325         } else {
4326                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4327                         encap_data =
4328                                 ((const struct rte_flow_action_vxlan_encap *)
4329                                                 action->conf)->definition;
4330                 else
4331                         encap_data =
4332                                 ((const struct rte_flow_action_nvgre_encap *)
4333                                                 action->conf)->definition;
4334                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4335                                                &res.size, error))
4336                         return -rte_errno;
4337         }
4338         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4339                 return -rte_errno;
4340         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4341                 return rte_flow_error_set(error, EINVAL,
4342                                           RTE_FLOW_ERROR_TYPE_ACTION,
4343                                           NULL, "can't create L2 encap action");
4344         return 0;
4345 }
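
/*
 * Call sketch (illustrative): translating a VXLAN encap action on the
 * egress (or FDB, when transfer is set) path boils down to:
 *
 *     struct rte_flow_action_vxlan_encap conf = {
 *             .definition = items, (ETH / IPV4 / UDP / VXLAN / END)
 *     };
 *     struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *             .conf = &conf,
 *     };
 *
 *     ret = flow_dv_create_action_l2_encap(dev, &act, dev_flow,
 *                                          attr->transfer, error);
 *
 * On success dev_flow->dv.encap_decap->action holds the shared packet
 * reformat action registered above.
 */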
4346
4347 /**
4348  * Convert L2 decap action to DV specification.
4349  *
4350  * @param[in] dev
4351  *   Pointer to rte_eth_dev structure.
4352  * @param[in, out] dev_flow
4353  *   Pointer to the mlx5_flow.
4354  * @param[in] transfer
4355  *   Mark if the flow is E-Switch flow.
4356  * @param[out] error
4357  *   Pointer to the error structure.
4358  *
4359  * @return
4360  *   0 on success, a negative errno value otherwise and rte_errno is set.
4361  */
4362 static int
4363 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4364                                struct mlx5_flow *dev_flow,
4365                                uint8_t transfer,
4366                                struct rte_flow_error *error)
4367 {
4368         struct mlx5_flow_dv_encap_decap_resource res = {
4369                 .size = 0,
4370                 .reformat_type =
4371                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4372                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4373                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4374         };
4375
4376         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4377                 return rte_flow_error_set(error, EINVAL,
4378                                           RTE_FLOW_ERROR_TYPE_ACTION,
4379                                           NULL, "can't create L2 decap action");
4380         return 0;
4381 }
4382
4383 /**
4384  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4385  *
4386  * @param[in] dev
4387  *   Pointer to rte_eth_dev structure.
4388  * @param[in] action
4389  *   Pointer to action structure.
4390  * @param[in, out] dev_flow
4391  *   Pointer to the mlx5_flow.
4392  * @param[in] attr
4393  *   Pointer to the flow attributes.
4394  * @param[out] error
4395  *   Pointer to the error structure.
4396  *
4397  * @return
4398  *   0 on success, a negative errno value otherwise and rte_errno is set.
4399  */
4400 static int
4401 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4402                                 const struct rte_flow_action *action,
4403                                 struct mlx5_flow *dev_flow,
4404                                 const struct rte_flow_attr *attr,
4405                                 struct rte_flow_error *error)
4406 {
4407         const struct rte_flow_action_raw_encap *encap_data;
4408         struct mlx5_flow_dv_encap_decap_resource res;
4409
4410         memset(&res, 0, sizeof(res));
4411         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4412         res.size = encap_data->size;
4413         memcpy(res.buf, encap_data->data, res.size);
4414         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4415                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4416                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4417         if (attr->transfer)
4418                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4419         else
4420                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4421                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4422         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4423                 return rte_flow_error_set(error, EINVAL,
4424                                           RTE_FLOW_ERROR_TYPE_ACTION,
4425                                           NULL, "can't create encap action");
4426         return 0;
4427 }
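
/*
 * A note on the size-based decision above (hedged reading of the mlx5
 * convention): a raw buffer shorter than MLX5_ENCAPSULATION_DECISION_SIZE
 * can only hold the L2 header to restore after stripping an L3 tunnel,
 * so it is translated as L3_TUNNEL_TO_L2 decap; anything at least that
 * long is treated as a complete L3 tunnel header to prepend, i.e.
 * L2_TO_L3_TUNNEL encap.
 */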
4428
4429 /**
4430  * Create action push VLAN.
4431  *
4432  * @param[in] dev
4433  *   Pointer to rte_eth_dev structure.
4434  * @param[in] attr
4435  *   Pointer to the flow attributes.
4436  * @param[in] vlan
4437  *   Pointer to the vlan to push to the Ethernet header.
4438  * @param[in, out] dev_flow
4439  *   Pointer to the mlx5_flow.
4440  * @param[out] error
4441  *   Pointer to the error structure.
4442  *
4443  * @return
4444  *   0 on success, a negative errno value otherwise and rte_errno is set.
4445  */
4446 static int
4447 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4448                                 const struct rte_flow_attr *attr,
4449                                 const struct rte_vlan_hdr *vlan,
4450                                 struct mlx5_flow *dev_flow,
4451                                 struct rte_flow_error *error)
4452 {
4453         struct mlx5_flow_dv_push_vlan_action_resource res;
4454
4455         memset(&res, 0, sizeof(res));
4456         res.vlan_tag =
4457                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4458                                  vlan->vlan_tci);
4459         if (attr->transfer)
4460                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4461         else
4462                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4463                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4464         return flow_dv_push_vlan_action_resource_register
4465                                             (dev, &res, dev_flow, error);
4466 }
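
/*
 * A worked sketch of the tag packing (illustrative values, assuming
 * host-order TPID and TCI as provided by the caller): TPID 0x8100 with
 * PCP 3 and VLAN ID 100 gives
 *
 *     tci = (3 << 13) | 100;                            -> 0x6064
 *     res.vlan_tag = rte_cpu_to_be_32(0x8100 << 16 | tci);
 *
 * i.e. the TPID in the upper 16 bits and the TCI in the lower 16 bits of
 * the big-endian 32-bit word programmed into the push VLAN action.
 */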
4467
4468 /**
4469  * Validate the modify-header actions.
4470  *
4471  * @param[in] action_flags
4472  *   Holds the actions detected until now.
4473  * @param[in] action
4474  *   Pointer to the modify action.
4475  * @param[out] error
4476  *   Pointer to error structure.
4477  *
4478  * @return
4479  *   0 on success, a negative errno value otherwise and rte_errno is set.
4480  */
4481 static int
4482 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4483                                    const struct rte_flow_action *action,
4484                                    struct rte_flow_error *error)
4485 {
4486         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4487                 return rte_flow_error_set(error, EINVAL,
4488                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4489                                           NULL, "action configuration not set");
4490         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4491                 return rte_flow_error_set(error, EINVAL,
4492                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4493                                           "can't have encap action before"
4494                                           " modify action");
4495         return 0;
4496 }
4497
4498 /**
4499  * Validate the modify-header MAC address actions.
4500  *
4501  * @param[in] action_flags
4502  *   Holds the actions detected until now.
4503  * @param[in] action
4504  *   Pointer to the modify action.
4505  * @param[in] item_flags
4506  *   Holds the items detected.
4507  * @param[out] error
4508  *   Pointer to error structure.
4509  *
4510  * @return
4511  *   0 on success, a negative errno value otherwise and rte_errno is set.
4512  */
4513 static int
4514 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4515                                    const struct rte_flow_action *action,
4516                                    const uint64_t item_flags,
4517                                    struct rte_flow_error *error)
4518 {
4519         int ret = 0;
4520
4521         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4522         if (!ret) {
4523                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4524                         return rte_flow_error_set(error, EINVAL,
4525                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4526                                                   NULL,
4527                                                   "no L2 item in pattern");
4528         }
4529         return ret;
4530 }
4531
4532 /**
4533  * Validate the modify-header IPv4 address actions.
4534  *
4535  * @param[in] action_flags
4536  *   Holds the actions detected until now.
4537  * @param[in] action
4538  *   Pointer to the modify action.
4539  * @param[in] item_flags
4540  *   Holds the items detected.
4541  * @param[out] error
4542  *   Pointer to error structure.
4543  *
4544  * @return
4545  *   0 on success, a negative errno value otherwise and rte_errno is set.
4546  */
4547 static int
4548 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4549                                     const struct rte_flow_action *action,
4550                                     const uint64_t item_flags,
4551                                     struct rte_flow_error *error)
4552 {
4553         int ret = 0;
4554         uint64_t layer;
4555
4556         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4557         if (!ret) {
4558                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4559                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4560                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4561                 if (!(item_flags & layer))
4562                         return rte_flow_error_set(error, EINVAL,
4563                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4564                                                   NULL,
4565                                                   "no ipv4 item in pattern");
4566         }
4567         return ret;
4568 }
4569
4570 /**
4571  * Validate the modify-header IPv6 address actions.
4572  *
4573  * @param[in] action_flags
4574  *   Holds the actions detected until now.
4575  * @param[in] action
4576  *   Pointer to the modify action.
4577  * @param[in] item_flags
4578  *   Holds the items detected.
4579  * @param[out] error
4580  *   Pointer to error structure.
4581  *
4582  * @return
4583  *   0 on success, a negative errno value otherwise and rte_errno is set.
4584  */
4585 static int
4586 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4587                                     const struct rte_flow_action *action,
4588                                     const uint64_t item_flags,
4589                                     struct rte_flow_error *error)
4590 {
4591         int ret = 0;
4592         uint64_t layer;
4593
4594         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4595         if (!ret) {
4596                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4597                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4598                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4599                 if (!(item_flags & layer))
4600                         return rte_flow_error_set(error, EINVAL,
4601                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4602                                                   NULL,
4603                                                   "no ipv6 item in pattern");
4604         }
4605         return ret;
4606 }
4607
4608 /**
4609  * Validate the modify-header TP actions.
4610  *
4611  * @param[in] action_flags
4612  *   Holds the actions detected until now.
4613  * @param[in] action
4614  *   Pointer to the modify action.
4615  * @param[in] item_flags
4616  *   Holds the items detected.
4617  * @param[out] error
4618  *   Pointer to error structure.
4619  *
4620  * @return
4621  *   0 on success, a negative errno value otherwise and rte_errno is set.
4622  */
4623 static int
4624 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4625                                   const struct rte_flow_action *action,
4626                                   const uint64_t item_flags,
4627                                   struct rte_flow_error *error)
4628 {
4629         int ret = 0;
4630         uint64_t layer;
4631
4632         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4633         if (!ret) {
4634                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4635                                  MLX5_FLOW_LAYER_INNER_L4 :
4636                                  MLX5_FLOW_LAYER_OUTER_L4;
4637                 if (!(item_flags & layer))
4638                         return rte_flow_error_set(error, EINVAL,
4639                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4640                                                   NULL, "no transport layer "
4641                                                   "in pattern");
4642         }
4643         return ret;
4644 }
4645
4646 /**
4647  * Validate the modify-header actions of increment/decrement
4648  * TCP Sequence-number.
4649  *
4650  * @param[in] action_flags
4651  *   Holds the actions detected until now.
4652  * @param[in] action
4653  *   Pointer to the modify action.
4654  * @param[in] item_flags
4655  *   Holds the items detected.
4656  * @param[out] error
4657  *   Pointer to error structure.
4658  *
4659  * @return
4660  *   0 on success, a negative errno value otherwise and rte_errno is set.
4661  */
4662 static int
4663 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4664                                        const struct rte_flow_action *action,
4665                                        const uint64_t item_flags,
4666                                        struct rte_flow_error *error)
4667 {
4668         int ret = 0;
4669         uint64_t layer;
4670
4671         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4672         if (!ret) {
4673                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4674                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4675                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4676                 if (!(item_flags & layer))
4677                         return rte_flow_error_set(error, EINVAL,
4678                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4679                                                   NULL, "no TCP item in"
4680                                                   " pattern");
4681                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4682                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4683                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4684                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4685                         return rte_flow_error_set(error, EINVAL,
4686                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4687                                                   NULL,
4688                                                   "cannot decrease and increase"
4689                                                   " TCP sequence number"
4690                                                   " at the same time");
4691         }
4692         return ret;
4693 }
4694
4695 /**
4696  * Validate the modify-header actions of increment/decrement
4697  * TCP Acknowledgment number.
4698  *
4699  * @param[in] action_flags
4700  *   Holds the actions detected until now.
4701  * @param[in] action
4702  *   Pointer to the modify action.
4703  * @param[in] item_flags
4704  *   Holds the items detected.
4705  * @param[out] error
4706  *   Pointer to error structure.
4707  *
4708  * @return
4709  *   0 on success, a negative errno value otherwise and rte_errno is set.
4710  */
4711 static int
4712 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4713                                        const struct rte_flow_action *action,
4714                                        const uint64_t item_flags,
4715                                        struct rte_flow_error *error)
4716 {
4717         int ret = 0;
4718         uint64_t layer;
4719
4720         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4721         if (!ret) {
4722                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4723                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4724                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4725                 if (!(item_flags & layer))
4726                         return rte_flow_error_set(error, EINVAL,
4727                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4728                                                   NULL, "no TCP item in"
4729                                                   " pattern");
4730                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4731                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4732                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4733                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4734                         return rte_flow_error_set(error, EINVAL,
4735                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4736                                                   NULL,
4737                                                   "cannot decrease and increase"
4738                                                   " TCP acknowledgment number"
4739                                                   " at the same time");
4740         }
4741         return ret;
4742 }
4743
4744 /**
4745  * Validate the modify-header TTL actions.
4746  *
4747  * @param[in] action_flags
4748  *   Holds the actions detected until now.
4749  * @param[in] action
4750  *   Pointer to the modify action.
4751  * @param[in] item_flags
4752  *   Holds the items detected.
4753  * @param[out] error
4754  *   Pointer to error structure.
4755  *
4756  * @return
4757  *   0 on success, a negative errno value otherwise and rte_errno is set.
4758  */
4759 static int
4760 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4761                                    const struct rte_flow_action *action,
4762                                    const uint64_t item_flags,
4763                                    struct rte_flow_error *error)
4764 {
4765         int ret = 0;
4766         uint64_t layer;
4767
4768         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4769         if (!ret) {
4770                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4771                                  MLX5_FLOW_LAYER_INNER_L3 :
4772                                  MLX5_FLOW_LAYER_OUTER_L3;
4773                 if (!(item_flags & layer))
4774                         return rte_flow_error_set(error, EINVAL,
4775                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4776                                                   NULL,
4777                                                   "no IP protocol in pattern");
4778         }
4779         return ret;
4780 }
4781
4782 /**
4783  * Validate the generic modify field actions.
 *
4784  * @param[in] dev
4785  *   Pointer to the rte_eth_dev structure.
4786  * @param[in] action_flags
4787  *   Holds the actions detected until now.
4788  * @param[in] action
4789  *   Pointer to the modify action.
4790  * @param[in] attr
4791  *   Pointer to the flow attributes.
4792  * @param[out] error
4793  *   Pointer to error structure.
4794  *
4795  * @return
4796  *   Number of header fields to modify (0 or more) on success,
4797  *   a negative errno value otherwise and rte_errno is set.
4798  */
4799 static int
4800 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4801                                    const uint64_t action_flags,
4802                                    const struct rte_flow_action *action,
4803                                    const struct rte_flow_attr *attr,
4804                                    struct rte_flow_error *error)
4805 {
4806         int ret = 0;
4807         struct mlx5_priv *priv = dev->data->dev_private;
4808         struct mlx5_dev_config *config = &priv->config;
4809         const struct rte_flow_action_modify_field *action_modify_field =
4810                 action->conf;
4811         uint32_t dst_width = mlx5_flow_item_field_width(priv,
4812                                 action_modify_field->dst.field, -1);
4813         uint32_t src_width = mlx5_flow_item_field_width(priv,
4814                                 action_modify_field->src.field, dst_width);
4815
4816         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4817         if (ret)
4818                 return ret;
4819
4820         if (action_modify_field->width == 0)
4821                 return rte_flow_error_set(error, EINVAL,
4822                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4823                                 "no bits are requested to be modified");
4824         else if (action_modify_field->width > dst_width ||
4825                  action_modify_field->width > src_width)
4826                 return rte_flow_error_set(error, EINVAL,
4827                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4828                                 "cannot modify more bits than"
4829                                 " the width of a field");
4830         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4831             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4832                 if ((action_modify_field->dst.offset +
4833                      action_modify_field->width > dst_width) ||
4834                     (action_modify_field->dst.offset % 32))
4835                         return rte_flow_error_set(error, EINVAL,
4836                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4837                                         "destination offset is too big"
4838                                         " or not aligned to 4 bytes");
4839                 if (action_modify_field->dst.level &&
4840                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4841                         return rte_flow_error_set(error, ENOTSUP,
4842                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4843                                         "inner header fields modification"
4844                                         " is not supported");
4845         }
4846         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4847             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4848                 if (!attr->transfer && !attr->group)
4849                         return rte_flow_error_set(error, ENOTSUP,
4850                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4851                                         "modify field action is not"
4852                                         " supported for group 0");
4853                 if ((action_modify_field->src.offset +
4854                      action_modify_field->width > src_width) ||
4855                     (action_modify_field->src.offset % 32))
4856                         return rte_flow_error_set(error, EINVAL,
4857                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4858                                         "source offset is too big"
4859                                         " or not aligned to 4 bytes");
4860                 if (action_modify_field->src.level &&
4861                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4862                         return rte_flow_error_set(error, ENOTSUP,
4863                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4864                                         "inner header fields modification"
4865                                         " is not supported");
4866         }
4867         if ((action_modify_field->dst.field ==
4868              action_modify_field->src.field) &&
4869             (action_modify_field->dst.level ==
4870              action_modify_field->src.level))
4871                 return rte_flow_error_set(error, EINVAL,
4872                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4873                                 "source and destination fields"
4874                                 " cannot be the same");
4875         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4876             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4877             action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4878                 return rte_flow_error_set(error, EINVAL,
4879                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4880                                 "mark, immediate value or a pointer to it"
4881                                 " cannot be used as a destination");
4882         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4883             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4884                 return rte_flow_error_set(error, ENOTSUP,
4885                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4886                                 "modifications of an arbitrary"
4887                                 " place in a packet are not supported");
4888         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4889             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4890                 return rte_flow_error_set(error, ENOTSUP,
4891                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4892                                 "modifications of the 802.1Q Tag"
4893                                 " Identifier are not supported");
4894         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4895             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4896                 return rte_flow_error_set(error, ENOTSUP,
4897                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4898                                 "modifications of the VXLAN Network"
4899                                 " Identifier are not supported");
4900         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4901             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4902                 return rte_flow_error_set(error, ENOTSUP,
4903                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4904                                 "modifications of the GENEVE Network"
4905                                 " Identifier are not supported");
4906         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4907             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4908             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4909             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4910                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4911                     !mlx5_flow_ext_mreg_supported(dev))
4912                         return rte_flow_error_set(error, ENOTSUP,
4913                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4914                                         "cannot modify mark or metadata without"
4915                                         " extended metadata register support");
4916         }
4917         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4918                 return rte_flow_error_set(error, ENOTSUP,
4919                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4920                                 "add and sub operations"
4921                                 " are not supported");
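        /* Return the number of 32-bit words covering the width, ceil(width / 32). */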
4922         return (action_modify_field->width / 32) +
4923                !!(action_modify_field->width % 32);
4924 }
4925
4926 /**
4927  * Validate jump action.
4928  *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, or NULL.
4929  * @param[in] action
4930  *   Pointer to the jump action.
4931  * @param[in] action_flags
4932  *   Holds the actions detected until now.
4933  * @param[in] attributes
4934  *   Pointer to flow attributes
4935  * @param[in] external
4936  *   Action belongs to a flow rule created by a request external to the PMD.
4937  * @param[out] error
4938  *   Pointer to error structure.
4939  *
4940  * @return
4941  *   0 on success, a negative errno value otherwise and rte_errno is set.
4942  */
4943 static int
4944 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4945                              const struct mlx5_flow_tunnel *tunnel,
4946                              const struct rte_flow_action *action,
4947                              uint64_t action_flags,
4948                              const struct rte_flow_attr *attributes,
4949                              bool external, struct rte_flow_error *error)
4950 {
4951         uint32_t target_group, table;
4952         int ret = 0;
4953         struct flow_grp_info grp_info = {
4954                 .external = !!external,
4955                 .transfer = !!attributes->transfer,
4956                 .fdb_def_rule = 1,
4957                 .std_tbl_fix = 0
4958         };
4959         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4960                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4961                 return rte_flow_error_set(error, EINVAL,
4962                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4963                                           "can't have 2 fate actions in"
4964                                           " the same flow");
4965         if (!action->conf)
4966                 return rte_flow_error_set(error, EINVAL,
4967                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4968                                           NULL, "action configuration not set");
4969         target_group =
4970                 ((const struct rte_flow_action_jump *)action->conf)->group;
4971         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4972                                        &grp_info, error);
4973         if (ret)
4974                 return ret;
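        /*
         * A jump to the current group is rejected unless tunnel offload
         * SET/MATCH actions remap the target to a different table.
         */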
4975         if (attributes->group == target_group &&
4976             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4977                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4978                 return rte_flow_error_set(error, EINVAL,
4979                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4980                                           "target group must be other than"
4981                                           " the current flow group");
4982         return 0;
4983 }
4984
4985 /**
4986  * Validate action PORT_ID / REPRESENTED_PORT.
4987  *
4988  * @param[in] dev
4989  *   Pointer to rte_eth_dev structure.
4990  * @param[in] action_flags
4991  *   Bit-field that holds the actions detected until now.
4992  * @param[in] action
4993  *   PORT_ID / REPRESENTED_PORT action structure.
4994  * @param[in] attr
4995  *   Attributes of flow that includes this action.
4996  * @param[out] error
4997  *   Pointer to error structure.
4998  *
4999  * @return
5000  *   0 on success, a negative errno value otherwise and rte_errno is set.
5001  */
5002 static int
5003 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5004                                 uint64_t action_flags,
5005                                 const struct rte_flow_action *action,
5006                                 const struct rte_flow_attr *attr,
5007                                 struct rte_flow_error *error)
5008 {
5009         const struct rte_flow_action_port_id *port_id;
5010         const struct rte_flow_action_ethdev *ethdev;
5011         struct mlx5_priv *act_priv;
5012         struct mlx5_priv *dev_priv;
5013         uint16_t port;
5014
5015         if (!attr->transfer)
5016                 return rte_flow_error_set(error, ENOTSUP,
5017                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5018                                           NULL,
5019                                           "port action is valid in transfer"
5020                                           " mode only");
5021         if (!action || !action->conf)
5022                 return rte_flow_error_set(error, ENOTSUP,
5023                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5024                                           NULL,
5025                                           "port action parameters must be"
5026                                           " specified");
5027         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5028                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5029                 return rte_flow_error_set(error, EINVAL,
5030                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5031                                           "can have only one fate action in"
5032                                           " a flow");
5033         dev_priv = mlx5_dev_to_eswitch_info(dev);
5034         if (!dev_priv)
5035                 return rte_flow_error_set(error, rte_errno,
5036                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5037                                           NULL,
5038                                           "failed to obtain E-Switch info");
5039         switch (action->type) {
5040         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5041                 port_id = action->conf;
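                /* 'original' selects the device's own port instead of the given ID. */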
5042                 port = port_id->original ? dev->data->port_id : port_id->id;
5043                 break;
5044         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5045                 ethdev = action->conf;
5046                 port = ethdev->port_id;
5047                 break;
5048         default:
5049                 MLX5_ASSERT(false);
5050                 return rte_flow_error_set
5051                                 (error, EINVAL,
5052                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5053                                  "unknown E-Switch action");
5054         }
5055         act_priv = mlx5_port_to_eswitch_info(port, false);
5056         if (!act_priv)
5057                 return rte_flow_error_set
5058                                 (error, rte_errno,
5059                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5060                                  "failed to obtain E-Switch port id for port");
5061         if (act_priv->domain_id != dev_priv->domain_id)
5062                 return rte_flow_error_set
5063                                 (error, EINVAL,
5064                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5065                                  "port does not belong to"
5066                                  " E-Switch being configured");
5067         return 0;
5068 }
5069
5070 /**
5071  * Get the maximum number of modify header actions.
5072  *
5073  * @param dev
5074  *   Pointer to rte_eth_dev structure.
5075  * @param root
5076  *   Whether action is on root table.
5077  *
5078  * @return
5079  *   Max number of modify header actions device can support.
5080  */
5081 static inline unsigned int
5082 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5083                               bool root)
5084 {
5085         /*
5086          * There's no way to directly query the max capacity from FW.
5087          * The maximal value on root table should be assumed to be supported.
5088          */
5089         if (!root)
5090                 return MLX5_MAX_MODIFY_NUM;
5091         else
5092                 return MLX5_ROOT_TBL_MODIFY_NUM;
5093 }
5094
5095 /**
5096  * Validate the meter action.
5097  *
5098  * @param[in] dev
5099  *   Pointer to rte_eth_dev structure.
5100  * @param[in] action_flags
5101  *   Bit-field that holds the actions detected until now.
5102  * @param[in] action
5103  *   Pointer to the meter action.
5104  * @param[in] attr
5105  *   Attributes of flow that includes this action.
5106  * @param[in] port_id_item
5107  *   Pointer to item indicating port id.
 * @param[out] def_policy
 *   Set to true if the meter uses the default policy, false otherwise.
5108  * @param[out] error
5109  *   Pointer to error structure.
5110  *
5111  * @return
5112  *   0 on success, a negative errno value otherwise and rte_errno is set.
5113  */
5114 static int
5115 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5116                                 uint64_t action_flags,
5117                                 const struct rte_flow_action *action,
5118                                 const struct rte_flow_attr *attr,
5119                                 const struct rte_flow_item *port_id_item,
5120                                 bool *def_policy,
5121                                 struct rte_flow_error *error)
5122 {
5123         struct mlx5_priv *priv = dev->data->dev_private;
5124         const struct rte_flow_action_meter *am = action->conf;
5125         struct mlx5_flow_meter_info *fm;
5126         struct mlx5_flow_meter_policy *mtr_policy;
5127         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5128
5129         if (!am)
5130                 return rte_flow_error_set(error, EINVAL,
5131                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5132                                           "meter action conf is NULL");
5133
5134         if (action_flags & MLX5_FLOW_ACTION_METER)
5135                 return rte_flow_error_set(error, ENOTSUP,
5136                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5137                                           "meter chaining is not supported");
5138         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5139                 return rte_flow_error_set(error, ENOTSUP,
5140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5141                                           "meter with jump is not supported");
5142         if (!priv->mtr_en)
5143                 return rte_flow_error_set(error, ENOTSUP,
5144                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5145                                           NULL,
5146                                           "meter action not supported");
5147         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5148         if (!fm)
5149                 return rte_flow_error_set(error, EINVAL,
5150                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5151                                           "Meter not found");
5152         /* An ASO meter can always be shared by different domains. */
5153         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5154             !(fm->transfer == attr->transfer ||
5155               (!fm->ingress && !attr->ingress && attr->egress) ||
5156               (!fm->egress && !attr->egress && attr->ingress)))
5157                 return rte_flow_error_set(error, EINVAL,
5158                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5159                         "Flow attribute domains are either invalid "
5160                         "or conflict with the current "
5161                         "meter attributes");
5162         if (fm->def_policy) {
5163                 if (!((attr->transfer &&
5164                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5165                         (attr->egress &&
5166                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5167                         (attr->ingress &&
5168                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5169                         return rte_flow_error_set(error, EINVAL,
5170                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5171                                           "Flow attribute domains "
5172                                           "conflict with the current "
5173                                           "meter domain attributes");
5174                 *def_policy = true;
5175         } else {
5176                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5177                                                 fm->policy_id, NULL);
5178                 if (!mtr_policy)
5179                         return rte_flow_error_set(error, EINVAL,
5180                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5181                                           "Invalid policy id for meter");
5182                 if (!((attr->transfer && mtr_policy->transfer) ||
5183                         (attr->egress && mtr_policy->egress) ||
5184                         (attr->ingress && mtr_policy->ingress)))
5185                         return rte_flow_error_set(error, EINVAL,
5186                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5187                                           "Flow attribute domains "
5188                                           "conflict with the current "
5189                                           "meter domain attributes");
5190                 if (attr->transfer && mtr_policy->dev) {
5191                         /*
5192                          * When the policy has a port_id fate action,
5193                          * the flow must have the same src port as the policy.
5194                          */
5195                         struct mlx5_priv *policy_port_priv =
5196                                         mtr_policy->dev->data->dev_private;
5197                         int32_t flow_src_port = priv->representor_id;
5198
5199                         if (port_id_item) {
5200                                 const struct rte_flow_item_port_id *spec =
5201                                                         port_id_item->spec;
5202                                 struct mlx5_priv *port_priv =
5203                                         mlx5_port_to_eswitch_info(spec->id,
5204                                                                   false);
5205                                 if (!port_priv)
5206                                         return rte_flow_error_set(error,
5207                                                 rte_errno,
5208                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5209                                                 spec,
5210                                                 "Failed to get port info.");
5211                                 flow_src_port = port_priv->representor_id;
5212                         }
5213                         if (flow_src_port != policy_port_priv->representor_id)
5214                                 return rte_flow_error_set(error,
5215                                                 rte_errno,
5216                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5217                                                 NULL,
5218                                                 "Flow and meter policy "
5219                                                 "have different src ports.");
5220                 }
5221                 *def_policy = false;
5222         }
5223         return 0;
5224 }
5225
5226 /**
5227  * Validate the age action.
5228  *
5229  * @param[in] action_flags
5230  *   Holds the actions detected until now.
5231  * @param[in] action
5232  *   Pointer to the age action.
5233  * @param[in] dev
5234  *   Pointer to the Ethernet device structure.
5235  * @param[out] error
5236  *   Pointer to error structure.
5237  *
5238  * @return
5239  *   0 on success, a negative errno value otherwise and rte_errno is set.
5240  */
5241 static int
5242 flow_dv_validate_action_age(uint64_t action_flags,
5243                             const struct rte_flow_action *action,
5244                             struct rte_eth_dev *dev,
5245                             struct rte_flow_error *error)
5246 {
5247         struct mlx5_priv *priv = dev->data->dev_private;
5248         const struct rte_flow_action_age *age = action->conf;
5249
5250         if (!priv->sh->devx || (priv->sh->cmng.counter_fallback &&
5251             !priv->sh->aso_age_mng))
5252                 return rte_flow_error_set(error, ENOTSUP,
5253                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5254                                           NULL,
5255                                           "age action not supported");
5256         if (!(action->conf))
5257                 return rte_flow_error_set(error, EINVAL,
5258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5259                                           "configuration cannot be null");
5260         if (!(age->timeout))
5261                 return rte_flow_error_set(error, EINVAL,
5262                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5263                                           "invalid timeout value 0");
5264         if (action_flags & MLX5_FLOW_ACTION_AGE)
5265                 return rte_flow_error_set(error, EINVAL,
5266                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5267                                           "duplicate age actions set");
5268         return 0;
5269 }
5270
5271 /**
5272  * Validate the modify-header IPv4 DSCP actions.
5273  *
5274  * @param[in] action_flags
5275  *   Holds the actions detected until now.
5276  * @param[in] action
5277  *   Pointer to the modify action.
5278  * @param[in] item_flags
5279  *   Holds the items detected.
5280  * @param[out] error
5281  *   Pointer to error structure.
5282  *
5283  * @return
5284  *   0 on success, a negative errno value otherwise and rte_errno is set.
5285  */
5286 static int
5287 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5288                                          const struct rte_flow_action *action,
5289                                          const uint64_t item_flags,
5290                                          struct rte_flow_error *error)
5291 {
5292         int ret = 0;
5293
5294         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5295         if (!ret) {
5296                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5297                         return rte_flow_error_set(error, EINVAL,
5298                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5299                                                   NULL,
5300                                                   "no ipv4 item in pattern");
5301         }
5302         return ret;
5303 }
5304
5305 /**
5306  * Validate the modify-header IPv6 DSCP actions.
5307  *
5308  * @param[in] action_flags
5309  *   Holds the actions detected until now.
5310  * @param[in] action
5311  *   Pointer to the modify action.
5312  * @param[in] item_flags
5313  *   Holds the items detected.
5314  * @param[out] error
5315  *   Pointer to error structure.
5316  *
5317  * @return
5318  *   0 on success, a negative errno value otherwise and rte_errno is set.
5319  */
5320 static int
5321 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5322                                          const struct rte_flow_action *action,
5323                                          const uint64_t item_flags,
5324                                          struct rte_flow_error *error)
5325 {
5326         int ret = 0;
5327
5328         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5329         if (!ret) {
5330                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5331                         return rte_flow_error_set(error, EINVAL,
5332                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5333                                                   NULL,
5334                                                   "no ipv6 item in pattern");
5335         }
5336         return ret;
5337 }
5338
5339 int
5340 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5341                         struct mlx5_list_entry *entry, void *cb_ctx)
5342 {
5343         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5344         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5345         struct mlx5_flow_dv_modify_hdr_resource *resource =
5346                                   container_of(entry, typeof(*resource), entry);
5347         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5348
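        /* Extend the key to cover the variable-length action array. */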
5349         key_len += ref->actions_num * sizeof(ref->actions[0]);
5350         return ref->actions_num != resource->actions_num ||
5351                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5352 }
5353
5354 static struct mlx5_indexed_pool *
5355 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5356 {
5357         struct mlx5_indexed_pool *ipool = __atomic_load_n
5358                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5359
5360         if (!ipool) {
5361                 struct mlx5_indexed_pool *expected = NULL;
5362                 struct mlx5_indexed_pool_config cfg =
5363                     (struct mlx5_indexed_pool_config) {
5364                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5365                                                                    (index + 1) *
5366                                            sizeof(struct mlx5_modification_cmd),
5367                        .trunk_size = 64,
5368                        .grow_trunk = 3,
5369                        .grow_shift = 2,
5370                        .need_lock = 1,
5371                        .release_mem_en = !!sh->reclaim_mode,
5372                        .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
5373                        .malloc = mlx5_malloc,
5374                        .free = mlx5_free,
5375                        .type = "mlx5_modify_action_resource",
5376                 };
5377
5378                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5379                 ipool = mlx5_ipool_create(&cfg);
5380                 if (!ipool)
5381                         return NULL;
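                /*
                 * Publish the new ipool atomically; if another thread
                 * installed one first, destroy ours and use the winner's.
                 */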
5382                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5383                                                  &expected, ipool, false,
5384                                                  __ATOMIC_SEQ_CST,
5385                                                  __ATOMIC_SEQ_CST)) {
5386                         mlx5_ipool_destroy(ipool);
5387                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5388                                                 __ATOMIC_SEQ_CST);
5389                 }
5390         }
5391         return ipool;
5392 }
5393
5394 struct mlx5_list_entry *
5395 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5396 {
5397         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5398         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5399         struct mlx5dv_dr_domain *ns;
5400         struct mlx5_flow_dv_modify_hdr_resource *entry;
5401         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5402         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5403                                                           ref->actions_num - 1);
5404         int ret;
5405         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5406         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5407         uint32_t idx;
5408
5409         if (unlikely(!ipool)) {
5410                 rte_flow_error_set(ctx->error, ENOMEM,
5411                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5412                                    NULL, "cannot allocate modify ipool");
5413                 return NULL;
5414         }
5415         entry = mlx5_ipool_zmalloc(ipool, &idx);
5416         if (!entry) {
5417                 rte_flow_error_set(ctx->error, ENOMEM,
5418                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5419                                    "cannot allocate resource memory");
5420                 return NULL;
5421         }
5422         rte_memcpy(&entry->ft_type,
5423                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5424                    key_len + data_len);
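        /* Select the DR domain that matches the resource's table type. */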
5425         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5426                 ns = sh->fdb_domain;
5427         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5428                 ns = sh->tx_domain;
5429         else
5430                 ns = sh->rx_domain;
5431         ret = mlx5_flow_os_create_flow_action_modify_header
5432                                         (sh->cdev->ctx, ns, entry,
5433                                          data_len, &entry->action);
5434         if (ret) {
5435                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5436                 rte_flow_error_set(ctx->error, ENOMEM,
5437                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5438                                    NULL, "cannot create modification action");
5439                 return NULL;
5440         }
5441         entry->idx = idx;
5442         return &entry->entry;
5443 }
5444
5445 struct mlx5_list_entry *
5446 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5447                         void *cb_ctx)
5448 {
5449         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5450         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5451         struct mlx5_flow_dv_modify_hdr_resource *entry;
5452         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5453         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5454         uint32_t idx;
5455
5456         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5457                                   &idx);
5458         if (!entry) {
5459                 rte_flow_error_set(ctx->error, ENOMEM,
5460                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5461                                    "cannot allocate resource memory");
5462                 return NULL;
5463         }
5464         memcpy(entry, oentry, sizeof(*entry) + data_len);
5465         entry->idx = idx;
5466         return &entry->entry;
5467 }
5468
5469 void
5470 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5471 {
5472         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5473         struct mlx5_flow_dv_modify_hdr_resource *res =
5474                 container_of(entry, typeof(*res), entry);
5475
5476         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5477 }
5478
5479 /**
5480  * Validate the sample action.
5481  *
5482  * @param[in, out] action_flags
5483  *   Holds the actions detected until now.
5484  * @param[in] action
5485  *   Pointer to the sample action.
5486  * @param[in] dev
5487  *   Pointer to the Ethernet device structure.
5488  * @param[in] attr
5489  *   Attributes of flow that includes this action.
5490  * @param[in] item_flags
5491  *   Holds the items detected.
5492  * @param[in] rss
5493  *   Pointer to the RSS action.
5494  * @param[out] sample_rss
5495  *   Pointer to the RSS action in sample action list.
5496  * @param[out] count
5497  *   Pointer to the COUNT action in sample action list.
5498  * @param[out] fdb_mirror_limit
5499  *   Pointer to the FDB mirror limitation flag.
5500  * @param[out] error
5501  *   Pointer to error structure.
5502  *
5503  * @return
5504  *   0 on success, a negative errno value otherwise and rte_errno is set.
5505  */
5506 static int
5507 flow_dv_validate_action_sample(uint64_t *action_flags,
5508                                const struct rte_flow_action *action,
5509                                struct rte_eth_dev *dev,
5510                                const struct rte_flow_attr *attr,
5511                                uint64_t item_flags,
5512                                const struct rte_flow_action_rss *rss,
5513                                const struct rte_flow_action_rss **sample_rss,
5514                                const struct rte_flow_action_count **count,
5515                                int *fdb_mirror_limit,
5516                                struct rte_flow_error *error)
5517 {
5518         struct mlx5_priv *priv = dev->data->dev_private;
5519         struct mlx5_dev_config *dev_conf = &priv->config;
5520         const struct rte_flow_action_sample *sample = action->conf;
5521         const struct rte_flow_action *act;
5522         uint64_t sub_action_flags = 0;
5523         uint16_t queue_index = 0xFFFF;
5524         int actions_n = 0;
5525         int ret;
5526
5527         if (!sample)
5528                 return rte_flow_error_set(error, EINVAL,
5529                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5530                                           "configuration cannot be NULL");
5531         if (sample->ratio == 0)
5532                 return rte_flow_error_set(error, EINVAL,
5533                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5534                                           "ratio value starts from 1");
5535         if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en))
5536                 return rte_flow_error_set(error, ENOTSUP,
5537                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5538                                           NULL,
5539                                           "sample action not supported");
5540         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5541                 return rte_flow_error_set(error, EINVAL,
5542                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5543                                           "Multiple sample actions not "
5544                                           "supported");
5545         if (*action_flags & MLX5_FLOW_ACTION_METER)
5546                 return rte_flow_error_set(error, EINVAL,
5547                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5548                                           "wrong action order, meter should "
5549                                           "be after sample action");
5550         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5551                 return rte_flow_error_set(error, EINVAL,
5552                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5553                                           "wrong action order, jump should "
5554                                           "be after sample action");
5555         act = sample->actions;
5556         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5557                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5558                         return rte_flow_error_set(error, ENOTSUP,
5559                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5560                                                   act, "too many actions");
5561                 switch (act->type) {
5562                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5563                         ret = mlx5_flow_validate_action_queue(act,
5564                                                               sub_action_flags,
5565                                                               dev,
5566                                                               attr, error);
5567                         if (ret < 0)
5568                                 return ret;
5569                         queue_index = ((const struct rte_flow_action_queue *)
5570                                                         (act->conf))->index;
5571                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5572                         ++actions_n;
5573                         break;
5574                 case RTE_FLOW_ACTION_TYPE_RSS:
5575                         *sample_rss = act->conf;
5576                         ret = mlx5_flow_validate_action_rss(act,
5577                                                             sub_action_flags,
5578                                                             dev, attr,
5579                                                             item_flags,
5580                                                             error);
5581                         if (ret < 0)
5582                                 return ret;
5583                         if (rss && *sample_rss &&
5584                             ((*sample_rss)->level != rss->level ||
5585                             (*sample_rss)->types != rss->types))
5586                                 return rte_flow_error_set(error, ENOTSUP,
5587                                         RTE_FLOW_ERROR_TYPE_ACTION,
5588                                         NULL,
5589                                         "Can't use different RSS types "
5590                                         "or levels in the same flow");
5591                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5592                                 queue_index = (*sample_rss)->queue[0];
5593                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5594                         ++actions_n;
5595                         break;
5596                 case RTE_FLOW_ACTION_TYPE_MARK:
5597                         ret = flow_dv_validate_action_mark(dev, act,
5598                                                            sub_action_flags,
5599                                                            attr, error);
5600                         if (ret < 0)
5601                                 return ret;
5602                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5603                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5604                                                 MLX5_FLOW_ACTION_MARK_EXT;
5605                         else
5606                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5607                         ++actions_n;
5608                         break;
5609                 case RTE_FLOW_ACTION_TYPE_COUNT:
5610                         ret = flow_dv_validate_action_count
5611                                 (dev, false, *action_flags | sub_action_flags,
5612                                  error);
5613                         if (ret < 0)
5614                                 return ret;
5615                         *count = act->conf;
5616                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5617                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5618                         ++actions_n;
5619                         break;
5620                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5621                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5622                         ret = flow_dv_validate_action_port_id(dev,
5623                                                               sub_action_flags,
5624                                                               act,
5625                                                               attr,
5626                                                               error);
5627                         if (ret)
5628                                 return ret;
5629                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5630                         ++actions_n;
5631                         break;
5632                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5633                         ret = flow_dv_validate_action_raw_encap_decap
5634                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5635                                  &actions_n, action, item_flags, error);
5636                         if (ret < 0)
5637                                 return ret;
5638                         ++actions_n;
5639                         break;
5640                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5641                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5642                         ret = flow_dv_validate_action_l2_encap(dev,
5643                                                                sub_action_flags,
5644                                                                act, attr,
5645                                                                error);
5646                         if (ret < 0)
5647                                 return ret;
5648                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5649                         ++actions_n;
5650                         break;
5651                 default:
5652                         return rte_flow_error_set(error, ENOTSUP,
5653                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5654                                                   NULL,
5655                                                   "Unsupported action in "
5656                                                   "the sample action list");
5657                 }
5658         }
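        /*
         * Per-domain checks: NIC Rx sampling needs a QUEUE/RSS fate,
         * NIC Tx sampling is not supported, and E-Switch mirroring
         * needs a destination port with no Rx-only sub-actions.
         */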
5659         if (attr->ingress && !attr->transfer) {
5660                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5661                                           MLX5_FLOW_ACTION_RSS)))
5662                         return rte_flow_error_set(error, EINVAL,
5663                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5664                                                   NULL,
5665                                                   "Ingress must have a dest "
5666                                                   "QUEUE for Sample");
5667         } else if (attr->egress && !attr->transfer) {
5668                 return rte_flow_error_set(error, ENOTSUP,
5669                                           RTE_FLOW_ERROR_TYPE_ACTION,
5670                                           NULL,
5671                                           "Sample only supports Ingress "
5672                                           "or E-Switch");
5673         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5674                 MLX5_ASSERT(attr->transfer);
5675                 if (sample->ratio > 1)
5676                         return rte_flow_error_set(error, ENOTSUP,
5677                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5678                                                   NULL,
5679                                                   "E-Switch doesn't support "
5680                                                   "any optional action "
5681                                                   "for sampling");
5682                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5683                         return rte_flow_error_set(error, ENOTSUP,
5684                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5685                                                   NULL,
5686                                                   "unsupported action QUEUE");
5687                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5688                         return rte_flow_error_set(error, ENOTSUP,
5689                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5690                                                   NULL,
5691                                                   "unsupported action RSS");
5692                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5693                         return rte_flow_error_set(error, EINVAL,
5694                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5695                                                   NULL,
5696                                                   "E-Switch must have a dest "
5697                                                   "port for mirroring");
5698                 if (!priv->config.hca_attr.reg_c_preserve &&
5699                      priv->representor_id != UINT16_MAX)
5700                         *fdb_mirror_limit = 1;
5701         }
5702         /* Continue validation for Xcap actions. */
5703         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5704             (queue_index == 0xFFFF ||
5705              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5706                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5707                      MLX5_FLOW_XCAP_ACTIONS)
5708                         return rte_flow_error_set(error, ENOTSUP,
5709                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5710                                                   NULL, "encap and decap "
5711                                                   "combination isn't "
5712                                                   "supported");
5713                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5714                                                         MLX5_FLOW_ACTION_ENCAP))
5715                         return rte_flow_error_set(error, ENOTSUP,
5716                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5717                                                   NULL, "encap is not supported"
5718                                                   " for ingress traffic");
5719         }
5720         return 0;
5721 }
5722
5723 /**
5724  * Find existing modify-header resource or create and register a new one.
5725  *
5726  * @param[in, out] dev
5727  *   Pointer to rte_eth_dev structure.
5728  * @param[in, out] resource
5729  *   Pointer to modify-header resource.
5730  * @param[in, out] dev_flow
5731  *   Pointer to the dev_flow.
5732  * @param[out] error
5733  *   Pointer to error structure.
5734  *
5735  * @return
5736  *   0 on success, otherwise a negative errno value and rte_errno is set.
5737  */
5738 static int
5739 flow_dv_modify_hdr_resource_register
5740                         (struct rte_eth_dev *dev,
5741                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5742                          struct mlx5_flow *dev_flow,
5743                          struct rte_flow_error *error)
5744 {
5745         struct mlx5_priv *priv = dev->data->dev_private;
5746         struct mlx5_dev_ctx_shared *sh = priv->sh;
5747         uint32_t key_len = sizeof(*resource) -
5748                            offsetof(typeof(*resource), ft_type) +
5749                            resource->actions_num * sizeof(resource->actions[0]);
5750         struct mlx5_list_entry *entry;
5751         struct mlx5_flow_cb_ctx ctx = {
5752                 .error = error,
5753                 .data = resource,
5754         };
5755         struct mlx5_hlist *modify_cmds;
5756         uint64_t key64;
5757
5758         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5759                                 "hdr_modify",
5760                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5761                                 true, false, sh,
5762                                 flow_dv_modify_create_cb,
5763                                 flow_dv_modify_match_cb,
5764                                 flow_dv_modify_remove_cb,
5765                                 flow_dv_modify_clone_cb,
5766                                 flow_dv_modify_clone_free_cb);
5767         if (unlikely(!modify_cmds))
5768                 return -rte_errno;
5769         resource->root = !dev_flow->dv.group;
5770         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5771                                                                 resource->root))
5772                 return rte_flow_error_set(error, EOVERFLOW,
5773                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5774                                           "too many modify header items");
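        /* The hash key is a raw checksum over the whole resource key. */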
5775         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5776         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5777         if (!entry)
5778                 return -rte_errno;
5779         resource = container_of(entry, typeof(*resource), entry);
5780         dev_flow->handle->dvh.modify_hdr = resource;
5781         return 0;
5782 }
5783
5784 /**
5785  * Get DV flow counter by index.
5786  *
5787  * @param[in] dev
5788  *   Pointer to the Ethernet device structure.
5789  * @param[in] idx
5790  *   mlx5 flow counter index in the container.
5791  * @param[out] ppool
5792  *   mlx5 flow counter pool in the container.
5793  *
5794  * @return
5795  *   Pointer to the counter, NULL otherwise.
5796  */
5797 static struct mlx5_flow_counter *
5798 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5799                            uint32_t idx,
5800                            struct mlx5_flow_counter_pool **ppool)
5801 {
5802         struct mlx5_priv *priv = dev->data->dev_private;
5803         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5804         struct mlx5_flow_counter_pool *pool;
5805
5806         /* Decrease to original index and clear shared bit. */
5807         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5808         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5809         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5810         MLX5_ASSERT(pool);
5811         if (ppool)
5812                 *ppool = pool;
5813         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5814 }
5815
5816 /**
5817  * Check the devx counter belongs to the pool.
5818  *
5819  * @param[in] pool
5820  *   Pointer to the counter pool.
5821  * @param[in] id
5822  *   The counter devx ID.
5823  *
5824  * @return
5825  *   True if counter belongs to the pool, false otherwise.
5826  */
5827 static bool
5828 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5829 {
5830         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5831                    MLX5_COUNTERS_PER_POOL;
5832
5833         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5834                 return true;
5835         return false;
5836 }
5837
5838 /**
5839  * Get a pool by devx counter ID.
5840  *
5841  * @param[in] cmng
5842  *   Pointer to the counter management.
5843  * @param[in] id
5844  *   The counter devx ID.
5845  *
5846  * @return
5847  *   The counter pool pointer if it exists, NULL otherwise.
5848  */
5849 static struct mlx5_flow_counter_pool *
5850 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5851 {
5852         uint32_t i;
5853         struct mlx5_flow_counter_pool *pool = NULL;
5854
5855         rte_spinlock_lock(&cmng->pool_update_sl);
5856         /* Check last used pool. */
5857         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5858             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5859                 pool = cmng->pools[cmng->last_pool_idx];
5860                 goto out;
5861         }
5862         /* ID out of range means no suitable pool in the container. */
5863         if (id > cmng->max_id || id < cmng->min_id)
5864                 goto out;
5865         /*
5866          * Search the container from the end, since counter IDs mostly
5867          * increase sequentially and the last pool is usually the
5868          * needed one.
5869          */
5870         i = cmng->n_valid;
5871         while (i--) {
5872                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5873
5874                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5875                         pool = pool_tmp;
5876                         break;
5877                 }
5878         }
5879 out:
5880         rte_spinlock_unlock(&cmng->pool_update_sl);
5881         return pool;
5882 }
5883
5884 /**
5885  * Resize a counter container.
5886  *
5887  * @param[in] dev
5888  *   Pointer to the Ethernet device structure.
5889  *
5890  * @return
5891  *   0 on success, otherwise negative errno value and rte_errno is set.
5892  */
5893 static int
5894 flow_dv_container_resize(struct rte_eth_dev *dev)
5895 {
5896         struct mlx5_priv *priv = dev->data->dev_private;
5897         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5898         void *old_pools = cmng->pools;
5899         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5900         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5901         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5902
5903         if (!pools) {
5904                 rte_errno = ENOMEM;
5905                 return -ENOMEM;
5906         }
5907         if (old_pools)
5908                 memcpy(pools, old_pools, cmng->n *
5909                                        sizeof(struct mlx5_flow_counter_pool *));
5910         cmng->n = resize;
5911         cmng->pools = pools;
5912         if (old_pools)
5913                 mlx5_free(old_pools);
5914         return 0;
5915 }
5916
5917 /**
5918  * Query a devx flow counter.
5919  *
5920  * @param[in] dev
5921  *   Pointer to the Ethernet device structure.
5922  * @param[in] counter
5923  *   Index to the flow counter.
5924  * @param[out] pkts
5925  *   The statistics value of packets.
5926  * @param[out] bytes
5927  *   The statistics value of bytes.
5928  *
5929  * @return
5930  *   0 on success, otherwise a negative errno value and rte_errno is set.
5931  */
5932 static inline int
5933 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5934                      uint64_t *bytes)
5935 {
5936         struct mlx5_priv *priv = dev->data->dev_private;
5937         struct mlx5_flow_counter_pool *pool = NULL;
5938         struct mlx5_flow_counter *cnt;
5939         int offset;
5940
5941         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5942         MLX5_ASSERT(pool);
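        /* Fallback counters own a dedicated devx object; query it directly. */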
5943         if (priv->sh->cmng.counter_fallback)
5944                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5945                                         0, pkts, bytes, 0, NULL, NULL, 0);
5946         rte_spinlock_lock(&pool->sl);
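        /* No raw data yet: the bulk counter query has not populated this pool. */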
5947         if (!pool->raw) {
5948                 *pkts = 0;
5949                 *bytes = 0;
5950         } else {
5951                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5952                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5953                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5954         }
5955         rte_spinlock_unlock(&pool->sl);
5956         return 0;
5957 }
5958
5959 /**
5960  * Create and initialize a new counter pool.
5961  *
5962  * @param[in] dev
5963  *   Pointer to the Ethernet device structure.
5964  * @param[in] dcs
5965  *   The devX counter handle.
5966  * @param[in] age
5967  *   Whether the pool is for a counter that was allocated for aging.
5970  *
5971  * @return
5972  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
5973  */
5974 static struct mlx5_flow_counter_pool *
5975 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5976                     uint32_t age)
5977 {
5978         struct mlx5_priv *priv = dev->data->dev_private;
5979         struct mlx5_flow_counter_pool *pool;
5980         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5981         bool fallback = priv->sh->cmng.counter_fallback;
5982         uint32_t size = sizeof(*pool);
5983
5984         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5985         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5986         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5987         if (!pool) {
5988                 rte_errno = ENOMEM;
5989                 return NULL;
5990         }
5991         pool->raw = NULL;
5992         pool->is_aged = !!age;
5993         pool->query_gen = 0;
5994         pool->min_dcs = dcs;
5995         rte_spinlock_init(&pool->sl);
5996         rte_spinlock_init(&pool->csl);
5997         TAILQ_INIT(&pool->counters[0]);
5998         TAILQ_INIT(&pool->counters[1]);
5999         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6000         rte_spinlock_lock(&cmng->pool_update_sl);
6001         pool->index = cmng->n_valid;
6002         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6003                 mlx5_free(pool);
6004                 rte_spinlock_unlock(&cmng->pool_update_sl);
6005                 return NULL;
6006         }
6007         cmng->pools[pool->index] = pool;
6008         cmng->n_valid++;
6009         if (unlikely(fallback)) {
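                /* Track the devx ID range of fallback pools to speed up lookup by ID. */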
6010                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6011
6012                 if (base < cmng->min_id)
6013                         cmng->min_id = base;
6014                 if (base > cmng->max_id)
6015                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6016                 cmng->last_pool_idx = pool->index;
6017         }
6018         rte_spinlock_unlock(&cmng->pool_update_sl);
6019         return pool;
6020 }
6021
6022 /**
6023  * Prepare a new counter and/or a new counter pool.
6024  *
6025  * @param[in] dev
6026  *   Pointer to the Ethernet device structure.
6027  * @param[out] cnt_free
6028  *   Where to put the pointer of a new counter.
6029  * @param[in] age
6030  *   Whether the pool is for a counter that was allocated for aging.
6031  *
6032  * @return
6033  *   The counter pool pointer and @p cnt_free is set on success,
6034  *   NULL otherwise and rte_errno is set.
6035  */
6036 static struct mlx5_flow_counter_pool *
6037 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6038                              struct mlx5_flow_counter **cnt_free,
6039                              uint32_t age)
6040 {
6041         struct mlx5_priv *priv = dev->data->dev_private;
6042         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6043         struct mlx5_flow_counter_pool *pool;
6044         struct mlx5_counters tmp_tq;
6045         struct mlx5_devx_obj *dcs = NULL;
6046         struct mlx5_flow_counter *cnt;
6047         enum mlx5_counter_type cnt_type =
6048                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6049         bool fallback = priv->sh->cmng.counter_fallback;
6050         uint32_t i;
6051
6052         if (fallback) {
6053                 /* bulk_bitmap must be 0 for single counter allocation. */
6054                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6055                 if (!dcs)
6056                         return NULL;
6057                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6058                 if (!pool) {
6059                         pool = flow_dv_pool_create(dev, dcs, age);
6060                         if (!pool) {
6061                                 mlx5_devx_cmd_destroy(dcs);
6062                                 return NULL;
6063                         }
6064                 }
6065                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6066                 cnt = MLX5_POOL_GET_CNT(pool, i);
6067                 cnt->pool = pool;
6068                 cnt->dcs_when_free = dcs;
6069                 *cnt_free = cnt;
6070                 return pool;
6071         }
6072         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6073         if (!dcs) {
6074                 rte_errno = ENODATA;
6075                 return NULL;
6076         }
6077         pool = flow_dv_pool_create(dev, dcs, age);
6078         if (!pool) {
6079                 mlx5_devx_cmd_destroy(dcs);
6080                 return NULL;
6081         }
6082         TAILQ_INIT(&tmp_tq);
6083         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6084                 cnt = MLX5_POOL_GET_CNT(pool, i);
6085                 cnt->pool = pool;
6086                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6087         }
6088         rte_spinlock_lock(&cmng->csl[cnt_type]);
6089         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6090         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6091         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6092         (*cnt_free)->pool = pool;
6093         return pool;
6094 }
6095
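/*
 * A note on the non-fallback path above: one bulk devX allocation backs
 * the whole pool; counters 1..MLX5_COUNTERS_PER_POOL-1 are queued on the
 * global per-type free list, while counter 0 is handed back directly
 * through @p cnt_free, so the caller does not have to re-take the free
 * list lock for the counter it is about to use.
 */
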
6096 /**
6097  * Allocate a flow counter.
6098  *
6099  * @param[in] dev
6100  *   Pointer to the Ethernet device structure.
6101  * @param[in] age
6102  *   Whether the counter was allocated for aging.
6103  *
6104  * @return
6105  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6106  */
6107 static uint32_t
6108 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6109 {
6110         struct mlx5_priv *priv = dev->data->dev_private;
6111         struct mlx5_flow_counter_pool *pool = NULL;
6112         struct mlx5_flow_counter *cnt_free = NULL;
6113         bool fallback = priv->sh->cmng.counter_fallback;
6114         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6115         enum mlx5_counter_type cnt_type =
6116                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6117         uint32_t cnt_idx;
6118
6119         if (!priv->sh->devx) {
6120                 rte_errno = ENOTSUP;
6121                 return 0;
6122         }
6123         /* Get free counters from container. */
6124         rte_spinlock_lock(&cmng->csl[cnt_type]);
6125         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6126         if (cnt_free)
6127                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6128         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6129         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6130                 goto err;
6131         pool = cnt_free->pool;
6132         if (fallback)
6133                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6134         /* Create a DV counter action only on first-time usage. */
6135         if (!cnt_free->action) {
6136                 uint16_t offset;
6137                 struct mlx5_devx_obj *dcs;
6138                 int ret;
6139
6140                 if (!fallback) {
6141                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6142                         dcs = pool->min_dcs;
6143                 } else {
6144                         offset = 0;
6145                         dcs = cnt_free->dcs_when_free;
6146                 }
6147                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6148                                                             &cnt_free->action);
6149                 if (ret) {
6150                         rte_errno = errno;
6151                         goto err;
6152                 }
6153         }
6154         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6155                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6156         /* Update the counter reset values. */
6157         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6158                                  &cnt_free->bytes))
6159                 goto err;
6160         if (!fallback && !priv->sh->cmng.query_thread_on)
6161                 /* Start the asynchronous batch query by the host thread. */
6162                 mlx5_set_query_alarm(priv->sh);
6163         /*
6164          * When the count action is not shared (by ID), the shared_info
6165          * field is used for the indirect action API's refcnt.
6166          * When the count action is shared neither by ID nor by the
6167          * indirect action API, the refcnt must be 1.
6168          */
6169         cnt_free->shared_info.refcnt = 1;
6170         return cnt_idx;
6171 err:
6172         if (cnt_free) {
6173                 cnt_free->pool = pool;
6174                 if (fallback)
6175                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6176                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6177                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6178                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6179         }
6180         return 0;
6181 }
6182
6183 /**
6184  * Get age param from counter index.
6185  *
6186  * @param[in] dev
6187  *   Pointer to the Ethernet device structure.
6188  * @param[in] counter
6189  *   Index to the counter handler.
6190  *
6191  * @return
6192  *   The aging parameter specified for the counter index.
6193  */
6194 static struct mlx5_age_param*
6195 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6196                                 uint32_t counter)
6197 {
6198         struct mlx5_flow_counter *cnt;
6199         struct mlx5_flow_counter_pool *pool = NULL;
6200
6201         flow_dv_counter_get_by_idx(dev, counter, &pool);
6202         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6203         cnt = MLX5_POOL_GET_CNT(pool, counter);
6204         return MLX5_CNT_TO_AGE(cnt);
6205 }
6206
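/*
 * A minimal decode sketch (hypothetical helper, not used by the driver):
 * the "(counter - 1) % MLX5_COUNTERS_PER_POOL" below suggests the 1-based
 * counter index packs <pool, offset> as
 * pool_index * MLX5_COUNTERS_PER_POOL + offset + 1, with index 0 reserved
 * for "no counter" (see the MLX5_MAKE_CNT_IDX() call in
 * flow_dv_counter_alloc()).
 */
static inline void
flow_dv_example_cnt_idx_decode(uint32_t idx, uint32_t *pool_idx,
			       uint32_t *offset)
{
	/* Valid counter indexes are 1-based; 0 means "no counter". */
	*pool_idx = (idx - 1) / MLX5_COUNTERS_PER_POOL;
	*offset = (idx - 1) % MLX5_COUNTERS_PER_POOL;
}
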
6207 /**
6208  * Remove a flow counter from aged counter list.
6209  *
6210  * @param[in] dev
6211  *   Pointer to the Ethernet device structure.
6212  * @param[in] counter
6213  *   Index to the counter handler.
6214  * @param[in] cnt
6215  *   Pointer to the counter handler.
6216  */
6217 static void
6218 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6219                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6220 {
6221         struct mlx5_age_info *age_info;
6222         struct mlx5_age_param *age_param;
6223         struct mlx5_priv *priv = dev->data->dev_private;
6224         uint16_t expected = AGE_CANDIDATE;
6225
6226         age_info = GET_PORT_AGE_INFO(priv);
6227         age_param = flow_dv_counter_idx_get_age(dev, counter);
6228         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6229                                          AGE_FREE, false, __ATOMIC_RELAXED,
6230                                          __ATOMIC_RELAXED)) {
6231                 /*
6232                  * We need the lock even on age timeout,
6233                  * since the counter may still be being processed.
6234                  */
6235                 rte_spinlock_lock(&age_info->aged_sl);
6236                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6237                 rte_spinlock_unlock(&age_info->aged_sl);
6238                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6239         }
6240 }
6241
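/*
 * A sketch of the state step above (a hedged reading of the CAS): a
 * counter still in AGE_CANDIDATE is retired straight to AGE_FREE without
 * locking, as it was never queued; any other state (e.g. already timed
 * out) means the counter may sit on the aged list, so it is unlinked
 * under aged_sl before the state is reset to AGE_FREE.
 */
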
6242 /**
6243  * Release a flow counter.
6244  *
6245  * @param[in] dev
6246  *   Pointer to the Ethernet device structure.
6247  * @param[in] counter
6248  *   Index to the counter handler.
6249  */
6250 static void
6251 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6252 {
6253         struct mlx5_priv *priv = dev->data->dev_private;
6254         struct mlx5_flow_counter_pool *pool = NULL;
6255         struct mlx5_flow_counter *cnt;
6256         enum mlx5_counter_type cnt_type;
6257
6258         if (!counter)
6259                 return;
6260         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6261         MLX5_ASSERT(pool);
6262         if (pool->is_aged) {
6263                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6264         } else {
6265                 /*
6266                  * If the counter action is shared by the indirect action
6267                  * API, the atomic operation decrements its reference
6268                  * counter. If the action is still referenced after the
6269                  * decrement, the function returns here without releasing
6270                  * it. When the counter action is not shared by the
6271                  * indirect action API, the refcnt is 1 before the
6272                  * decrement, so the condition fails and we fall through.
6273                  */
6274                 if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6275                                        __ATOMIC_RELAXED))
6276                         return;
6277         }
6278         cnt->pool = pool;
6279         /*
6280          * Put the counter back to the list to be updated in non-fallback
6281          * mode. Two lists are used alternately: while one is being queried,
6282          * freed counters are added to the other list, selected by the pool
6283          * query_gen value. After the query finishes, that list is appended
6284          * to the global container counter list. The lists are switched when
6285          * a query starts, so no lock is needed here: the query callback and
6286          * the release function operate on different lists.
6287          */
6288         if (!priv->sh->cmng.counter_fallback) {
6289                 rte_spinlock_lock(&pool->csl);
6290                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6291                 rte_spinlock_unlock(&pool->csl);
6292         } else {
6293                 cnt->dcs_when_free = cnt->dcs_when_active;
6294                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6295                                            MLX5_COUNTER_TYPE_ORIGIN;
6296                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6297                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6298                                   cnt, next);
6299                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6300         }
6301 }
6302
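/*
 * A sketch of the two-list scheme described above: freed counters always
 * land on pool->counters[pool->query_gen], and the query completion
 * handler is expected to drain the other generation's list into the
 * global container list; flipping query_gen when a new query starts is
 * what keeps this release path and the query callback on different
 * TAILQs (a hedged reading of the comment above, not a full description
 * of the query thread).
 */
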
6303 /**
6304  * Resize a meter id container.
6305  *
6306  * @param[in] dev
6307  *   Pointer to the Ethernet device structure.
6308  *
6309  * @return
6310  *   0 on success, otherwise negative errno value and rte_errno is set.
6311  */
6312 static int
6313 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6314 {
6315         struct mlx5_priv *priv = dev->data->dev_private;
6316         struct mlx5_aso_mtr_pools_mng *pools_mng =
6317                                 &priv->sh->mtrmng->pools_mng;
6318         void *old_pools = pools_mng->pools;
6319         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6320         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6321         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6322
6323         if (!pools) {
6324                 rte_errno = ENOMEM;
6325                 return -ENOMEM;
6326         }
6327         if (!pools_mng->n &&
6328             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6329                 mlx5_free(pools);
6330                 return -ENOMEM;
6331         }
6332         if (old_pools)
6333                 memcpy(pools, old_pools, pools_mng->n *
6334                                        sizeof(struct mlx5_aso_mtr_pool *));
6335         pools_mng->n = resize;
6336         pools_mng->pools = pools;
6337         if (old_pools)
6338                 mlx5_free(old_pools);
6339         return 0;
6340 }
6341
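/*
 * A worked example of the resize above, assuming
 * MLX5_MTRS_CONTAINER_RESIZE == 64 (illustrative value): the pointer
 * table grows 0 -> 64 -> 128 -> ... entries, each step allocating a
 * zeroed table, copying the old pools_mng->n pointers, publishing the
 * new table and freeing the old one. The ASO queue is initialized only
 * on the very first resize (pools_mng->n == 0).
 */
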
6342 /**
6343  * Prepare a new meter and/or a new meter pool.
6344  *
6345  * @param[in] dev
6346  *   Pointer to the Ethernet device structure.
6347  * @param[out] mtr_free
6348  *   Where to put the pointer of a new meter.
6349  *
6350  * @return
6351  *   The meter pool pointer and @p mtr_free is set on success,
6352  *   NULL otherwise and rte_errno is set.
6353  */
6354 static struct mlx5_aso_mtr_pool *
6355 flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6356 {
6357         struct mlx5_priv *priv = dev->data->dev_private;
6358         struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6359         struct mlx5_aso_mtr_pool *pool = NULL;
6360         struct mlx5_devx_obj *dcs = NULL;
6361         uint32_t i;
6362         uint32_t log_obj_size;
6363
6364         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6365         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6366                                                       priv->sh->cdev->pdn,
6367                                                       log_obj_size);
6368         if (!dcs) {
6369                 rte_errno = ENODATA;
6370                 return NULL;
6371         }
6372         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6373         if (!pool) {
6374                 rte_errno = ENOMEM;
6375                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6376                 return NULL;
6377         }
6378         pool->devx_obj = dcs;
6379         pool->index = pools_mng->n_valid;
6380         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6381                 mlx5_free(pool);
6382                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6383                 return NULL;
6384         }
6385         pools_mng->pools[pool->index] = pool;
6386         pools_mng->n_valid++;
6387         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6388                 pool->mtrs[i].offset = i;
6389                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6390         }
6391         pool->mtrs[0].offset = 0;
6392         *mtr_free = &pool->mtrs[0];
6393         return pool;
6394 }
6395
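/*
 * A worked example for log_obj_size above: the ">> 1" reflects that one
 * ASO flow-meter object apparently holds two meters, so assuming
 * MLX5_ASO_MTRS_PER_POOL == 128 (illustrative value) the pool needs 64
 * objects and
 *
 *   log_obj_size = rte_log2_u32(128 >> 1) = rte_log2_u32(64) = 6
 */
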
6396 /**
6397  * Release a flow meter into pool.
6398  *
6399  * @param[in] dev
6400  *   Pointer to the Ethernet device structure.
6401  * @param[in] mtr_idx
6402  *   Index to the ASO flow meter.
6403  */
6404 static void
6405 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6406 {
6407         struct mlx5_priv *priv = dev->data->dev_private;
6408         struct mlx5_aso_mtr_pools_mng *pools_mng =
6409                                 &priv->sh->mtrmng->pools_mng;
6410         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6411
6412         MLX5_ASSERT(aso_mtr);
6413         rte_spinlock_lock(&pools_mng->mtrsl);
6414         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6415         aso_mtr->state = ASO_METER_FREE;
6416         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6417         rte_spinlock_unlock(&pools_mng->mtrsl);
6418 }
6419
6420 /**
6421  * Allocate an ASO flow meter.
6422  *
6423  * @param[in] dev
6424  *   Pointer to the Ethernet device structure.
6425  *
6426  * @return
6427  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6428  */
6429 static uint32_t
6430 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6431 {
6432         struct mlx5_priv *priv = dev->data->dev_private;
6433         struct mlx5_aso_mtr *mtr_free = NULL;
6434         struct mlx5_aso_mtr_pools_mng *pools_mng =
6435                                 &priv->sh->mtrmng->pools_mng;
6436         struct mlx5_aso_mtr_pool *pool;
6437         uint32_t mtr_idx = 0;
6438
6439         if (!priv->sh->devx) {
6440                 rte_errno = ENOTSUP;
6441                 return 0;
6442         }
6443         /* Get a free meter from management, creating a pool on demand. */
6445         rte_spinlock_lock(&pools_mng->mtrsl);
6446         mtr_free = LIST_FIRST(&pools_mng->meters);
6447         if (mtr_free)
6448                 LIST_REMOVE(mtr_free, next);
6449         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6450                 rte_spinlock_unlock(&pools_mng->mtrsl);
6451                 return 0;
6452         }
6453         mtr_free->state = ASO_METER_WAIT;
6454         rte_spinlock_unlock(&pools_mng->mtrsl);
6455         pool = container_of(mtr_free,
6456                         struct mlx5_aso_mtr_pool,
6457                         mtrs[mtr_free->offset]);
6458         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6459         if (!mtr_free->fm.meter_action) {
6460 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6461                 struct rte_flow_error error;
6462                 uint8_t reg_id;
6463
6464                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6465                 mtr_free->fm.meter_action =
6466                         mlx5_glue->dv_create_flow_action_aso
6467                                                 (priv->sh->rx_domain,
6468                                                  pool->devx_obj->obj,
6469                                                  mtr_free->offset,
6470                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6471                                                  reg_id - REG_C_0);
6472 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6473                 if (!mtr_free->fm.meter_action) {
6474                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6475                         return 0;
6476                 }
6477         }
6478         return mtr_idx;
6479 }
6480
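/*
 * A sketch of the container_of() recovery used in flow_dv_mtr_alloc()
 * above (hypothetical helper, not used by the driver): a meter entry
 * names its own slot in the pool's mtrs[] array via its stored offset,
 * so the owning pool is recovered without a back-pointer.
 */
static inline struct mlx5_aso_mtr_pool *
flow_dv_example_mtr_to_pool(struct mlx5_aso_mtr *mtr)
{
	/* &pool->mtrs[mtr->offset] == mtr, so step back to the pool base. */
	return container_of(mtr, struct mlx5_aso_mtr_pool,
			    mtrs[mtr->offset]);
}
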
6481 /**
6482  * Verify that the @p attributes will be correctly understood by the NIC
6483  * and are valid for the requested flow group.
6484  *
6485  * @param[in] dev
6486  *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload descriptor, or NULL.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] grp_info
 *   Pointer to the flow group attributes.
6491  * @param[out] error
6492  *   Pointer to error structure.
6493  *
6494  * @return
6495  *   - 0 on success and non-root table.
6496  *   - 1 on success and root table.
6497  *   - a negative errno value otherwise and rte_errno is set.
6498  */
6499 static int
6500 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6501                             const struct mlx5_flow_tunnel *tunnel,
6502                             const struct rte_flow_attr *attributes,
6503                             const struct flow_grp_info *grp_info,
6504                             struct rte_flow_error *error)
6505 {
6506         struct mlx5_priv *priv = dev->data->dev_private;
6507         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6508         int ret = 0;
6509
6510 #ifndef HAVE_MLX5DV_DR
6511         RTE_SET_USED(tunnel);
6512         RTE_SET_USED(grp_info);
6513         if (attributes->group)
6514                 return rte_flow_error_set(error, ENOTSUP,
6515                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6516                                           NULL,
6517                                           "groups are not supported");
6518 #else
6519         uint32_t table = 0;
6520
6521         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6522                                        grp_info, error);
6523         if (ret)
6524                 return ret;
6525         if (!table)
6526                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6527 #endif
6528         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6529             attributes->priority > lowest_priority)
6530                 return rte_flow_error_set(error, ENOTSUP,
6531                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6532                                           NULL,
6533                                           "priority out of range");
6534         if (attributes->transfer) {
6535                 if (!priv->config.dv_esw_en)
6536                         return rte_flow_error_set
6537                                 (error, ENOTSUP,
6538                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6539                                  "E-Switch DR is not supported");
6540                 if (!(priv->representor || priv->master))
6541                         return rte_flow_error_set
6542                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6543                                  NULL, "E-Switch configuration can only be"
6544                                  " done by a master or a representor device");
6545                 if (attributes->egress)
6546                         return rte_flow_error_set
6547                                 (error, ENOTSUP,
6548                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6549                                  "egress is not supported");
6550         }
6551         if (!(attributes->egress ^ attributes->ingress))
6552                 return rte_flow_error_set(error, ENOTSUP,
6553                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6554                                           "must specify exactly one of "
6555                                           "ingress or egress");
6556         return ret;
6557 }
6558
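/*
 * An example attribute set that passes the checks above on an E-Switch
 * enabled master/representor port (illustrative values only): exactly
 * one of ingress/egress is set, egress is not combined with transfer,
 * and the priority stays within the device range.
 */
static const struct rte_flow_attr flow_dv_example_transfer_attr __rte_unused = {
	.group = 1,	/* non-root table */
	.priority = 0,
	.ingress = 1,
	.transfer = 1,
};
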
6559 static int
6560 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6561                         int64_t pattern_flags, uint64_t l3_flags,
6562                         uint64_t l4_flags, uint64_t ip4_flag,
6563                         struct rte_flow_error *error)
6564 {
6565         if (mask->l3_ok && !(pattern_flags & l3_flags))
6566                 return rte_flow_error_set(error, EINVAL,
6567                                           RTE_FLOW_ERROR_TYPE_ITEM,
6568                                           NULL, "missing L3 protocol");
6569
6570         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6571                 return rte_flow_error_set(error, EINVAL,
6572                                           RTE_FLOW_ERROR_TYPE_ITEM,
6573                                           NULL, "missing IPv4 protocol");
6574
6575         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6576                 return rte_flow_error_set(error, EINVAL,
6577                                           RTE_FLOW_ERROR_TYPE_ITEM,
6578                                           NULL, "missing L4 protocol");
6579
6580         return 0;
6581 }
6582
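/*
 * An example of the dependency enforced above, for the outer-integrity
 * case (these are the flag values passed by the caller below):
 *
 *   mask->l3_ok == 1         -> the pattern must contain an outer L3 item
 *                               (MLX5_FLOW_LAYER_OUTER_L3)
 *   mask->ipv4_csum_ok == 1  -> the outer L3 item must be IPv4
 *                               (MLX5_FLOW_LAYER_OUTER_L3_IPV4)
 *   mask->l4_ok == 1 or
 *   mask->l4_csum_ok == 1    -> the pattern must contain an outer L4 item
 *                               (MLX5_FLOW_LAYER_OUTER_L4)
 */
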
6583 static int
6584 flow_dv_validate_item_integrity_post(const struct
6585                                      rte_flow_item *integrity_items[2],
6586                                      int64_t pattern_flags,
6587                                      struct rte_flow_error *error)
6588 {
6589         const struct rte_flow_item_integrity *mask;
6590         int ret;
6591
6592         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6593                 mask = (typeof(mask))integrity_items[0]->mask;
6594                 ret = validate_integrity_bits(mask, pattern_flags,
6595                                               MLX5_FLOW_LAYER_OUTER_L3,
6596                                               MLX5_FLOW_LAYER_OUTER_L4,
6597                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6598                                               error);
6599                 if (ret)
6600                         return ret;
6601         }
6602         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6603                 mask = (typeof(mask))integrity_items[1]->mask;
6604                 ret = validate_integrity_bits(mask, pattern_flags,
6605                                               MLX5_FLOW_LAYER_INNER_L3,
6606                                               MLX5_FLOW_LAYER_INNER_L4,
6607                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6608                                               error);
6609                 if (ret)
6610                         return ret;
6611         }
6612         return 0;
6613 }
6614
6615 static int
6616 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6617                                 const struct rte_flow_item *integrity_item,
6618                                 uint64_t pattern_flags, uint64_t *last_item,
6619                                 const struct rte_flow_item *integrity_items[2],
6620                                 struct rte_flow_error *error)
6621 {
6622         struct mlx5_priv *priv = dev->data->dev_private;
6623         const struct rte_flow_item_integrity *mask = (typeof(mask))
6624                                                      integrity_item->mask;
6625         const struct rte_flow_item_integrity *spec = (typeof(spec))
6626                                                      integrity_item->spec;
6627
6628         if (!priv->config.hca_attr.pkt_integrity_match)
6629                 return rte_flow_error_set(error, ENOTSUP,
6630                                           RTE_FLOW_ERROR_TYPE_ITEM,
6631                                           integrity_item,
6632                                           "packet integrity item not supported");
6633         if (!spec)
6634                 return rte_flow_error_set(error, ENOTSUP,
6635                                           RTE_FLOW_ERROR_TYPE_ITEM,
6636                                           integrity_item,
6637                                           "no spec for integrity item");
6638         if (!mask)
6639                 mask = &rte_flow_item_integrity_mask;
6640         if (!mlx5_validate_integrity_item(mask))
6641                 return rte_flow_error_set(error, ENOTSUP,
6642                                           RTE_FLOW_ERROR_TYPE_ITEM,
6643                                           integrity_item,
6644                                           "unsupported integrity filter");
6645         if (spec->level > 1) {
6646                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6647                         return rte_flow_error_set
6648                                 (error, ENOTSUP,
6649                                  RTE_FLOW_ERROR_TYPE_ITEM,
6650                                  NULL, "multiple inner integrity items not supported");
6651                 integrity_items[1] = integrity_item;
6652                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6653         } else {
6654                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6655                         return rte_flow_error_set
6656                                 (error, ENOTSUP,
6657                                  RTE_FLOW_ERROR_TYPE_ITEM,
6658                                  NULL, "multiple outer integrity items not supported");
6659                 integrity_items[0] = integrity_item;
6660                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6661         }
6662         return 0;
6663 }
6664
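/*
 * An example integrity item spec of the shape validated above
 * (illustrative values): level <= 1 selects the outer packet headers,
 * level > 1 the inner ones. Matching l3_ok/l4_csum_ok additionally
 * requires matching L3/L4 items in the same pattern, as enforced later
 * by flow_dv_validate_item_integrity_post().
 */
static const struct rte_flow_item_integrity
flow_dv_example_outer_integrity __rte_unused = {
	.level = 0,		/* outer packet headers */
	.l3_ok = 1,		/* L3 header is well formed */
	.l4_csum_ok = 1,	/* L4 checksum is valid */
};
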
6665 /**
6666  * Internal validation function. For validating both actions and items.
6667  *
6668  * @param[in] dev
6669  *   Pointer to the rte_eth_dev structure.
6670  * @param[in] attr
6671  *   Pointer to the flow attributes.
6672  * @param[in] items
6673  *   Pointer to the list of items.
6674  * @param[in] actions
6675  *   Pointer to the list of actions.
6676  * @param[in] external
6677  *   This flow rule is created by request external to PMD.
6678  * @param[in] hairpin
6679  *   Number of hairpin TX actions, 0 means classic flow.
6680  * @param[out] error
6681  *   Pointer to the error structure.
6682  *
6683  * @return
6684  *   0 on success, a negative errno value otherwise and rte_errno is set.
6685  */
6686 static int
6687 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6688                  const struct rte_flow_item items[],
6689                  const struct rte_flow_action actions[],
6690                  bool external, int hairpin, struct rte_flow_error *error)
6691 {
6692         int ret;
6693         uint64_t action_flags = 0;
6694         uint64_t item_flags = 0;
6695         uint64_t last_item = 0;
6696         uint8_t next_protocol = 0xff;
6697         uint16_t ether_type = 0;
6698         int actions_n = 0;
6699         uint8_t item_ipv6_proto = 0;
6700         int fdb_mirror_limit = 0;
6701         int modify_after_mirror = 0;
6702         const struct rte_flow_item *geneve_item = NULL;
6703         const struct rte_flow_item *gre_item = NULL;
6704         const struct rte_flow_item *gtp_item = NULL;
6705         const struct rte_flow_action_raw_decap *decap;
6706         const struct rte_flow_action_raw_encap *encap;
6707         const struct rte_flow_action_rss *rss = NULL;
6708         const struct rte_flow_action_rss *sample_rss = NULL;
6709         const struct rte_flow_action_count *sample_count = NULL;
6710         const struct rte_flow_item_tcp nic_tcp_mask = {
6711                 .hdr = {
6712                         .tcp_flags = 0xFF,
6713                         .src_port = RTE_BE16(UINT16_MAX),
6714                         .dst_port = RTE_BE16(UINT16_MAX),
6715                 }
6716         };
6717         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6718                 .hdr = {
6719                         .src_addr =
6720                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6721                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6722                         .dst_addr =
6723                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6724                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6725                         .vtc_flow = RTE_BE32(0xffffffff),
6726                         .proto = 0xff,
6727                         .hop_limits = 0xff,
6728                 },
6729                 .has_frag_ext = 1,
6730         };
6731         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6732                 .hdr = {
6733                         .common = {
6734                                 .u32 =
6735                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6736                                         .type = 0xFF,
6737                                         }).u32),
6738                         },
6739                         .dummy[0] = 0xffffffff,
6740                 },
6741         };
6742         struct mlx5_priv *priv = dev->data->dev_private;
6743         struct mlx5_dev_config *dev_conf = &priv->config;
6744         uint16_t queue_index = 0xFFFF;
6745         const struct rte_flow_item_vlan *vlan_m = NULL;
6746         uint32_t rw_act_num = 0;
6747         uint64_t is_root;
6748         const struct mlx5_flow_tunnel *tunnel;
6749         enum mlx5_tof_rule_type tof_rule_type;
6750         struct flow_grp_info grp_info = {
6751                 .external = !!external,
6752                 .transfer = !!attr->transfer,
6753                 .fdb_def_rule = !!priv->fdb_def_rule,
6754                 .std_tbl_fix = true,
6755         };
6756         const struct rte_eth_hairpin_conf *conf;
6757         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6758         const struct rte_flow_item *port_id_item = NULL;
6759         bool def_policy = false;
6760         uint16_t udp_dport = 0;
6761
6762         if (items == NULL)
6763                 return -1;
6764         tunnel = is_tunnel_offload_active(dev) ?
6765                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6766         if (tunnel) {
6767                 if (priv->representor)
6768                         return rte_flow_error_set
6769                                 (error, ENOTSUP,
6770                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6771                                  NULL, "decap not supported for VF representor");
6772                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6773                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6774                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6775                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6776                                         MLX5_FLOW_ACTION_DECAP;
6777                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6778                                         (dev, attr, tunnel, tof_rule_type);
6779         }
6780         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6781         if (ret < 0)
6782                 return ret;
6783         is_root = (uint64_t)ret;
6784         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6785                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6786                 int type = items->type;
6787
6788                 if (!mlx5_flow_os_item_supported(type))
6789                         return rte_flow_error_set(error, ENOTSUP,
6790                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6791                                                   NULL, "item not supported");
6792                 switch (type) {
6793                 case RTE_FLOW_ITEM_TYPE_VOID:
6794                         break;
6795                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6796                         ret = flow_dv_validate_item_port_id
6797                                         (dev, items, attr, item_flags, error);
6798                         if (ret < 0)
6799                                 return ret;
6800                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6801                         port_id_item = items;
6802                         break;
6803                 case RTE_FLOW_ITEM_TYPE_ETH:
6804                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6805                                                           true, error);
6806                         if (ret < 0)
6807                                 return ret;
6808                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6809                                              MLX5_FLOW_LAYER_OUTER_L2;
6810                         if (items->mask != NULL && items->spec != NULL) {
6811                                 ether_type =
6812                                         ((const struct rte_flow_item_eth *)
6813                                          items->spec)->type;
6814                                 ether_type &=
6815                                         ((const struct rte_flow_item_eth *)
6816                                          items->mask)->type;
6817                                 ether_type = rte_be_to_cpu_16(ether_type);
6818                         } else {
6819                                 ether_type = 0;
6820                         }
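                        /*
                         * Example: spec->type == RTE_BE16(0x0800) with
                         * mask->type == RTE_BE16(0xffff) yields
                         * ether_type == 0x0800 (IPv4) in host order; a
                         * zero mask leaves ether_type == 0 ("unknown").
                         */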
6821                         break;
6822                 case RTE_FLOW_ITEM_TYPE_VLAN:
6823                         ret = flow_dv_validate_item_vlan(items, item_flags,
6824                                                          dev, error);
6825                         if (ret < 0)
6826                                 return ret;
6827                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6828                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6829                         if (items->mask != NULL && items->spec != NULL) {
6830                                 ether_type =
6831                                         ((const struct rte_flow_item_vlan *)
6832                                          items->spec)->inner_type;
6833                                 ether_type &=
6834                                         ((const struct rte_flow_item_vlan *)
6835                                          items->mask)->inner_type;
6836                                 ether_type = rte_be_to_cpu_16(ether_type);
6837                         } else {
6838                                 ether_type = 0;
6839                         }
6840                         /* Store outer VLAN mask for of_push_vlan action. */
6841                         if (!tunnel)
6842                                 vlan_m = items->mask;
6843                         break;
6844                 case RTE_FLOW_ITEM_TYPE_IPV4:
6845                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6846                                                   &item_flags, &tunnel);
6847                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
6848                                                          last_item, ether_type,
6849                                                          error);
6850                         if (ret < 0)
6851                                 return ret;
6852                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6853                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6854                         if (items->mask != NULL &&
6855                             ((const struct rte_flow_item_ipv4 *)
6856                              items->mask)->hdr.next_proto_id) {
6857                                 next_protocol =
6858                                         ((const struct rte_flow_item_ipv4 *)
6859                                          (items->spec))->hdr.next_proto_id;
6860                                 next_protocol &=
6861                                         ((const struct rte_flow_item_ipv4 *)
6862                                          (items->mask))->hdr.next_proto_id;
6863                         } else {
6864                                 /* Reset for inner layer. */
6865                                 next_protocol = 0xff;
6866                         }
6867                         break;
6868                 case RTE_FLOW_ITEM_TYPE_IPV6:
6869                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6870                                                   &item_flags, &tunnel);
6871                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6872                                                            last_item,
6873                                                            ether_type,
6874                                                            &nic_ipv6_mask,
6875                                                            error);
6876                         if (ret < 0)
6877                                 return ret;
6878                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6879                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6880                         if (items->mask != NULL &&
6881                             ((const struct rte_flow_item_ipv6 *)
6882                              items->mask)->hdr.proto) {
6883                                 item_ipv6_proto =
6884                                         ((const struct rte_flow_item_ipv6 *)
6885                                          items->spec)->hdr.proto;
6886                                 next_protocol =
6887                                         ((const struct rte_flow_item_ipv6 *)
6888                                          items->spec)->hdr.proto;
6889                                 next_protocol &=
6890                                         ((const struct rte_flow_item_ipv6 *)
6891                                          items->mask)->hdr.proto;
6892                         } else {
6893                                 /* Reset for inner layer. */
6894                                 next_protocol = 0xff;
6895                         }
6896                         break;
6897                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6898                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6899                                                                   item_flags,
6900                                                                   error);
6901                         if (ret < 0)
6902                                 return ret;
6903                         last_item = tunnel ?
6904                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6905                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6906                         if (items->mask != NULL &&
6907                             ((const struct rte_flow_item_ipv6_frag_ext *)
6908                              items->mask)->hdr.next_header) {
6909                                 next_protocol =
6910                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6911                                  items->spec)->hdr.next_header;
6912                                 next_protocol &=
6913                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6914                                  items->mask)->hdr.next_header;
6915                         } else {
6916                                 /* Reset for inner layer. */
6917                                 next_protocol = 0xff;
6918                         }
6919                         break;
6920                 case RTE_FLOW_ITEM_TYPE_TCP:
6921                         ret = mlx5_flow_validate_item_tcp
6922                                                 (items, item_flags,
6923                                                  next_protocol,
6924                                                  &nic_tcp_mask,
6925                                                  error);
6926                         if (ret < 0)
6927                                 return ret;
6928                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6929                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6930                         break;
6931                 case RTE_FLOW_ITEM_TYPE_UDP:
6932                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6933                                                           next_protocol,
6934                                                           error);
6935                         if (ret < 0)
6936                                 return ret;
6937                         const struct rte_flow_item_udp *spec = items->spec;
6938                         const struct rte_flow_item_udp *mask = items->mask;
6939                         if (!mask)
6940                                 mask = &rte_flow_item_udp_mask;
6941                         if (spec != NULL)
6942                                 udp_dport = rte_be_to_cpu_16
6943                                                 (spec->hdr.dst_port &
6944                                                  mask->hdr.dst_port);
6945                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6946                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6947                         break;
6948                 case RTE_FLOW_ITEM_TYPE_GRE:
6949                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6950                                                           next_protocol, error);
6951                         if (ret < 0)
6952                                 return ret;
6953                         gre_item = items;
6954                         last_item = MLX5_FLOW_LAYER_GRE;
6955                         break;
6956                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6957                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6958                                                             next_protocol,
6959                                                             error);
6960                         if (ret < 0)
6961                                 return ret;
6962                         last_item = MLX5_FLOW_LAYER_NVGRE;
6963                         break;
6964                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6965                         ret = mlx5_flow_validate_item_gre_key
6966                                 (items, item_flags, gre_item, error);
6967                         if (ret < 0)
6968                                 return ret;
6969                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6970                         break;
6971                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6972                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
6973                                                             items, item_flags,
6974                                                             attr, error);
6975                         if (ret < 0)
6976                                 return ret;
6977                         last_item = MLX5_FLOW_LAYER_VXLAN;
6978                         break;
6979                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6980                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6981                                                                 item_flags, dev,
6982                                                                 error);
6983                         if (ret < 0)
6984                                 return ret;
6985                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6986                         break;
6987                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6988                         ret = mlx5_flow_validate_item_geneve(items,
6989                                                              item_flags, dev,
6990                                                              error);
6991                         if (ret < 0)
6992                                 return ret;
6993                         geneve_item = items;
6994                         last_item = MLX5_FLOW_LAYER_GENEVE;
6995                         break;
6996                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6997                         ret = mlx5_flow_validate_item_geneve_opt(items,
6998                                                                  last_item,
6999                                                                  geneve_item,
7000                                                                  dev,
7001                                                                  error);
7002                         if (ret < 0)
7003                                 return ret;
7004                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7005                         break;
7006                 case RTE_FLOW_ITEM_TYPE_MPLS:
7007                         ret = mlx5_flow_validate_item_mpls(dev, items,
7008                                                            item_flags,
7009                                                            last_item, error);
7010                         if (ret < 0)
7011                                 return ret;
7012                         last_item = MLX5_FLOW_LAYER_MPLS;
7013                         break;
7015                 case RTE_FLOW_ITEM_TYPE_MARK:
7016                         ret = flow_dv_validate_item_mark(dev, items, attr,
7017                                                          error);
7018                         if (ret < 0)
7019                                 return ret;
7020                         last_item = MLX5_FLOW_ITEM_MARK;
7021                         break;
7022                 case RTE_FLOW_ITEM_TYPE_META:
7023                         ret = flow_dv_validate_item_meta(dev, items, attr,
7024                                                          error);
7025                         if (ret < 0)
7026                                 return ret;
7027                         last_item = MLX5_FLOW_ITEM_METADATA;
7028                         break;
7029                 case RTE_FLOW_ITEM_TYPE_ICMP:
7030                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7031                                                            next_protocol,
7032                                                            error);
7033                         if (ret < 0)
7034                                 return ret;
7035                         last_item = MLX5_FLOW_LAYER_ICMP;
7036                         break;
7037                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7038                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7039                                                             next_protocol,
7040                                                             error);
7041                         if (ret < 0)
7042                                 return ret;
7043                         item_ipv6_proto = IPPROTO_ICMPV6;
7044                         last_item = MLX5_FLOW_LAYER_ICMP6;
7045                         break;
7046                 case RTE_FLOW_ITEM_TYPE_TAG:
7047                         ret = flow_dv_validate_item_tag(dev, items,
7048                                                         attr, error);
7049                         if (ret < 0)
7050                                 return ret;
7051                         last_item = MLX5_FLOW_ITEM_TAG;
7052                         break;
7053                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7054                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7055                         break;
7056                 case RTE_FLOW_ITEM_TYPE_GTP:
7057                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7058                                                         error);
7059                         if (ret < 0)
7060                                 return ret;
7061                         gtp_item = items;
7062                         last_item = MLX5_FLOW_LAYER_GTP;
7063                         break;
7064                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7065                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7066                                                             gtp_item, attr,
7067                                                             error);
7068                         if (ret < 0)
7069                                 return ret;
7070                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7071                         break;
7072                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7073                         /* Capacity will be checked in the translate stage. */
7074                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7075                                                             last_item,
7076                                                             ether_type,
7077                                                             &nic_ecpri_mask,
7078                                                             error);
7079                         if (ret < 0)
7080                                 return ret;
7081                         last_item = MLX5_FLOW_LAYER_ECPRI;
7082                         break;
7083                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7084                         ret = flow_dv_validate_item_integrity(dev, items,
7085                                                               item_flags,
7086                                                               &last_item,
7087                                                               integrity_items,
7088                                                               error);
7089                         if (ret < 0)
7090                                 return ret;
7091                         break;
7092                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7093                         ret = flow_dv_validate_item_aso_ct(dev, items,
7094                                                            &item_flags, error);
7095                         if (ret < 0)
7096                                 return ret;
7097                         break;
7098                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7099                         /* The tunnel offload item was processed earlier;
7100                          * it is listed here as a supported type.
7101                          */
7102                         break;
7103                 default:
7104                         return rte_flow_error_set(error, ENOTSUP,
7105                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7106                                                   NULL, "item not supported");
7107                 }
7108                 item_flags |= last_item;
7109         }
7110         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7111                 ret = flow_dv_validate_item_integrity_post(integrity_items,
7112                                                            item_flags, error);
7113                 if (ret)
7114                         return ret;
7115         }
7116         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7117                 int type = actions->type;
7118                 bool shared_count = false;
7119
7120                 if (!mlx5_flow_os_action_supported(type))
7121                         return rte_flow_error_set(error, ENOTSUP,
7122                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7123                                                   actions,
7124                                                   "action not supported");
7125                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7126                         return rte_flow_error_set(error, ENOTSUP,
7127                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7128                                                   actions, "too many actions");
7129                 if (action_flags &
7130                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7131                         return rte_flow_error_set(error, ENOTSUP,
7132                                 RTE_FLOW_ERROR_TYPE_ACTION,
7133                                 NULL, "meter action with policy "
7134                                 "must be the last action");
7135                 switch (type) {
7136                 case RTE_FLOW_ACTION_TYPE_VOID:
7137                         break;
7138                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7139                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7140                         ret = flow_dv_validate_action_port_id(dev,
7141                                                               action_flags,
7142                                                               actions,
7143                                                               attr,
7144                                                               error);
7145                         if (ret)
7146                                 return ret;
7147                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7148                         ++actions_n;
7149                         break;
7150                 case RTE_FLOW_ACTION_TYPE_FLAG:
7151                         ret = flow_dv_validate_action_flag(dev, action_flags,
7152                                                            attr, error);
7153                         if (ret < 0)
7154                                 return ret;
7155                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7156                                 /* Count all modify-header actions as one. */
7157                                 if (!(action_flags &
7158                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7159                                         ++actions_n;
7160                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7161                                                 MLX5_FLOW_ACTION_MARK_EXT;
7162                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7163                                         modify_after_mirror = 1;
7164
7165                         } else {
7166                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7167                                 ++actions_n;
7168                         }
7169                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7170                         break;
7171                 case RTE_FLOW_ACTION_TYPE_MARK:
7172                         ret = flow_dv_validate_action_mark(dev, actions,
7173                                                            action_flags,
7174                                                            attr, error);
7175                         if (ret < 0)
7176                                 return ret;
7177                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7178                                 /* Count all modify-header actions as one. */
7179                                 if (!(action_flags &
7180                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7181                                         ++actions_n;
7182                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7183                                                 MLX5_FLOW_ACTION_MARK_EXT;
7184                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7185                                         modify_after_mirror = 1;
7186                         } else {
7187                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7188                                 ++actions_n;
7189                         }
7190                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7191                         break;
7192                 case RTE_FLOW_ACTION_TYPE_SET_META:
7193                         ret = flow_dv_validate_action_set_meta(dev, actions,
7194                                                                action_flags,
7195                                                                attr, error);
7196                         if (ret < 0)
7197                                 return ret;
7198                         /* Count all modify-header actions as one action. */
7199                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7200                                 ++actions_n;
7201                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7202                                 modify_after_mirror = 1;
7203                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7204                         rw_act_num += MLX5_ACT_NUM_SET_META;
7205                         break;
7206                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7207                         ret = flow_dv_validate_action_set_tag(dev, actions,
7208                                                               action_flags,
7209                                                               attr, error);
7210                         if (ret < 0)
7211                                 return ret;
7212                         /* Count all modify-header actions as one action. */
7213                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7214                                 ++actions_n;
7215                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7216                                 modify_after_mirror = 1;
7217                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7218                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7219                         break;
7220                 case RTE_FLOW_ACTION_TYPE_DROP:
7221                         ret = mlx5_flow_validate_action_drop(action_flags,
7222                                                              attr, error);
7223                         if (ret < 0)
7224                                 return ret;
7225                         action_flags |= MLX5_FLOW_ACTION_DROP;
7226                         ++actions_n;
7227                         break;
7228                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7229                         ret = mlx5_flow_validate_action_queue(actions,
7230                                                               action_flags, dev,
7231                                                               attr, error);
7232                         if (ret < 0)
7233                                 return ret;
7234                         queue_index = ((const struct rte_flow_action_queue *)
7235                                                         (actions->conf))->index;
7236                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7237                         ++actions_n;
7238                         break;
7239                 case RTE_FLOW_ACTION_TYPE_RSS:
7240                         rss = actions->conf;
7241                         ret = mlx5_flow_validate_action_rss(actions,
7242                                                             action_flags, dev,
7243                                                             attr, item_flags,
7244                                                             error);
7245                         if (ret < 0)
7246                                 return ret;
7247                         if (rss && sample_rss &&
7248                             (sample_rss->level != rss->level ||
7249                             sample_rss->types != rss->types))
7250                                 return rte_flow_error_set(error, ENOTSUP,
7251                                         RTE_FLOW_ERROR_TYPE_ACTION,
7252                                         NULL,
7253                                         "Can't use the different RSS types "
7254                                         "or level in the same flow");
7255                         if (rss != NULL && rss->queue_num)
7256                                 queue_index = rss->queue[0];
7257                         action_flags |= MLX5_FLOW_ACTION_RSS;
7258                         ++actions_n;
7259                         break;
7260                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7261                         ret =
7262                         mlx5_flow_validate_action_default_miss(action_flags,
7263                                         attr, error);
7264                         if (ret < 0)
7265                                 return ret;
7266                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7267                         ++actions_n;
7268                         break;
7269                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7270                         shared_count = true;
7271                         /* fall-through. */
7272                 case RTE_FLOW_ACTION_TYPE_COUNT:
7273                         ret = flow_dv_validate_action_count(dev, shared_count,
7274                                                             action_flags,
7275                                                             error);
7276                         if (ret < 0)
7277                                 return ret;
7278                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7279                         ++actions_n;
7280                         break;
7281                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7282                         if (flow_dv_validate_action_pop_vlan(dev,
7283                                                              action_flags,
7284                                                              actions,
7285                                                              item_flags, attr,
7286                                                              error))
7287                                 return -rte_errno;
7288                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7289                                 modify_after_mirror = 1;
7290                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7291                         ++actions_n;
7292                         break;
7293                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7294                         ret = flow_dv_validate_action_push_vlan(dev,
7295                                                                 action_flags,
7296                                                                 vlan_m,
7297                                                                 actions, attr,
7298                                                                 error);
7299                         if (ret < 0)
7300                                 return ret;
7301                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7302                                 modify_after_mirror = 1;
7303                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7304                         ++actions_n;
7305                         break;
7306                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7307                         ret = flow_dv_validate_action_set_vlan_pcp
7308                                                 (action_flags, actions, error);
7309                         if (ret < 0)
7310                                 return ret;
7311                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7312                                 modify_after_mirror = 1;
7313                         /* Count PCP with push_vlan command. */
7314                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7315                         break;
7316                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7317                         ret = flow_dv_validate_action_set_vlan_vid
7318                                                 (item_flags, action_flags,
7319                                                  actions, error);
7320                         if (ret < 0)
7321                                 return ret;
7322                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7323                                 modify_after_mirror = 1;
7324                         /* Count VID with push_vlan command. */
7325                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7326                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7327                         break;
7328                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7329                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7330                         ret = flow_dv_validate_action_l2_encap(dev,
7331                                                                action_flags,
7332                                                                actions, attr,
7333                                                                error);
7334                         if (ret < 0)
7335                                 return ret;
7336                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7337                         ++actions_n;
7338                         break;
7339                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7340                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7341                         ret = flow_dv_validate_action_decap(dev, action_flags,
7342                                                             actions, item_flags,
7343                                                             attr, error);
7344                         if (ret < 0)
7345                                 return ret;
7346                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7347                                 modify_after_mirror = 1;
7348                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7349                         ++actions_n;
7350                         break;
7351                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7352                         ret = flow_dv_validate_action_raw_encap_decap
7353                                 (dev, NULL, actions->conf, attr, &action_flags,
7354                                  &actions_n, actions, item_flags, error);
7355                         if (ret < 0)
7356                                 return ret;
7357                         break;
7358                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7359                         decap = actions->conf;
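                        /*
                         * Skip VOID actions to check whether the decap is
                         * directly followed by a RAW_ENCAP; such a pair is
                         * validated together as one decap/encap combination.
                         */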
7360                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7361                                 ;
7362                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7363                                 encap = NULL;
7364                                 actions--;
7365                         } else {
7366                                 encap = actions->conf;
7367                         }
7368                         ret = flow_dv_validate_action_raw_encap_decap
7369                                            (dev,
7370                                             decap ? decap : &empty_decap, encap,
7371                                             attr, &action_flags, &actions_n,
7372                                             actions, item_flags, error);
7373                         if (ret < 0)
7374                                 return ret;
7375                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7376                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7377                                 modify_after_mirror = 1;
7378                         break;
7379                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7380                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7381                         ret = flow_dv_validate_action_modify_mac(action_flags,
7382                                                                  actions,
7383                                                                  item_flags,
7384                                                                  error);
7385                         if (ret < 0)
7386                                 return ret;
7387                         /* Count all modify-header actions as one action. */
7388                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7389                                 ++actions_n;
7390                         action_flags |= actions->type ==
7391                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7392                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7393                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7394                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7395                                 modify_after_mirror = 1;
7396                         /*
7397                          * Even though the source and destination MAC addresses
7398                          * overlap within 4B-aligned words in the header, the
7399                          * convert function handles them separately, so 4 SW
7400                          * actions are created, and 2 actions are added each
7401                          * time no matter how many address bytes are set.
7402                          */
7403                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7404                         break;
7405                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7406                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7407                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7408                                                                   actions,
7409                                                                   item_flags,
7410                                                                   error);
7411                         if (ret < 0)
7412                                 return ret;
7413                         /* Count all modify-header actions as one action. */
7414                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7415                                 ++actions_n;
7416                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7417                                 modify_after_mirror = 1;
7418                         action_flags |= actions->type ==
7419                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7420                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7421                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7422                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7423                         break;
7424                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7425                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7426                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7427                                                                   actions,
7428                                                                   item_flags,
7429                                                                   error);
7430                         if (ret < 0)
7431                                 return ret;
7432                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7433                                 return rte_flow_error_set(error, ENOTSUP,
7434                                         RTE_FLOW_ERROR_TYPE_ACTION,
7435                                         actions,
7436                                         "Can't change header "
7437                                         "with ICMPv6 proto");
7438                         /* Count all modify-header actions as one action. */
7439                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7440                                 ++actions_n;
7441                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7442                                 modify_after_mirror = 1;
7443                         action_flags |= actions->type ==
7444                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7445                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7446                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7447                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7448                         break;
7449                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7450                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7451                         ret = flow_dv_validate_action_modify_tp(action_flags,
7452                                                                 actions,
7453                                                                 item_flags,
7454                                                                 error);
7455                         if (ret < 0)
7456                                 return ret;
7457                         /* Count all modify-header actions as one action. */
7458                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7459                                 ++actions_n;
7460                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7461                                 modify_after_mirror = 1;
7462                         action_flags |= actions->type ==
7463                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7464                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7465                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7466                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7467                         break;
7468                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7469                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7470                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7471                                                                  actions,
7472                                                                  item_flags,
7473                                                                  error);
7474                         if (ret < 0)
7475                                 return ret;
7476                         /* Count all modify-header actions as one action. */
7477                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7478                                 ++actions_n;
7479                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7480                                 modify_after_mirror = 1;
7481                         action_flags |= actions->type ==
7482                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7483                                                 MLX5_FLOW_ACTION_SET_TTL :
7484                                                 MLX5_FLOW_ACTION_DEC_TTL;
7485                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7486                         break;
7487                 case RTE_FLOW_ACTION_TYPE_JUMP:
7488                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7489                                                            action_flags,
7490                                                            attr, external,
7491                                                            error);
7492                         if (ret)
7493                                 return ret;
7494                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7495                             fdb_mirror_limit)
7496                                 return rte_flow_error_set(error, EINVAL,
7497                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7498                                                   NULL,
7499                                                   "sample and jump action combination is not supported");
7500                         ++actions_n;
7501                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7502                         break;
7503                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7504                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7505                         ret = flow_dv_validate_action_modify_tcp_seq
7506                                                                 (action_flags,
7507                                                                  actions,
7508                                                                  item_flags,
7509                                                                  error);
7510                         if (ret < 0)
7511                                 return ret;
7512                         /* Count all modify-header actions as one action. */
7513                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7514                                 ++actions_n;
7515                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7516                                 modify_after_mirror = 1;
7517                         action_flags |= actions->type ==
7518                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7519                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7520                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7521                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7522                         break;
7523                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7524                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7525                         ret = flow_dv_validate_action_modify_tcp_ack
7526                                                                 (action_flags,
7527                                                                  actions,
7528                                                                  item_flags,
7529                                                                  error);
7530                         if (ret < 0)
7531                                 return ret;
7532                         /* Count all modify-header actions as one action. */
7533                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7534                                 ++actions_n;
7535                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7536                                 modify_after_mirror = 1;
7537                         action_flags |= actions->type ==
7538                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7539                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7540                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7541                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7542                         break;
7543                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7544                         break;
7545                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7546                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7547                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7548                         break;
7549                 case RTE_FLOW_ACTION_TYPE_METER:
7550                         ret = mlx5_flow_validate_action_meter(dev,
7551                                                               action_flags,
7552                                                               actions, attr,
7553                                                               port_id_item,
7554                                                               &def_policy,
7555                                                               error);
7556                         if (ret < 0)
7557                                 return ret;
7558                         action_flags |= MLX5_FLOW_ACTION_METER;
7559                         if (!def_policy)
7560                                 action_flags |=
7561                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7562                         ++actions_n;
7563                         /* Meter action will add one more TAG action. */
7564                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7565                         break;
7566                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7567                         if (!attr->transfer && !attr->group)
7568                                 return rte_flow_error_set(error, ENOTSUP,
7569                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7570                                                                            NULL,
7571                           "Shared ASO age action is not supported for group 0");
7572                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7573                                 return rte_flow_error_set
7574                                                   (error, EINVAL,
7575                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7576                                                    NULL,
7577                                                    "duplicate age actions set");
7578                         action_flags |= MLX5_FLOW_ACTION_AGE;
7579                         ++actions_n;
7580                         break;
7581                 case RTE_FLOW_ACTION_TYPE_AGE:
7582                         ret = flow_dv_validate_action_age(action_flags,
7583                                                           actions, dev,
7584                                                           error);
7585                         if (ret < 0)
7586                                 return ret;
7587                         /*
7588                          * Validate the mutual exclusion of the regular AGE
7589                          * action (using counter) with shared counter actions.
7590                          */
7591                         if (!priv->sh->flow_hit_aso_en) {
7592                                 if (shared_count)
7593                                         return rte_flow_error_set
7594                                                 (error, EINVAL,
7595                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7596                                                 NULL,
7597                                                 "old age and shared count combination is not supported");
7598                                 if (sample_count)
7599                                         return rte_flow_error_set
7600                                                 (error, EINVAL,
7601                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7602                                                 NULL,
7603                                                 "old age action and count must be in the same sub flow");
7604                         }
7605                         action_flags |= MLX5_FLOW_ACTION_AGE;
7606                         ++actions_n;
7607                         break;
7608                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7609                         ret = flow_dv_validate_action_modify_ipv4_dscp
7610                                                          (action_flags,
7611                                                           actions,
7612                                                           item_flags,
7613                                                           error);
7614                         if (ret < 0)
7615                                 return ret;
7616                         /* Count all modify-header actions as one action. */
7617                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7618                                 ++actions_n;
7619                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7620                                 modify_after_mirror = 1;
7621                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7622                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7623                         break;
7624                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7625                         ret = flow_dv_validate_action_modify_ipv6_dscp
7626                                                                 (action_flags,
7627                                                                  actions,
7628                                                                  item_flags,
7629                                                                  error);
7630                         if (ret < 0)
7631                                 return ret;
7632                         /* Count all modify-header actions as one action. */
7633                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7634                                 ++actions_n;
7635                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7636                                 modify_after_mirror = 1;
7637                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7638                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7639                         break;
7640                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7641                         ret = flow_dv_validate_action_sample(&action_flags,
7642                                                              actions, dev,
7643                                                              attr, item_flags,
7644                                                              rss, &sample_rss,
7645                                                              &sample_count,
7646                                                              &fdb_mirror_limit,
7647                                                              error);
7648                         if (ret < 0)
7649                                 return ret;
7650                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7651                         ++actions_n;
7652                         break;
7653                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7654                         ret = flow_dv_validate_action_modify_field(dev,
7655                                                                    action_flags,
7656                                                                    actions,
7657                                                                    attr,
7658                                                                    error);
7659                         if (ret < 0)
7660                                 return ret;
7661                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7662                                 modify_after_mirror = 1;
7663                         /* Count all modify-header actions as one action. */
7664                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7665                                 ++actions_n;
7666                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7667                         rw_act_num += ret;
7668                         break;
7669                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7670                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7671                                                              item_flags, attr,
7672                                                              error);
7673                         if (ret < 0)
7674                                 return ret;
7675                         action_flags |= MLX5_FLOW_ACTION_CT;
7676                         break;
7677                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7678                         /* Tunnel offload action was processed before;
7679                          * list it here as a supported type.
7680                          */
7681                         break;
7682                 default:
7683                         return rte_flow_error_set(error, ENOTSUP,
7684                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7685                                                   actions,
7686                                                   "action not supported");
7687                 }
7688         }
7689         /*
7690          * Validate actions in flow rules
7691          * - Explicit decap action is prohibited by the tunnel offload API.
7692          * - Drop action in tunnel steer rule is prohibited by the API.
7693          * - Application cannot use MARK action because its value can mask
7694          *   the tunnel default miss notification.
7695          * - JUMP in tunnel match rule has no support in current PMD
7696          *   implementation.
7697          * - TAG & META are reserved for future uses.
7698          */
7699         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7700                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7701                                             MLX5_FLOW_ACTION_MARK     |
7702                                             MLX5_FLOW_ACTION_SET_TAG  |
7703                                             MLX5_FLOW_ACTION_SET_META |
7704                                             MLX5_FLOW_ACTION_DROP;
7705
7706                 if (action_flags & bad_actions_mask)
7707                         return rte_flow_error_set
7708                                         (error, EINVAL,
7709                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7710                                         "Invalid RTE action in tunnel "
7711                                         "set decap rule");
7712                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7713                         return rte_flow_error_set
7714                                         (error, EINVAL,
7715                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7716                                         "tunnel set decap rule must terminate "
7717                                         "with JUMP");
7718                 if (!attr->ingress)
7719                         return rte_flow_error_set
7720                                         (error, EINVAL,
7721                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7722                                         "tunnel flows for ingress traffic only");
7723         }
7724         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7725                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7726                                             MLX5_FLOW_ACTION_MARK    |
7727                                             MLX5_FLOW_ACTION_SET_TAG |
7728                                             MLX5_FLOW_ACTION_SET_META;
7729
7730                 if (action_flags & bad_actions_mask)
7731                         return rte_flow_error_set
7732                                         (error, EINVAL,
7733                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7734                                         "Invalid RTE action in tunnel "
7735                                         "set match rule");
7736         }
7737         /*
7738          * Validate the drop action mutual exclusion with other actions.
7739          * Drop action is mutually-exclusive with any other action, except for
7740          * Count action.
7741          * Drop action compatibility with tunnel offload was already validated.
7742          */
7743         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7744                             MLX5_FLOW_ACTION_TUNNEL_SET));
7745         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7746             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7747                 return rte_flow_error_set(error, EINVAL,
7748                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7749                                           "Drop action is mutually-exclusive "
7750                                           "with any other action, except for "
7751                                           "Count action");
7752         /* Eswitch has a few restrictions on using items and actions. */
7753         if (attr->transfer) {
7754                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7755                     action_flags & MLX5_FLOW_ACTION_FLAG)
7756                         return rte_flow_error_set(error, ENOTSUP,
7757                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7758                                                   NULL,
7759                                                   "unsupported action FLAG");
7760                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7761                     action_flags & MLX5_FLOW_ACTION_MARK)
7762                         return rte_flow_error_set(error, ENOTSUP,
7763                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7764                                                   NULL,
7765                                                   "unsupported action MARK");
7766                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7767                         return rte_flow_error_set(error, ENOTSUP,
7768                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7769                                                   NULL,
7770                                                   "unsupported action QUEUE");
7771                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7772                         return rte_flow_error_set(error, ENOTSUP,
7773                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7774                                                   NULL,
7775                                                   "unsupported action RSS");
7776                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7777                         return rte_flow_error_set(error, EINVAL,
7778                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7779                                                   actions,
7780                                                   "no fate action is found");
7781         } else {
7782                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7783                         return rte_flow_error_set(error, EINVAL,
7784                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7785                                                   actions,
7786                                                   "no fate action is found");
7787         }
7788         /*
7789          * Continue validation for Xcap and VLAN actions.
7790          * If hairpin is working in explicit TX rule mode, there is no action
7791          * splitting and the validation of hairpin ingress flows should be the
7792          * same as for other standard flows.
7793          */
7794         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7795                              MLX5_FLOW_VLAN_ACTIONS)) &&
7796             (queue_index == 0xFFFF ||
7797              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7798              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7799              conf->tx_explicit != 0))) {
7800                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7801                     MLX5_FLOW_XCAP_ACTIONS)
7802                         return rte_flow_error_set(error, ENOTSUP,
7803                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7804                                                   NULL, "encap and decap "
7805                                                   "combination aren't supported");
7806                 if (!attr->transfer && attr->ingress) {
7807                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7808                                 return rte_flow_error_set
7809                                                 (error, ENOTSUP,
7810                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7811                                                  NULL, "encap is not supported"
7812                                                  " for ingress traffic");
7813                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7814                                 return rte_flow_error_set
7815                                                 (error, ENOTSUP,
7816                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7817                                                  NULL, "push VLAN action not "
7818                                                  "supported for ingress");
7819                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7820                                         MLX5_FLOW_VLAN_ACTIONS)
7821                                 return rte_flow_error_set
7822                                                 (error, ENOTSUP,
7823                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7824                                                  NULL, "no support for "
7825                                                  "multiple VLAN actions");
7826                 }
7827         }
7828         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7829                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7830                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7831                         attr->ingress)
7832                         return rte_flow_error_set
7833                                 (error, ENOTSUP,
7834                                 RTE_FLOW_ERROR_TYPE_ACTION,
7835                                 NULL, "fate action not supported for "
7836                                 "meter with policy");
7837                 if (attr->egress) {
7838                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7839                                 return rte_flow_error_set
7840                                         (error, ENOTSUP,
7841                                         RTE_FLOW_ERROR_TYPE_ACTION,
7842                                         NULL, "modify header action in egress "
7843                                         "cannot be done before meter action");
7844                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7845                                 return rte_flow_error_set
7846                                         (error, ENOTSUP,
7847                                         RTE_FLOW_ERROR_TYPE_ACTION,
7848                                         NULL, "encap action in egress "
7849                                         "cannot be done before meter action");
7850                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7851                                 return rte_flow_error_set
7852                                         (error, ENOTSUP,
7853                                         RTE_FLOW_ERROR_TYPE_ACTION,
7854                                         NULL, "push vlan action in egress "
7855                                         "cannot be done before meter action");
7856                 }
7857         }
7858         /*
7859          * Hairpin flow will add one more TAG action in TX implicit mode.
7860          * In TX explicit mode, there will be no hairpin flow ID.
7861          */
7862         if (hairpin > 0)
7863                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7864         /* Extra metadata enabled: one more TAG action will be added. */
7865         if (dev_conf->dv_flow_en &&
7866             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7867             mlx5_flow_ext_mreg_supported(dev))
7868                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7869         if (rw_act_num >
7870                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7871                 return rte_flow_error_set(error, ENOTSUP,
7872                                           RTE_FLOW_ERROR_TYPE_ACTION,
7873                                           NULL, "too many header modify"
7874                                           " actions to support");
7875         }
7876         /* Eswitch egress mirror and modify flows have a limitation on CX5. */
7877         if (fdb_mirror_limit && modify_after_mirror)
7878                 return rte_flow_error_set(error, EINVAL,
7879                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7880                                 "sample before modify action is not supported");
7881         return 0;
7882 }
7883
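/*
 * Illustrative sketch (not driver code; the group number is hypothetical):
 * an application action list that passes the tunnel offload checks above.
 * A tunnel "set" rule must terminate with JUMP, may only be ingress, and
 * may not carry DECAP, DROP, MARK, SET_TAG or SET_META:
 *
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_JUMP,
 *               .conf = &(struct rte_flow_action_jump){ .group = 1 }, },
 *             { .type = RTE_FLOW_ACTION_TYPE_END, },
 *     };
 */
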
7884 /**
7885  * Internal preparation function. Allocates the DV flow size;
7886  * this size is constant.
7887  *
7888  * @param[in] dev
7889  *   Pointer to the rte_eth_dev structure.
7890  * @param[in] attr
7891  *   Pointer to the flow attributes.
7892  * @param[in] items
7893  *   Pointer to the list of items.
7894  * @param[in] actions
7895  *   Pointer to the list of actions.
7896  * @param[out] error
7897  *   Pointer to the error structure.
7898  *
7899  * @return
7900  *   Pointer to mlx5_flow object on success,
7901  *   otherwise NULL and rte_errno is set.
7902  */
7903 static struct mlx5_flow *
7904 flow_dv_prepare(struct rte_eth_dev *dev,
7905                 const struct rte_flow_attr *attr __rte_unused,
7906                 const struct rte_flow_item items[] __rte_unused,
7907                 const struct rte_flow_action actions[] __rte_unused,
7908                 struct rte_flow_error *error)
7909 {
7910         uint32_t handle_idx = 0;
7911         struct mlx5_flow *dev_flow;
7912         struct mlx5_flow_handle *dev_handle;
7913         struct mlx5_priv *priv = dev->data->dev_private;
7914         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7915
7916         MLX5_ASSERT(wks);
7917         wks->skip_matcher_reg = 0;
7918         wks->policy = NULL;
7919         wks->final_policy = NULL;
7920         /* Guard against overrunning the workspace flows array. */
7921         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7922                 rte_flow_error_set(error, ENOSPC,
7923                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7924                                    "not free temporary device flow");
7925                 return NULL;
7926         }
7927         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7928                                    &handle_idx);
7929         if (!dev_handle) {
7930                 rte_flow_error_set(error, ENOMEM,
7931                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7932                                    "not enough memory to create flow handle");
7933                 return NULL;
7934         }
7935         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7936         dev_flow = &wks->flows[wks->flow_idx++];
7937         memset(dev_flow, 0, sizeof(*dev_flow));
7938         dev_flow->handle = dev_handle;
7939         dev_flow->handle_idx = handle_idx;
7940         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
7941         dev_flow->ingress = attr->ingress;
7942         dev_flow->dv.transfer = attr->transfer;
7943         return dev_flow;
7944 }
7945
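/*
 * Note (illustrative): one rte_flow rule may expand into several device
 * subflows (e.g. metadata or meter splits, mirror copies), each obtained
 * through flow_dv_prepare(). The per-thread workspace caps that expansion
 * at MLX5_NUM_MAX_DEV_FLOWS entries, which is what the flow_idx check
 * above guards against overrunning.
 */
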
7946 #ifdef RTE_LIBRTE_MLX5_DEBUG
7947 /**
7948  * Sanity check for match mask and value. Similar to check_valid_spec() in
7949  * the kernel driver. If an unmasked bit is set in the value, it returns failure.
7950  *
7951  * @param match_mask
7952  *   pointer to match mask buffer.
7953  * @param match_value
7954  *   pointer to match value buffer.
7955  *
7956  * @return
7957  *   0 if valid, -EINVAL otherwise.
7958  */
7959 static int
7960 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7961 {
7962         uint8_t *m = match_mask;
7963         uint8_t *v = match_value;
7964         unsigned int i;
7965
7966         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7967                 if (v[i] & ~m[i]) {
7968                         DRV_LOG(ERR,
7969                                 "match_value differs from match_criteria"
7970                                 " %p[%u] != %p[%u]",
7971                                 match_value, i, match_mask, i);
7972                         return -EINVAL;
7973                 }
7974         }
7975         return 0;
7976 }
7977 #endif
7978
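/*
 * Worked example (illustrative): with a mask byte of 0x08, a value byte of
 * 0x0a fails the check above because 0x0a & ~0x08 == 0x02 leaves an
 * unmasked bit set, while a value byte of 0x08 passes.
 */
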
7979 /**
7980  * Add match of ip_version.
7981  *
7982  * @param[in] group
7983  *   Flow group.
7984  * @param[in] headers_v
7985  *   Values header pointer.
7986  * @param[in] headers_m
7987  *   Masks header pointer.
7988  * @param[in] ip_version
7989  *   The IP version to set.
7990  */
7991 static inline void
7992 flow_dv_set_match_ip_version(uint32_t group,
7993                              void *headers_v,
7994                              void *headers_m,
7995                              uint8_t ip_version)
7996 {
7997         if (group == 0)
7998                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7999         else
8000                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8001                          ip_version);
8002         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8003         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8004         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8005 }
8006
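/*
 * Example (illustrative): for IPv4 on a non-root table this yields an
 * ip_version match with value 4 and mask 4; on group 0 (the root table)
 * the full nibble mask 0xf is used instead. Ethertype is cleared in both
 * mask and value so the rule keys on ip_version only.
 */
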
8007 /**
8008  * Add Ethernet item to matcher and to the value.
8009  *
8010  * @param[in, out] matcher
8011  *   Flow matcher.
8012  * @param[in, out] key
8013  *   Flow matcher value.
8014  * @param[in] item
8015  *   Flow pattern to translate.
8016  * @param[in] inner
8017  *   Item is inner pattern.
8018  */
8019 static void
8020 flow_dv_translate_item_eth(void *matcher, void *key,
8021                            const struct rte_flow_item *item, int inner,
8022                            uint32_t group)
8023 {
8024         const struct rte_flow_item_eth *eth_m = item->mask;
8025         const struct rte_flow_item_eth *eth_v = item->spec;
8026         const struct rte_flow_item_eth nic_mask = {
8027                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8028                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8029                 .type = RTE_BE16(0xffff),
8030                 .has_vlan = 0,
8031         };
8032         void *hdrs_m;
8033         void *hdrs_v;
8034         char *l24_v;
8035         unsigned int i;
8036
8037         if (!eth_v)
8038                 return;
8039         if (!eth_m)
8040                 eth_m = &nic_mask;
8041         if (inner) {
8042                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8043                                          inner_headers);
8044                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8045         } else {
8046                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8047                                          outer_headers);
8048                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8049         }
8050         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8051                &eth_m->dst, sizeof(eth_m->dst));
8052         /* The value must be in the range of the mask. */
8053         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8054         for (i = 0; i < sizeof(eth_m->dst); ++i)
8055                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8056         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8057                &eth_m->src, sizeof(eth_m->src));
8058         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8059         /* The value must be in the range of the mask. */
8060         for (i = 0; i < sizeof(eth_m->src); ++i)
8061                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8062         /*
8063          * HW supports match on one Ethertype, the Ethertype following the last
8064          * VLAN tag of the packet (see PRM).
8065          * Set match on ethertype only if ETH header is not followed by VLAN.
8066          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8067          * ethertype, and use ip_version field instead.
8068          * eCPRI over Ether layer will use type value 0xAEFE.
8069          */
8070         if (eth_m->type == 0xFFFF) {
8071                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8072                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8073                 switch (eth_v->type) {
8074                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8075                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8076                         return;
8077                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8078                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8079                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8080                         return;
8081                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8082                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8083                         return;
8084                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8085                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8086                         return;
8087                 default:
8088                         break;
8089                 }
8090         }
8091         if (eth_m->has_vlan) {
8092                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8093                 if (eth_v->has_vlan) {
8094                         /*
8095                          * Here, when the has_more_vlan field in VLAN item is
8096                          * also not set, only single-tagged packets will be matched.
8097                          */
8098                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8099                         return;
8100                 }
8101         }
8102         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8103                  rte_be_to_cpu_16(eth_m->type));
8104         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8105         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8106 }
8107
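/*
 * Illustrative sketch (hypothetical application item): an ETH item with
 *
 *     spec.type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
 *     mask.type = RTE_BE16(0xffff);
 *
 * is translated above into an ip_version == 4 match rather than a raw
 * ethertype match, following the HW optimization described in the comment
 * inside the function.
 */
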
8108 /**
8109  * Add VLAN item to matcher and to the value.
8110  *
8111  * @param[in, out] dev_flow
8112  *   Flow descriptor.
8113  * @param[in, out] matcher
8114  *   Flow matcher.
8115  * @param[in, out] key
8116  *   Flow matcher value.
8117  * @param[in] item
8118  *   Flow pattern to translate.
8119  * @param[in] inner
8120  *   Item is inner pattern.
8121  */
8122 static void
8123 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8124                             void *matcher, void *key,
8125                             const struct rte_flow_item *item,
8126                             int inner, uint32_t group)
8127 {
8128         const struct rte_flow_item_vlan *vlan_m = item->mask;
8129         const struct rte_flow_item_vlan *vlan_v = item->spec;
8130         void *hdrs_m;
8131         void *hdrs_v;
8132         uint16_t tci_m;
8133         uint16_t tci_v;
8134
8135         if (inner) {
8136                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8137                                          inner_headers);
8138                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8139         } else {
8140                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8141                                          outer_headers);
8142                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8143                 /*
8144                  * This is a workaround; masks are not supported
8145                  * and have been pre-validated.
8146                  */
8147                 if (vlan_v)
8148                         dev_flow->handle->vf_vlan.tag =
8149                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8150         }
8151         /*
8152          * When VLAN item exists in flow, mark packet as tagged,
8153          * even if TCI is not specified.
8154          */
8155         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8156                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8157                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8158         }
8159         if (!vlan_v)
8160                 return;
8161         if (!vlan_m)
8162                 vlan_m = &rte_flow_item_vlan_mask;
8163         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8164         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8165         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8166         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8167         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8168         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8169         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8170         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8171         /*
8172          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8173          * ethertype, and use ip_version field instead.
8174          */
8175         if (vlan_m->inner_type == 0xFFFF) {
8176                 switch (vlan_v->inner_type) {
8177                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8178                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8179                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8180                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8181                         return;
8182                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8183                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8184                         return;
8185                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8186                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8187                         return;
8188                 default:
8189                         break;
8190                 }
8191         }
8192         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8193                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8194                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8195                 /* Only one vlan_tag bit can be set. */
8196                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8197                 return;
8198         }
8199         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8200                  rte_be_to_cpu_16(vlan_m->inner_type));
8201         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8202                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8203 }
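
/*
 * Illustrative example (hypothetical values, not part of the driver
 * logic): matching a single-tagged packet with PCP 3 and VID 100 while
 * ignoring CFI could use a pattern such as
 *
 *     struct rte_flow_item_vlan spec = { .tci = RTE_BE16(0x6064) };
 *     struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0xefff) };
 *
 * With tci_m = 0xefff and tci_v = 0x6064 the translation above yields
 * first_vid = 0x064, first_prio = 3 and first_cfi = 0; MLX5_SET()
 * truncates each shifted TCI to the width of its PRM field, so the
 * cleared CFI bit in the mask leaves that bit unmatched.
 */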
8204
8205 /**
8206  * Add IPV4 item to matcher and to the value.
8207  *
8208  * @param[in, out] matcher
8209  *   Flow matcher.
8210  * @param[in, out] key
8211  *   Flow matcher value.
8212  * @param[in] item
8213  *   Flow pattern to translate.
8214  * @param[in] inner
8215  *   Item is inner pattern.
8216  * @param[in] group
8217  *   The group to insert the rule.
8218  */
8219 static void
8220 flow_dv_translate_item_ipv4(void *matcher, void *key,
8221                             const struct rte_flow_item *item,
8222                             int inner, uint32_t group)
8223 {
8224         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8225         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8226         const struct rte_flow_item_ipv4 nic_mask = {
8227                 .hdr = {
8228                         .src_addr = RTE_BE32(0xffffffff),
8229                         .dst_addr = RTE_BE32(0xffffffff),
8230                         .type_of_service = 0xff,
8231                         .next_proto_id = 0xff,
8232                         .time_to_live = 0xff,
8233                 },
8234         };
8235         void *headers_m;
8236         void *headers_v;
8237         char *l24_m;
8238         char *l24_v;
8239         uint8_t tos, ihl_m, ihl_v;
8240
8241         if (inner) {
8242                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8243                                          inner_headers);
8244                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8245         } else {
8246                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8247                                          outer_headers);
8248                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8249         }
8250         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8251         if (!ipv4_v)
8252                 return;
8253         if (!ipv4_m)
8254                 ipv4_m = &nic_mask;
8255         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8256                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8257         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8258                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8259         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8260         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8261         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8262                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8263         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8264                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8265         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8266         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8267         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8268         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8269         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8270         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8271         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8272         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8273                  ipv4_m->hdr.type_of_service);
8274         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8275         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8276                  ipv4_m->hdr.type_of_service >> 2);
8277         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8278         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8279                  ipv4_m->hdr.next_proto_id);
8280         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8281                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8282         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8283                  ipv4_m->hdr.time_to_live);
8284         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8285                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8286         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8287                  !!(ipv4_m->hdr.fragment_offset));
8288         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8289                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8290 }
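
/*
 * Illustrative note on the TOS split above (hypothetical value): a TOS
 * byte of 0xb8 (DSCP 46, ECN 0) is written to both PRM fields; MLX5_SET()
 * truncates ip_ecn to its 2-bit width (0xb8 & 0x3 = 0) while ip_dscp
 * receives 0xb8 >> 2 = 46. A pattern exercising this could look like
 *
 *     struct rte_flow_item_ipv4 spec = {
 *             .hdr = { .type_of_service = 0xb8 },
 *     };
 *     struct rte_flow_item_ipv4 mask = {
 *             .hdr = { .type_of_service = 0xff },
 *     };
 */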
8291
8292 /**
8293  * Add IPV6 item to matcher and to the value.
8294  *
8295  * @param[in, out] matcher
8296  *   Flow matcher.
8297  * @param[in, out] key
8298  *   Flow matcher value.
8299  * @param[in] item
8300  *   Flow pattern to translate.
8301  * @param[in] inner
8302  *   Item is inner pattern.
8303  * @param[in] group
8304  *   The group to insert the rule.
8305  */
8306 static void
8307 flow_dv_translate_item_ipv6(void *matcher, void *key,
8308                             const struct rte_flow_item *item,
8309                             int inner, uint32_t group)
8310 {
8311         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8312         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8313         const struct rte_flow_item_ipv6 nic_mask = {
8314                 .hdr = {
8315                         .src_addr =
8316                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8317                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8318                         .dst_addr =
8319                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8320                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8321                         .vtc_flow = RTE_BE32(0xffffffff),
8322                         .proto = 0xff,
8323                         .hop_limits = 0xff,
8324                 },
8325         };
8326         void *headers_m;
8327         void *headers_v;
8328         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8329         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8330         char *l24_m;
8331         char *l24_v;
8332         uint32_t vtc_m;
8333         uint32_t vtc_v;
8334         int i;
8335         int size;
8336
8337         if (inner) {
8338                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8339                                          inner_headers);
8340                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8341         } else {
8342                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8343                                          outer_headers);
8344                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8345         }
8346         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8347         if (!ipv6_v)
8348                 return;
8349         if (!ipv6_m)
8350                 ipv6_m = &nic_mask;
8351         size = sizeof(ipv6_m->hdr.dst_addr);
8352         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8353                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8354         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8355                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8356         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8357         for (i = 0; i < size; ++i)
8358                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8359         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8360                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8361         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8362                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8363         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8364         for (i = 0; i < size; ++i)
8365                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8366         /* TOS. */
8367         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8368         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8369         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8370         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8371         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8372         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8373         /* Label. */
8374         if (inner) {
8375                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8376                          vtc_m);
8377                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8378                          vtc_v);
8379         } else {
8380                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8381                          vtc_m);
8382                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8383                          vtc_v);
8384         }
8385         /* Protocol. */
8386         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8387                  ipv6_m->hdr.proto);
8388         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8389                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8390         /* Hop limit. */
8391         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8392                  ipv6_m->hdr.hop_limits);
8393         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8394                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8395         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8396                  !!(ipv6_m->has_frag_ext));
8397         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8398                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8399 }
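
/*
 * Illustrative note on the vtc_flow split above (hypothetical value):
 * vtc_flow packs version(4) | traffic class(8) | flow label(20), so for
 * vtc_flow = 0x6b80abcd the code extracts DSCP = (vtc >> 22) & 0x3f = 46,
 * ECN = (vtc >> 20) & 0x3 = 0, and the 20-bit flow label field keeps
 * vtc & 0xfffff = 0x0abcd (MLX5_SET() truncates to the field width):
 *
 *     struct rte_flow_item_ipv6 spec = {
 *             .hdr = { .vtc_flow = RTE_BE32(0x6b80abcd) },
 *     };
 */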
8400
8401 /**
8402  * Add IPV6 fragment extension item to matcher and to the value.
8403  *
8404  * @param[in, out] matcher
8405  *   Flow matcher.
8406  * @param[in, out] key
8407  *   Flow matcher value.
8408  * @param[in] item
8409  *   Flow pattern to translate.
8410  * @param[in] inner
8411  *   Item is inner pattern.
8412  */
8413 static void
8414 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8415                                      const struct rte_flow_item *item,
8416                                      int inner)
8417 {
8418         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8419         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8420         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8421                 .hdr = {
8422                         .next_header = 0xff,
8423                         .frag_data = RTE_BE16(0xffff),
8424                 },
8425         };
8426         void *headers_m;
8427         void *headers_v;
8428
8429         if (inner) {
8430                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8431                                          inner_headers);
8432                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8433         } else {
8434                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8435                                          outer_headers);
8436                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8437         }
8438         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8439         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8440         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8441         if (!ipv6_frag_ext_v)
8442                 return;
8443         if (!ipv6_frag_ext_m)
8444                 ipv6_frag_ext_m = &nic_mask;
8445         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8446                  ipv6_frag_ext_m->hdr.next_header);
8447         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8448                  ipv6_frag_ext_v->hdr.next_header &
8449                  ipv6_frag_ext_m->hdr.next_header);
8450 }
8451
8452 /**
8453  * Add TCP item to matcher and to the value.
8454  *
8455  * @param[in, out] matcher
8456  *   Flow matcher.
8457  * @param[in, out] key
8458  *   Flow matcher value.
8459  * @param[in] item
8460  *   Flow pattern to translate.
8461  * @param[in] inner
8462  *   Item is inner pattern.
8463  */
8464 static void
8465 flow_dv_translate_item_tcp(void *matcher, void *key,
8466                            const struct rte_flow_item *item,
8467                            int inner)
8468 {
8469         const struct rte_flow_item_tcp *tcp_m = item->mask;
8470         const struct rte_flow_item_tcp *tcp_v = item->spec;
8471         void *headers_m;
8472         void *headers_v;
8473
8474         if (inner) {
8475                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8476                                          inner_headers);
8477                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8478         } else {
8479                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8480                                          outer_headers);
8481                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8482         }
8483         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8484         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8485         if (!tcp_v)
8486                 return;
8487         if (!tcp_m)
8488                 tcp_m = &rte_flow_item_tcp_mask;
8489         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8490                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8491         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8492                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8493         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8494                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8495         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8496                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8497         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8498                  tcp_m->hdr.tcp_flags);
8499         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8500                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8501 }
8502
8503 /**
8504  * Add UDP item to matcher and to the value.
8505  *
8506  * @param[in, out] matcher
8507  *   Flow matcher.
8508  * @param[in, out] key
8509  *   Flow matcher value.
8510  * @param[in] item
8511  *   Flow pattern to translate.
8512  * @param[in] inner
8513  *   Item is inner pattern.
8514  */
8515 static void
8516 flow_dv_translate_item_udp(void *matcher, void *key,
8517                            const struct rte_flow_item *item,
8518                            int inner)
8519 {
8520         const struct rte_flow_item_udp *udp_m = item->mask;
8521         const struct rte_flow_item_udp *udp_v = item->spec;
8522         void *headers_m;
8523         void *headers_v;
8524
8525         if (inner) {
8526                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8527                                          inner_headers);
8528                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8529         } else {
8530                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8531                                          outer_headers);
8532                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8533         }
8534         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8535         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8536         if (!udp_v)
8537                 return;
8538         if (!udp_m)
8539                 udp_m = &rte_flow_item_udp_mask;
8540         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8541                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8542         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8543                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8544         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8545                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8546         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8547                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8548 }
8549
8550 /**
8551  * Add GRE optional Key item to matcher and to the value.
8552  *
8553  * @param[in, out] matcher
8554  *   Flow matcher.
8555  * @param[in, out] key
8556  *   Flow matcher value.
8557  * @param[in] item
8558  *   Flow pattern to translate.
8561  */
8562 static void
8563 flow_dv_translate_item_gre_key(void *matcher, void *key,
8564                                    const struct rte_flow_item *item)
8565 {
8566         const rte_be32_t *key_m = item->mask;
8567         const rte_be32_t *key_v = item->spec;
8568         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8569         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8570         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8571
8572         /* GRE K bit must be on and should already be validated */
8573         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8574         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8575         if (!key_v)
8576                 return;
8577         if (!key_m)
8578                 key_m = &gre_key_default_mask;
8579         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8580                  rte_be_to_cpu_32(*key_m) >> 8);
8581         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8582                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8583         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8584                  rte_be_to_cpu_32(*key_m) & 0xFF);
8585         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8586                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8587 }
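
/*
 * Illustrative note (hypothetical value): the PRM splits the 32-bit GRE
 * key into gre_key_h (upper 24 bits) and gre_key_l (lower 8 bits), so a
 * fully-masked key of 0x12345678 is programmed as
 *
 *     gre_key_h = 0x12345678 >> 8   = 0x123456;
 *     gre_key_l = 0x12345678 & 0xFF = 0x78;
 */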
8588
8589 /**
8590  * Add GRE item to matcher and to the value.
8591  *
8592  * @param[in, out] matcher
8593  *   Flow matcher.
8594  * @param[in, out] key
8595  *   Flow matcher value.
8596  * @param[in] item
8597  *   Flow pattern to translate.
8598  * @param[in] inner
8599  *   Item is inner pattern.
8600  */
8601 static void
8602 flow_dv_translate_item_gre(void *matcher, void *key,
8603                            const struct rte_flow_item *item,
8604                            int inner)
8605 {
8606         const struct rte_flow_item_gre *gre_m = item->mask;
8607         const struct rte_flow_item_gre *gre_v = item->spec;
8608         void *headers_m;
8609         void *headers_v;
8610         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8611         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8612         struct {
8613                 union {
8614                         __extension__
8615                         struct {
8616                                 uint16_t version:3;
8617                                 uint16_t rsvd0:9;
8618                                 uint16_t s_present:1;
8619                                 uint16_t k_present:1;
8620                                 uint16_t rsvd_bit1:1;
8621                                 uint16_t c_present:1;
8622                         };
8623                         uint16_t value;
8624                 };
8625         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8626
8627         if (inner) {
8628                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8629                                          inner_headers);
8630                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8631         } else {
8632                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8633                                          outer_headers);
8634                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8635         }
8636         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8637         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8638         if (!gre_v)
8639                 return;
8640         if (!gre_m)
8641                 gre_m = &rte_flow_item_gre_mask;
8642         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8643                  rte_be_to_cpu_16(gre_m->protocol));
8644         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8645                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8646         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8647         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8648         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8649                  gre_crks_rsvd0_ver_m.c_present);
8650         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8651                  gre_crks_rsvd0_ver_v.c_present &
8652                  gre_crks_rsvd0_ver_m.c_present);
8653         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8654                  gre_crks_rsvd0_ver_m.k_present);
8655         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8656                  gre_crks_rsvd0_ver_v.k_present &
8657                  gre_crks_rsvd0_ver_m.k_present);
8658         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8659                  gre_crks_rsvd0_ver_m.s_present);
8660         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8661                  gre_crks_rsvd0_ver_v.s_present &
8662                  gre_crks_rsvd0_ver_m.s_present);
8663 }
8664
8665 /**
8666  * Add NVGRE item to matcher and to the value.
8667  *
8668  * @param[in, out] matcher
8669  *   Flow matcher.
8670  * @param[in, out] key
8671  *   Flow matcher value.
8672  * @param[in] item
8673  *   Flow pattern to translate.
8674  * @param[in] inner
8675  *   Item is inner pattern.
8676  */
8677 static void
8678 flow_dv_translate_item_nvgre(void *matcher, void *key,
8679                              const struct rte_flow_item *item,
8680                              int inner)
8681 {
8682         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8683         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8684         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8685         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8686         const char *tni_flow_id_m;
8687         const char *tni_flow_id_v;
8688         char *gre_key_m;
8689         char *gre_key_v;
8690         int size;
8691         int i;
8692
8693         /* For NVGRE, GRE header fields must be set with defined values. */
8694         const struct rte_flow_item_gre gre_spec = {
8695                 .c_rsvd0_ver = RTE_BE16(0x2000),
8696                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8697         };
8698         const struct rte_flow_item_gre gre_mask = {
8699                 .c_rsvd0_ver = RTE_BE16(0xB000),
8700                 .protocol = RTE_BE16(UINT16_MAX),
8701         };
8702         const struct rte_flow_item gre_item = {
8703                 .spec = &gre_spec,
8704                 .mask = &gre_mask,
8705                 .last = NULL,
8706         };
8707         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8708         if (!nvgre_v)
8709                 return;
8710         if (!nvgre_m)
8711                 nvgre_m = &rte_flow_item_nvgre_mask;
8712         tni_flow_id_m = (const char *)nvgre_m->tni;
8713         tni_flow_id_v = (const char *)nvgre_v->tni;
8714         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8715         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8716         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8717         memcpy(gre_key_m, tni_flow_id_m, size);
8718         for (i = 0; i < size; ++i)
8719                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8720 }
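
/*
 * Illustrative example (hypothetical values): NVGRE reuses the GRE key
 * field for the TNI (3 bytes) followed by flow_id (1 byte), which the
 * byte-wise copy above matches as one 32-bit GRE key. With
 *
 *     struct rte_flow_item_nvgre spec = {
 *             .tni = "\xab\xcd\xef",
 *             .flow_id = 0x01,
 *     };
 *     struct rte_flow_item_nvgre mask = {
 *             .tni = "\xff\xff\xff",
 *             .flow_id = 0xff,
 *     };
 *
 * the resulting GRE key match is 0xabcdef01, while the embedded GRE item
 * enforces the K bit and the TEB protocol.
 */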
8721
8722 /**
8723  * Add VXLAN item to matcher and to the value.
8724  *
8725  * @param[in] dev
8726  *   Pointer to the Ethernet device structure.
8727  * @param[in] attr
8728  *   Flow rule attributes.
8729  * @param[in, out] matcher
8730  *   Flow matcher.
8731  * @param[in, out] key
8732  *   Flow matcher value.
8733  * @param[in] item
8734  *   Flow pattern to translate.
8735  * @param[in] inner
8736  *   Item is inner pattern.
8737  */
8738 static void
8739 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8740                              const struct rte_flow_attr *attr,
8741                              void *matcher, void *key,
8742                              const struct rte_flow_item *item,
8743                              int inner)
8744 {
8745         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8746         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8747         void *headers_m;
8748         void *headers_v;
8749         void *misc5_m;
8750         void *misc5_v;
8751         uint32_t *tunnel_header_v;
8752         uint32_t *tunnel_header_m;
8753         uint16_t dport;
8754         struct mlx5_priv *priv = dev->data->dev_private;
8755         const struct rte_flow_item_vxlan nic_mask = {
8756                 .vni = "\xff\xff\xff",
8757                 .rsvd1 = 0xff,
8758         };
8759
8760         if (inner) {
8761                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8762                                          inner_headers);
8763                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8764         } else {
8765                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8766                                          outer_headers);
8767                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8768         }
8769         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8770                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8771         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8772                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8773                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8774         }
8775         dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
8776         if (!vxlan_v)
8777                 return;
8778         if (!vxlan_m) {
8779                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8780                     (attr->group && !priv->sh->misc5_cap))
8781                         vxlan_m = &rte_flow_item_vxlan_mask;
8782                 else
8783                         vxlan_m = &nic_mask;
8784         }
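        /*
         * Use the legacy misc.vxlan_vni match when misc5 tunnel_header
         * matching is unavailable: the ConnectX-5 steering format supports
         * misc5 only for the default VXLAN port, root tables additionally
         * require tunnel_header_0_1 support, and non-root/FDB tables
         * require the misc5 capability.
         */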
8785         if ((priv->sh->steering_format_version ==
8786             MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
8787             dport != MLX5_UDP_PORT_VXLAN) ||
8788             (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8789             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8790                 void *misc_m;
8791                 void *misc_v;
8792                 char *vni_m;
8793                 char *vni_v;
8794                 int size;
8795                 int i;
8796                 misc_m = MLX5_ADDR_OF(fte_match_param,
8797                                       matcher, misc_parameters);
8798                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8799                 size = sizeof(vxlan_m->vni);
8800                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8801                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8802                 memcpy(vni_m, vxlan_m->vni, size);
8803                 for (i = 0; i < size; ++i)
8804                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8805                 return;
8806         }
8807         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8808         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8809         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8810                                                    misc5_v,
8811                                                    tunnel_header_1);
8812         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8813                                                    misc5_m,
8814                                                    tunnel_header_1);
8815         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8816                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8817                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8818         if (*tunnel_header_v)
8819                 *tunnel_header_m = vxlan_m->vni[0] |
8820                         vxlan_m->vni[1] << 8 |
8821                         vxlan_m->vni[2] << 16;
8822         else
8823                 *tunnel_header_m = 0x0;
8824         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8825         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8826                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8827 }
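
/*
 * Illustrative note on the misc5 path above (hypothetical VNI): the
 * tunnel_header_1 word carries vni[0] in bits 0-7, vni[1] in bits 8-15,
 * vni[2] in bits 16-23 and the first reserved byte in bits 24-31, so a
 * fully-masked vni = {0x12, 0x34, 0x56} with rsvd1 = 0 produces
 *
 *     tunnel_header_1 = 0x12 | 0x34 << 8 | 0x56 << 16 = 0x00563412;
 */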
8828
8829 /**
8830  * Add VXLAN-GPE item to matcher and to the value.
8831  *
8832  * @param[in, out] matcher
8833  *   Flow matcher.
8834  * @param[in, out] key
8835  *   Flow matcher value.
8836  * @param[in] item
8837  *   Flow pattern to translate.
8838  * @param[in] inner
8839  *   Item is inner pattern.
8840  */
8842 static void
8843 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8844                                  const struct rte_flow_item *item, int inner)
8845 {
8846         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8847         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8848         void *headers_m;
8849         void *headers_v;
8850         void *misc_m =
8851                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8852         void *misc_v =
8853                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8854         char *vni_m;
8855         char *vni_v;
8856         uint16_t dport;
8857         int size;
8858         int i;
8859         uint8_t flags_m = 0xff;
8860         uint8_t flags_v = 0xc;
8861
8862         if (inner) {
8863                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8864                                          inner_headers);
8865                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8866         } else {
8867                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8868                                          outer_headers);
8869                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8870         }
8871         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8872                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8873         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8874                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8875                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8876         }
8877         if (!vxlan_v)
8878                 return;
8879         if (!vxlan_m)
8880                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8881         size = sizeof(vxlan_m->vni);
8882         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8883         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8884         memcpy(vni_m, vxlan_m->vni, size);
8885         for (i = 0; i < size; ++i)
8886                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8887         if (vxlan_m->flags) {
8888                 flags_m = vxlan_m->flags;
8889                 flags_v = vxlan_v->flags;
8890         }
8891         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8892         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8893         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8894                  vxlan_m->protocol);
8895         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8896                  vxlan_v->protocol);
8897 }
8898
8899 /**
8900  * Add Geneve item to matcher and to the value.
8901  *
8902  * @param[in, out] matcher
8903  *   Flow matcher.
8904  * @param[in, out] key
8905  *   Flow matcher value.
8906  * @param[in] item
8907  *   Flow pattern to translate.
8908  * @param[in] inner
8909  *   Item is inner pattern.
8910  */
8912 static void
8913 flow_dv_translate_item_geneve(void *matcher, void *key,
8914                               const struct rte_flow_item *item, int inner)
8915 {
8916         const struct rte_flow_item_geneve *geneve_m = item->mask;
8917         const struct rte_flow_item_geneve *geneve_v = item->spec;
8918         void *headers_m;
8919         void *headers_v;
8920         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8921         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8922         uint16_t dport;
8923         uint16_t gbhdr_m;
8924         uint16_t gbhdr_v;
8925         char *vni_m;
8926         char *vni_v;
8927         size_t size, i;
8928
8929         if (inner) {
8930                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8931                                          inner_headers);
8932                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8933         } else {
8934                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8935                                          outer_headers);
8936                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8937         }
8938         dport = MLX5_UDP_PORT_GENEVE;
8939         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8940                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8941                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8942         }
8943         if (!geneve_v)
8944                 return;
8945         if (!geneve_m)
8946                 geneve_m = &rte_flow_item_geneve_mask;
8947         size = sizeof(geneve_m->vni);
8948         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8949         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8950         memcpy(vni_m, geneve_m->vni, size);
8951         for (i = 0; i < size; ++i)
8952                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8953         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8954                  rte_be_to_cpu_16(geneve_m->protocol));
8955         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8956                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8957         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8958         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8959         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8960                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8961         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8962                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8963         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8964                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8965         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8966                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8967                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8968 }
8969
8970 /**
8971  * Create Geneve TLV option resource.
8972  *
8973  * @param[in, out] dev
8974  *   Pointer to rte_eth_dev structure.
8975  * @param[in] item
8976  *   Flow pattern to translate.
8977  * @param[out] error
8978  *   Pointer to error structure.
8979  *
8980  * @return
8981  *   0 on success, otherwise -errno and errno is set.
8984  */
8986 int
8987 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8988                                              const struct rte_flow_item *item,
8989                                              struct rte_flow_error *error)
8990 {
8991         struct mlx5_priv *priv = dev->data->dev_private;
8992         struct mlx5_dev_ctx_shared *sh = priv->sh;
8993         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8994                         sh->geneve_tlv_option_resource;
8995         struct mlx5_devx_obj *obj;
8996         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8997         int ret = 0;
8998
8999         if (!geneve_opt_v)
9000                 return -1;
9001         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9002         if (geneve_opt_resource != NULL) {
9003                 if (geneve_opt_resource->option_class ==
9004                         geneve_opt_v->option_class &&
9005                         geneve_opt_resource->option_type ==
9006                         geneve_opt_v->option_type &&
9007                         geneve_opt_resource->length ==
9008                         geneve_opt_v->option_len) {
9009                         /* We already have GENEVE TLV option obj allocated. */
9010                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9011                                            __ATOMIC_RELAXED);
9012                 } else {
9013                         ret = rte_flow_error_set(error, ENOMEM,
9014                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9015                                 "Only one GENEVE TLV option supported");
9016                         goto exit;
9017                 }
9018         } else {
9019                 /* Create a GENEVE TLV object and resource. */
9020                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9021                                 geneve_opt_v->option_class,
9022                                 geneve_opt_v->option_type,
9023                                 geneve_opt_v->option_len);
9024                 if (!obj) {
9025                         ret = rte_flow_error_set(error, ENODATA,
9026                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9027                                 "Failed to create GENEVE TLV Devx object");
9028                         goto exit;
9029                 }
9030                 sh->geneve_tlv_option_resource =
9031                                 mlx5_malloc(MLX5_MEM_ZERO,
9032                                                 sizeof(*geneve_opt_resource),
9033                                                 0, SOCKET_ID_ANY);
9034                 if (!sh->geneve_tlv_option_resource) {
9035                         claim_zero(mlx5_devx_cmd_destroy(obj));
9036                         ret = rte_flow_error_set(error, ENOMEM,
9037                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9038                                 "GENEVE TLV object memory allocation failed");
9039                         goto exit;
9040                 }
9041                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9042                 geneve_opt_resource->obj = obj;
9043                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9044                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9045                 geneve_opt_resource->length = geneve_opt_v->option_len;
9046                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9047                                 __ATOMIC_RELAXED);
9048         }
9049 exit:
9050         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9051         return ret;
9052 }
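
/*
 * Usage note: the shared device context holds at most one GENEVE TLV
 * option object. All flows matching a GENEVE option must therefore use
 * the same class/type/length triple; a matching registration only
 * increments refcnt, while a different triple is rejected with "Only one
 * GENEVE TLV option supported".
 */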
9053
9054 /**
9055  * Add Geneve TLV option item to matcher.
9056  *
9057  * @param[in, out] dev
9058  *   Pointer to rte_eth_dev structure.
9059  * @param[in, out] matcher
9060  *   Flow matcher.
9061  * @param[in, out] key
9062  *   Flow matcher value.
9063  * @param[in] item
9064  *   Flow pattern to translate.
9065  * @param[out] error
9066  *   Pointer to error structure.
9067  *
9068  * @return
9069  *   0 on success, otherwise -errno and errno is set.
9070  */
9068 static int
9069 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9070                                   void *key, const struct rte_flow_item *item,
9071                                   struct rte_flow_error *error)
9072 {
9073         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9074         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9075         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9076         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9077         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9078                         misc_parameters_3);
9079         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9080         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9081         int ret = 0;
9082
9083         if (!geneve_opt_v)
9084                 return -1;
9085         if (!geneve_opt_m)
9086                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9087         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9088                                                            error);
9089         if (ret) {
9090                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9091                 return ret;
9092         }
9093         /*
9094          * Set the option length in the GENEVE header if it was not
9095          * explicitly requested. The GENEVE TLV option length is expressed
9096          * by the option length field in the GENEVE header, so when the
9097          * GENEVE TLV option item is present without a length match, the
9098          * field is set implicitly.
9099          */
9100         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9101                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9102                          MLX5_GENEVE_OPTLEN_MASK);
9103                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9104                          geneve_opt_v->option_len + 1);
9105         }
9106         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9107         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9108         /* Set the data. */
9109         if (geneve_opt_v->data) {
9110                 memcpy(&opt_data_key, geneve_opt_v->data,
9111                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9112                                 sizeof(opt_data_key)));
9113                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9114                                 sizeof(opt_data_key));
9115                 memcpy(&opt_data_mask, geneve_opt_m->data,
9116                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9117                                 sizeof(opt_data_mask)));
9118                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9119                                 sizeof(opt_data_mask));
9120                 MLX5_SET(fte_match_set_misc3, misc3_m,
9121                                 geneve_tlv_option_0_data,
9122                                 rte_be_to_cpu_32(opt_data_mask));
9123                 MLX5_SET(fte_match_set_misc3, misc3_v,
9124                                 geneve_tlv_option_0_data,
9125                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9126         }
9127         return ret;
9128 }
9129
9130 /**
9131  * Add MPLS item to matcher and to the value.
9132  *
9133  * @param[in, out] matcher
9134  *   Flow matcher.
9135  * @param[in, out] key
9136  *   Flow matcher value.
9137  * @param[in] item
9138  *   Flow pattern to translate.
9139  * @param[in] prev_layer
9140  *   The protocol layer indicated in previous item.
9141  * @param[in] inner
9142  *   Item is inner pattern.
9143  */
9144 static void
9145 flow_dv_translate_item_mpls(void *matcher, void *key,
9146                             const struct rte_flow_item *item,
9147                             uint64_t prev_layer,
9148                             int inner)
9149 {
9150         const uint32_t *in_mpls_m = item->mask;
9151         const uint32_t *in_mpls_v = item->spec;
9152         uint32_t *out_mpls_m = NULL;
9153         uint32_t *out_mpls_v = NULL;
9154         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9155         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9156         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9157                                      misc_parameters_2);
9158         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9159         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9160         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9161
9162         switch (prev_layer) {
9163         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9164                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9165                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9166                          MLX5_UDP_PORT_MPLS);
9167                 break;
9168         case MLX5_FLOW_LAYER_GRE:
9169                 /* Fall-through. */
9170         case MLX5_FLOW_LAYER_GRE_KEY:
9171                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9172                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9173                          RTE_ETHER_TYPE_MPLS);
9174                 break;
9175         default:
9176                 break;
9177         }
9178         if (!in_mpls_v)
9179                 return;
9180         if (!in_mpls_m)
9181                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9182         switch (prev_layer) {
9183         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9184                 out_mpls_m =
9185                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9186                                                  outer_first_mpls_over_udp);
9187                 out_mpls_v =
9188                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9189                                                  outer_first_mpls_over_udp);
9190                 break;
9191         case MLX5_FLOW_LAYER_GRE:
9192                 out_mpls_m =
9193                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9194                                                  outer_first_mpls_over_gre);
9195                 out_mpls_v =
9196                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9197                                                  outer_first_mpls_over_gre);
9198                 break;
9199         default:
9200                 /* Matching inner MPLS not encapsulated in GRE is not supported. */
9201                 if (!inner) {
9202                         out_mpls_m =
9203                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9204                                                          misc2_m,
9205                                                          outer_first_mpls);
9206                         out_mpls_v =
9207                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9208                                                          misc2_v,
9209                                                          outer_first_mpls);
9210                 }
9211                 break;
9212         }
9213         if (out_mpls_m && out_mpls_v) {
9214                 *out_mpls_m = *in_mpls_m;
9215                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9216         }
9217 }
9218
9219 /**
9220  * Add metadata register item to matcher
9221  *
9222  * @param[in, out] matcher
9223  *   Flow matcher.
9224  * @param[in, out] key
9225  *   Flow matcher value.
9226  * @param[in] reg_type
9227  *   Type of device metadata register.
9228  * @param[in] data
9229  *   Register value.
9230  * @param[in] mask
9231  *   Register mask.
9232  */
9233 static void
9234 flow_dv_match_meta_reg(void *matcher, void *key,
9235                        enum modify_reg reg_type,
9236                        uint32_t data, uint32_t mask)
9237 {
9238         void *misc2_m =
9239                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9240         void *misc2_v =
9241                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9242         uint32_t temp;
9243
9244         data &= mask;
9245         switch (reg_type) {
9246         case REG_A:
9247                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9248                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9249                 break;
9250         case REG_B:
9251                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9252                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9253                 break;
9254         case REG_C_0:
9255                 /*
9256                  * The metadata register C0 field might be divided into
9257                  * source vport index and META item value, we should set
9258                  * this field according to specified mask, not as whole one.
9259                  */
9260                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9261                 temp |= mask;
9262                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9263                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9264                 temp &= ~mask;
9265                 temp |= data;
9266                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9267                 break;
9268         case REG_C_1:
9269                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9270                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9271                 break;
9272         case REG_C_2:
9273                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9274                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9275                 break;
9276         case REG_C_3:
9277                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9278                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9279                 break;
9280         case REG_C_4:
9281                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9282                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9283                 break;
9284         case REG_C_5:
9285                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9286                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9287                 break;
9288         case REG_C_6:
9289                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9290                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9291                 break;
9292         case REG_C_7:
9293                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9294                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9295                 break;
9296         default:
9297                 MLX5_ASSERT(false);
9298                 break;
9299         }
9300 }
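
/*
 * Illustrative note on the REG_C_0 read-modify-write above (hypothetical
 * values): if a vport match already programmed mask 0x0000ffff with data
 * 0x00001234, a later META match with mask 0xffff0000 and data 0x56780000
 * merges into the register instead of overwriting it:
 *
 *     mask:  0x0000ffff | 0xffff0000                = 0xffffffff
 *     data: (0x00001234 & ~0xffff0000) | 0x56780000 = 0x56781234
 */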
9301
9302 /**
9303  * Add MARK item to matcher
9304  *
9305  * @param[in] dev
9306  *   The device to configure through.
9307  * @param[in, out] matcher
9308  *   Flow matcher.
9309  * @param[in, out] key
9310  *   Flow matcher value.
9311  * @param[in] item
9312  *   Flow pattern to translate.
9313  */
9314 static void
9315 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9316                             void *matcher, void *key,
9317                             const struct rte_flow_item *item)
9318 {
9319         struct mlx5_priv *priv = dev->data->dev_private;
9320         const struct rte_flow_item_mark *mark;
9321         uint32_t value;
9322         uint32_t mask;
9323
9324         mark = item->mask ? (const void *)item->mask :
9325                             &rte_flow_item_mark_mask;
9326         mask = mark->id & priv->sh->dv_mark_mask;
9327         mark = (const void *)item->spec;
9328         MLX5_ASSERT(mark);
9329         value = mark->id & priv->sh->dv_mark_mask & mask;
9330         if (mask) {
9331                 enum modify_reg reg;
9332
9333                 /* Get the metadata register index for the mark. */
9334                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9335                 MLX5_ASSERT(reg > 0);
9336                 if (reg == REG_C_0) {
9337                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9339                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9340
9341                         mask &= msk_c0;
9342                         mask <<= shl_c0;
9343                         value <<= shl_c0;
9344                 }
9345                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9346         }
9347 }
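
/*
 * Illustrative note on the REG_C_0 shift above (hypothetical mask): when
 * MARK shares REG_C_0 with the vport metadata, dv_regc0_mask describes
 * the bits available for the mark value. For msk_c0 = 0xffff0000,
 * rte_bsf32() returns shl_c0 = 16, so value and mask are shifted into the
 * upper half and the vport bits in the lower half are left untouched.
 * The META translation below applies the same shift.
 */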
9348
9349 /**
9350  * Add META item to matcher
9351  *
9352  * @param[in] dev
9353  *   The device to configure through.
9354  * @param[in, out] matcher
9355  *   Flow matcher.
9356  * @param[in, out] key
9357  *   Flow matcher value.
9358  * @param[in] attr
9359  *   Attributes of flow that includes this item.
9360  * @param[in] item
9361  *   Flow pattern to translate.
9362  */
9363 static void
9364 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9365                             void *matcher, void *key,
9366                             const struct rte_flow_attr *attr,
9367                             const struct rte_flow_item *item)
9368 {
9369         const struct rte_flow_item_meta *meta_m;
9370         const struct rte_flow_item_meta *meta_v;
9371
9372         meta_m = (const void *)item->mask;
9373         if (!meta_m)
9374                 meta_m = &rte_flow_item_meta_mask;
9375         meta_v = (const void *)item->spec;
9376         if (meta_v) {
9377                 int reg;
9378                 uint32_t value = meta_v->data;
9379                 uint32_t mask = meta_m->data;
9380
9381                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9382                 if (reg < 0)
9383                         return;
9384                 MLX5_ASSERT(reg != REG_NON);
9385                 if (reg == REG_C_0) {
9386                         struct mlx5_priv *priv = dev->data->dev_private;
9387                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9388                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9389
9390                         mask &= msk_c0;
9391                         mask <<= shl_c0;
9392                         value <<= shl_c0;
9393                 }
9394                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9395         }
9396 }
9397
9398 /**
9399  * Add vport metadata Reg C0 item to matcher
9400  *
9401  * @param[in, out] matcher
9402  *   Flow matcher.
9403  * @param[in, out] key
9404  *   Flow matcher value.
9405  * @param[in] value, mask
9406  *   Vport metadata register C0 value and mask to match.
9407  */
9408 static void
9409 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9410                                   uint32_t value, uint32_t mask)
9411 {
9412         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9413 }
9414
9415 /**
9416  * Add tag item to matcher
9417  *
9418  * @param[in] dev
9419  *   The device to configure through.
9420  * @param[in, out] matcher
9421  *   Flow matcher.
9422  * @param[in, out] key
9423  *   Flow matcher value.
9424  * @param[in] item
9425  *   Flow pattern to translate.
9426  */
9427 static void
9428 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9429                                 void *matcher, void *key,
9430                                 const struct rte_flow_item *item)
9431 {
9432         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9433         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9434         uint32_t mask, value;
9435
9436         MLX5_ASSERT(tag_v);
9437         value = tag_v->data;
9438         mask = tag_m ? tag_m->data : UINT32_MAX;
9439         if (tag_v->id == REG_C_0) {
9440                 struct mlx5_priv *priv = dev->data->dev_private;
9441                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9442                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9443
9444                 mask &= msk_c0;
9445                 mask <<= shl_c0;
9446                 value <<= shl_c0;
9447         }
9448         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9449 }
9450
9451 /**
9452  * Add TAG item to matcher
9453  *
9454  * @param[in] dev
9455  *   The device to configure through.
9456  * @param[in, out] matcher
9457  *   Flow matcher.
9458  * @param[in, out] key
9459  *   Flow matcher value.
9460  * @param[in] item
9461  *   Flow pattern to translate.
9462  */
9463 static void
9464 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9465                            void *matcher, void *key,
9466                            const struct rte_flow_item *item)
9467 {
9468         const struct rte_flow_item_tag *tag_v = item->spec;
9469         const struct rte_flow_item_tag *tag_m = item->mask;
9470         enum modify_reg reg;
9471
9472         MLX5_ASSERT(tag_v);
9473         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9474         /* Get the metadata register index for the tag. */
9475         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9476         MLX5_ASSERT(reg > 0);
9477         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9478 }
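/*
 * Usage sketch (assumed values, for illustration): an application TAG match
 * as it arrives from rte_flow. tag->index selects which REG_C_x backs the
 * tag via mlx5_flow_get_reg_id():
 *
 *     struct rte_flow_item_tag spec = { .data = 0xbeef, .index = 1 };
 *     struct rte_flow_item_tag mask = { .data = 0xffff, .index = 0xff };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_TAG,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 *
 * flow_dv_translate_item_tag() then emits a single metadata register match
 * on the resolved REG_C_x with value 0xbeef and mask 0xffff.
 */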
9479
9480 /**
9481  * Add source vport match to the specified matcher.
9482  *
9483  * @param[in, out] matcher
9484  *   Flow matcher.
9485  * @param[in, out] key
9486  *   Flow matcher value.
9487  * @param[in] port
9488  *   Source vport value to match
9489  * @param[in] mask
9490  *   Mask
9491  */
9492 static void
9493 flow_dv_translate_item_source_vport(void *matcher, void *key,
9494                                     int16_t port, uint16_t mask)
9495 {
9496         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9497         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9498
9499         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9500         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9501 }
9502
9503 /**
9504  * Translate port-id item to eswitch match on port-id.
9505  *
9506  * @param[in] dev
9507  *   The device to configure through.
9508  * @param[in, out] matcher
9509  *   Flow matcher.
9510  * @param[in, out] key
9511  *   Flow matcher value.
9512  * @param[in] item
9513  *   Flow pattern to translate.
9514  * @param[in] attr
9515  *   Flow attributes.
9516  *
9517  * @return
9518  *   0 on success, a negative errno value otherwise.
9519  */
9520 static int
9521 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9522                                void *key, const struct rte_flow_item *item,
9523                                const struct rte_flow_attr *attr)
9524 {
9525         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9526         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9527         struct mlx5_priv *priv;
9528         uint16_t mask, id;
9529
9530         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9531                 flow_dv_translate_item_source_vport(matcher, key,
9532                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9533                 return 0;
9534         }
9535         mask = pid_m ? pid_m->id : 0xffff;
9536         id = pid_v ? pid_v->id : dev->data->port_id;
9537         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9538         if (!priv)
9539                 return -rte_errno;
9540         /*
9541          * Translate to vport field or to metadata, depending on mode.
9542          * Kernel can use either misc.source_port or half of C0 metadata
9543          * register.
9544          */
9545         if (priv->vport_meta_mask) {
9546                 /*
9547                  * Provide the hint for SW steering library
9548                  * to insert the flow into ingress domain and
9549                  * save the extra vport match.
9550                  */
9551                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9552                     priv->pf_bond < 0 && attr->transfer)
9553                         flow_dv_translate_item_source_vport
9554                                 (matcher, key, priv->vport_id, mask);
9555                 /*
9556                  * We should always set the vport metadata register,
9557                  * otherwise the SW steering library can drop
9558                  * the rule if the wire vport metadata value is not zero;
9559                  * this depends on the kernel configuration.
9560                  */
9561                 flow_dv_translate_item_meta_vport(matcher, key,
9562                                                   priv->vport_meta_tag,
9563                                                   priv->vport_meta_mask);
9564         } else {
9565                 flow_dv_translate_item_source_vport(matcher, key,
9566                                                     priv->vport_id, mask);
9567         }
9568         return 0;
9569 }
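/*
 * Usage sketch (assumed values, for illustration): a PORT_ID item matching
 * traffic from DPDK port 1, as it would reach this function:
 *
 *     struct rte_flow_item_port_id spec = { .id = 1 };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *             .spec = &spec,
 *     };
 *
 * Depending on the mode resolved above, the translation ends up either as
 * a misc.source_port match (legacy) or as a REG_C_0 match on the per-port
 * vport_meta_tag/vport_meta_mask pair (metadata mode).
 */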
9570
9571 /**
9572  * Add ICMP6 item to matcher and to the value.
9573  *
9574  * @param[in, out] matcher
9575  *   Flow matcher.
9576  * @param[in, out] key
9577  *   Flow matcher value.
9578  * @param[in] item
9579  *   Flow pattern to translate.
9580  * @param[in] inner
9581  *   Item is inner pattern.
9582  */
9583 static void
9584 flow_dv_translate_item_icmp6(void *matcher, void *key,
9585                               const struct rte_flow_item *item,
9586                               int inner)
9587 {
9588         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9589         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9590         void *headers_m;
9591         void *headers_v;
9592         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9593                                      misc_parameters_3);
9594         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9595         if (inner) {
9596                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9597                                          inner_headers);
9598                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9599         } else {
9600                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9601                                          outer_headers);
9602                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9603         }
9604         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9605         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9606         if (!icmp6_v)
9607                 return;
9608         if (!icmp6_m)
9609                 icmp6_m = &rte_flow_item_icmp6_mask;
9610         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9611         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9612                  icmp6_v->type & icmp6_m->type);
9613         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9614         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9615                  icmp6_v->code & icmp6_m->code);
9616 }
9617
9618 /**
9619  * Add ICMP item to matcher and to the value.
9620  *
9621  * @param[in, out] matcher
9622  *   Flow matcher.
9623  * @param[in, out] key
9624  *   Flow matcher value.
9625  * @param[in] item
9626  *   Flow pattern to translate.
9627  * @param[in] inner
9628  *   Item is inner pattern.
9629  */
9630 static void
9631 flow_dv_translate_item_icmp(void *matcher, void *key,
9632                             const struct rte_flow_item *item,
9633                             int inner)
9634 {
9635         const struct rte_flow_item_icmp *icmp_m = item->mask;
9636         const struct rte_flow_item_icmp *icmp_v = item->spec;
9637         uint32_t icmp_header_data_m = 0;
9638         uint32_t icmp_header_data_v = 0;
9639         void *headers_m;
9640         void *headers_v;
9641         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9642                                      misc_parameters_3);
9643         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9644         if (inner) {
9645                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9646                                          inner_headers);
9647                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9648         } else {
9649                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9650                                          outer_headers);
9651                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9652         }
9653         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9654         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9655         if (!icmp_v)
9656                 return;
9657         if (!icmp_m)
9658                 icmp_m = &rte_flow_item_icmp_mask;
9659         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9660                  icmp_m->hdr.icmp_type);
9661         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9662                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9663         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9664                  icmp_m->hdr.icmp_code);
9665         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9666                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9667         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9668         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9669         if (icmp_header_data_m) {
9670                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9671                 icmp_header_data_v |=
9672                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9673                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9674                          icmp_header_data_m);
9675                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9676                          icmp_header_data_v & icmp_header_data_m);
9677         }
9678 }
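/*
 * Worked example of the icmp_header_data packing above (illustrative):
 * the device samples seq_nb in the low 16 bits and ident in the high 16
 * bits of a single dword. For an ICMP echo with ident 0x1234 and sequence
 * number 0x0001, both matched exactly:
 *
 *     icmp_header_data_m = 0xffff | (0xffff << 16);   // 0xffffffff
 *     icmp_header_data_v = 0x0001 | (0x1234 << 16);   // 0x12340001
 */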
9679
9680 /**
9681  * Add GTP item to matcher and to the value.
9682  *
9683  * @param[in, out] matcher
9684  *   Flow matcher.
9685  * @param[in, out] key
9686  *   Flow matcher value.
9687  * @param[in] item
9688  *   Flow pattern to translate.
9689  * @param[in] inner
9690  *   Item is inner pattern.
9691  */
9692 static void
9693 flow_dv_translate_item_gtp(void *matcher, void *key,
9694                            const struct rte_flow_item *item, int inner)
9695 {
9696         const struct rte_flow_item_gtp *gtp_m = item->mask;
9697         const struct rte_flow_item_gtp *gtp_v = item->spec;
9698         void *headers_m;
9699         void *headers_v;
9700         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9701                                      misc_parameters_3);
9702         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9703         uint16_t dport = RTE_GTPU_UDP_PORT;
9704
9705         if (inner) {
9706                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9707                                          inner_headers);
9708                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9709         } else {
9710                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9711                                          outer_headers);
9712                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9713         }
9714         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9715                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9716                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9717         }
9718         if (!gtp_v)
9719                 return;
9720         if (!gtp_m)
9721                 gtp_m = &rte_flow_item_gtp_mask;
9722         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9723                  gtp_m->v_pt_rsv_flags);
9724         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9725                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9726         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9727         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9728                  gtp_v->msg_type & gtp_m->msg_type);
9729         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9730                  rte_be_to_cpu_32(gtp_m->teid));
9731         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9732                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9733 }
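/*
 * Usage sketch (assumed values, for illustration): matching GTP-U G-PDU
 * packets with TEID 1; the default UDP destination port 2152 is filled in
 * implicitly above when the pattern does not constrain it:
 *
 *     struct rte_flow_item_gtp spec = {
 *             .msg_type = 0xff,               // G-PDU
 *             .teid = RTE_BE32(1),
 *     };
 *     struct rte_flow_item_gtp mask = {
 *             .msg_type = 0xff,
 *             .teid = RTE_BE32(UINT32_MAX),
 *     };
 */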
9734
9735 /**
9736  * Add GTP PSC item to matcher.
9737  *
9738  * @param[in, out] matcher
9739  *   Flow matcher.
9740  * @param[in, out] key
9741  *   Flow matcher value.
9742  * @param[in] item
9743  *   Flow pattern to translate.
9744  */
9745 static int
9746 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9747                                const struct rte_flow_item *item)
9748 {
9749         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9750         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9751         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9752                         misc_parameters_3);
9753         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9754         union {
9755                 uint32_t w32;
9756                 struct {
9757                         uint16_t seq_num;
9758                         uint8_t npdu_num;
9759                         uint8_t next_ext_header_type;
9760                 };
9761         } dw_2;
9762         uint8_t gtp_flags;
9763
9764         /* Always set E-flag match on one, regardless of GTP item settings. */
9765         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9766         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9767         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9768         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9769         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9770         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9771         /* Set next extension header type. */
9772         dw_2.seq_num = 0;
9773         dw_2.npdu_num = 0;
9774         dw_2.next_ext_header_type = 0xff;
9775         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9776                  rte_cpu_to_be_32(dw_2.w32));
9777         dw_2.seq_num = 0;
9778         dw_2.npdu_num = 0;
9779         dw_2.next_ext_header_type = 0x85;
9780         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9781                  rte_cpu_to_be_32(dw_2.w32));
9782         if (gtp_psc_v) {
9783                 union {
9784                         uint32_t w32;
9785                         struct {
9786                                 uint8_t len;
9787                                 uint8_t type_flags;
9788                                 uint8_t qfi;
9789                                 uint8_t reserved;
9790                         };
9791                 } dw_0;
9792
9793                 /* Set extension header PDU type and QoS. */
9794                 if (!gtp_psc_m)
9795                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9796                 dw_0.w32 = 0;
9797                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
9798                 dw_0.qfi = gtp_psc_m->hdr.qfi;
9799                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9800                          rte_cpu_to_be_32(dw_0.w32));
9801                 dw_0.w32 = 0;
9802                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
9803                                                         gtp_psc_m->hdr.type);
9804                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
9805                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9806                          rte_cpu_to_be_32(dw_0.w32));
9807         }
9808         return 0;
9809 }
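/*
 * Usage sketch (assumed values, for illustration): matching an uplink PDU
 * session container with QFI 9:
 *
 *     struct rte_flow_item_gtp_psc spec = {
 *             .hdr = { .type = 1, .qfi = 9 },         // 1 = UL PDU info
 *     };
 *     struct rte_flow_item_gtp_psc mask = {
 *             .hdr = { .type = 0xf, .qfi = 0x3f },
 *     };
 *
 * The 0x85 programmed unconditionally above is the GTP-U next extension
 * header type of the PDU session container, so any GTP_PSC item implies a
 * match on that extension being present.
 */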
9810
9811 /**
9812  * Add eCPRI item to matcher and to the value.
9813  *
9814  * @param[in] dev
9815  *   The device to configure through.
9816  * @param[in, out] matcher
9817  *   Flow matcher.
9818  * @param[in, out] key
9819  *   Flow matcher value.
9820  * @param[in] item
9821  *   Flow pattern to translate.
9822  * @param[in] last_item
9823  *   Last item flags.
9824  */
9825 static void
9826 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9827                              void *key, const struct rte_flow_item *item,
9828                              uint64_t last_item)
9829 {
9830         struct mlx5_priv *priv = dev->data->dev_private;
9831         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9832         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9833         struct rte_ecpri_common_hdr common;
9834         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9835                                      misc_parameters_4);
9836         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9837         uint32_t *samples;
9838         void *dw_m;
9839         void *dw_v;
9840
9841         /*
9842          * In case of eCPRI over Ethernet, if EtherType is not specified,
9843          * match on eCPRI EtherType implicitly.
9844          */
9845         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
9846                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
9847
9848                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9849                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9850                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
9851                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
9852                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
9853                         *(uint16_t *)l2m = UINT16_MAX;
9854                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
9855                 }
9856         }
9857         if (!ecpri_v)
9858                 return;
9859         if (!ecpri_m)
9860                 ecpri_m = &rte_flow_item_ecpri_mask;
9861         /*
9862          * At most four DW samples are supported in a single matcher now.
9863          * Two are used for eCPRI matching:
9864          * 1. Type: one byte, mask should be 0x00ff0000 in network order
9865          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
9866          *    if any.
9867          */
9868         if (!ecpri_m->hdr.common.u32)
9869                 return;
9870         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9871         /* Need to take the whole DW as the mask to fill the entry. */
9872         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9873                             prog_sample_field_value_0);
9874         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9875                             prog_sample_field_value_0);
9876         /* Already big endian (network order) in the header. */
9877         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9878         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9879         /* Sample#0, used for matching type, offset 0. */
9880         MLX5_SET(fte_match_set_misc4, misc4_m,
9881                  prog_sample_field_id_0, samples[0]);
9882         /* The sample ID is not a maskable field, set it in the value as well. */
9883         MLX5_SET(fte_match_set_misc4, misc4_v,
9884                  prog_sample_field_id_0, samples[0]);
9885         /*
9886          * Check if the message body part needs to be matched.
9887          * Wildcard rules matching only the type field should be supported.
9888          */
9889         if (ecpri_m->hdr.dummy[0]) {
9890                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9891                 switch (common.type) {
9892                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9893                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9894                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9895                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9896                                             prog_sample_field_value_1);
9897                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9898                                             prog_sample_field_value_1);
9899                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9900                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9901                                             ecpri_m->hdr.dummy[0];
9902                         /* Sample#1, to match message body, offset 4. */
9903                         MLX5_SET(fte_match_set_misc4, misc4_m,
9904                                  prog_sample_field_id_1, samples[1]);
9905                         MLX5_SET(fte_match_set_misc4, misc4_v,
9906                                  prog_sample_field_id_1, samples[1]);
9907                         break;
9908                 default:
9909                         /* Others, do not match any sample ID. */
9910                         break;
9911                 }
9912         }
9913 }
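/*
 * Worked example (assumed values, for illustration): an eCPRI IQ-data rule
 * matching message type 0 and PC_ID 0x1234. Per the note above, sample #0
 * covers the common header dword (type byte mask 0x00ff0000 in network
 * order) and sample #1 covers the first payload dword:
 *
 *     struct rte_flow_item_ecpri spec, mask;
 *
 *     memset(&spec, 0, sizeof(spec));
 *     memset(&mask, 0, sizeof(mask));
 *     spec.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
 *     spec.hdr.type0.pc_id = RTE_BE16(0x1234);
 *     mask.hdr.common.u32 = RTE_BE32(0x00ff0000);
 *     mask.hdr.type0.pc_id = RTE_BE16(0xffff);
 */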
9914
9915 /*
9916  * Add connection tracking status item to matcher
9917  *
9918  * @param[in] dev
9919  *   The device to configure through.
9920  * @param[in, out] matcher
9921  *   Flow matcher.
9922  * @param[in, out] key
9923  *   Flow matcher value.
9924  * @param[in] item
9925  *   Flow pattern to translate.
9926  */
9927 static void
9928 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
9929                               void *matcher, void *key,
9930                               const struct rte_flow_item *item)
9931 {
9932         uint32_t reg_value = 0;
9933         int reg_id;
9934         /* The 8 LSBs are 0b11000011; the middle 4 bits are reserved. */
9935         uint32_t reg_mask = 0;
9936         const struct rte_flow_item_conntrack *spec = item->spec;
9937         const struct rte_flow_item_conntrack *mask = item->mask;
9938         uint32_t flags;
9939         struct rte_flow_error error;
9940
9941         if (!mask)
9942                 mask = &rte_flow_item_conntrack_mask;
9943         if (!spec || !mask->flags)
9944                 return;
9945         flags = spec->flags & mask->flags;
9946         /* Conflicting flags should have been rejected in validation. */
9947         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
9948                 reg_value |= MLX5_CT_SYNDROME_VALID;
9949         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9950                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
9951         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
9952                 reg_value |= MLX5_CT_SYNDROME_INVALID;
9953         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
9954                 reg_value |= MLX5_CT_SYNDROME_TRAP;
9955         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9956                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
9957         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
9958                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
9959                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
9960                 reg_mask |= 0xc0;
9961         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9962                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
9963         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9964                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
9965         /* The REG_C_x value could be saved during startup. */
9966         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
9967         if (reg_id == REG_NON)
9968                 return;
9969         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
9970                                reg_value, reg_mask);
9971 }
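/*
 * Example mapping (illustrative): a rule asking for packets that are valid
 * and whose connection state did not change,
 *
 *     spec.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID;
 *     mask.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
 *                  RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED;
 *
 * yields reg_value = MLX5_CT_SYNDROME_VALID and reg_mask = 0xc0 |
 * MLX5_CT_SYNDROME_STATE_CHANGE, matched on the REG_C_x that carries the
 * ASO CT syndrome.
 */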
9972
9973 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9974
9975 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9976         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9977                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9978
9979 /**
9980  * Calculate flow matcher enable bitmap.
9981  *
9982  * @param match_criteria
9983  *   Pointer to flow matcher criteria.
9984  *
9985  * @return
9986  *   Bitmap of enabled fields.
9987  */
9988 static uint8_t
9989 flow_dv_matcher_enable(uint32_t *match_criteria)
9990 {
9991         uint8_t match_criteria_enable;
9992
9993         match_criteria_enable =
9994                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9995                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9996         match_criteria_enable |=
9997                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9998                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9999         match_criteria_enable |=
10000                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10001                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10002         match_criteria_enable |=
10003                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10004                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10005         match_criteria_enable |=
10006                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10007                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10008         match_criteria_enable |=
10009                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10010                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10011         match_criteria_enable |=
10012                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10013                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10014         return match_criteria_enable;
10015 }
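/*
 * Example (illustrative): a matcher that touches only the outer L2-L4
 * headers and misc_parameters_3 (e.g. outer UDP plus a GTP TEID) yields
 *
 *     match_criteria_enable =
 *             (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *             (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT);
 *
 * so only those fte_match_param sections are considered to carry a
 * non-zero mask.
 */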
10016
10017 static void
10018 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10019 {
10020         /*
10021          * Check the flow matching criteria first, and subtract the misc5/4
10022          * lengths if the flow doesn't own misc5/4 parameters. Some old
10023          * rdma-core releases do not support misc5/4, and matcher creation
10024          * would fail there without the subtraction. If misc5 is provided,
10025          * misc4 must be counted in as well, since misc5 follows misc4.
10026          */
10027         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10028                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10029                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10030                 if (!(match_criteria & (1 <<
10031                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10032                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10033                 }
10034         }
10035 }
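/*
 * Sizing sketch (symbolic, illustrative): with neither misc4 nor misc5
 * enabled, the buffer shrinks by both trailing sections:
 *
 *     *size = MLX5_ST_SZ_BYTES(fte_match_param) -
 *             MLX5_ST_SZ_BYTES(fte_match_set_misc5) -
 *             MLX5_ST_SZ_BYTES(fte_match_set_misc4);
 *
 * With misc5 enabled, the full fte_match_param size is kept, because misc4
 * sits immediately before misc5 and cannot be dropped on its own.
 */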
10036
10037 static struct mlx5_list_entry *
10038 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10039                          struct mlx5_list_entry *entry, void *cb_ctx)
10040 {
10041         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10042         struct mlx5_flow_dv_matcher *ref = ctx->data;
10043         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10044                                                             typeof(*tbl), tbl);
10045         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10046                                                             sizeof(*resource),
10047                                                             0, SOCKET_ID_ANY);
10048
10049         if (!resource) {
10050                 rte_flow_error_set(ctx->error, ENOMEM,
10051                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10052                                    "cannot create matcher");
10053                 return NULL;
10054         }
10055         memcpy(resource, entry, sizeof(*resource));
10056         resource->tbl = &tbl->tbl;
10057         return &resource->entry;
10058 }
10059
10060 static void
10061 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10062                              struct mlx5_list_entry *entry)
10063 {
10064         mlx5_free(entry);
10065 }
10066
10067 struct mlx5_list_entry *
10068 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10069 {
10070         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10071         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10072         struct rte_eth_dev *dev = ctx->dev;
10073         struct mlx5_flow_tbl_data_entry *tbl_data;
10074         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10075         struct rte_flow_error *error = ctx->error;
10076         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10077         struct mlx5_flow_tbl_resource *tbl;
10078         void *domain;
10079         uint32_t idx = 0;
10080         int ret;
10081
10082         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10083         if (!tbl_data) {
10084                 rte_flow_error_set(error, ENOMEM,
10085                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10086                                    NULL,
10087                                    "cannot allocate flow table data entry");
10088                 return NULL;
10089         }
10090         tbl_data->idx = idx;
10091         tbl_data->tunnel = tt_prm->tunnel;
10092         tbl_data->group_id = tt_prm->group_id;
10093         tbl_data->external = !!tt_prm->external;
10094         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10095         tbl_data->is_egress = !!key.is_egress;
10096         tbl_data->is_transfer = !!key.is_fdb;
10097         tbl_data->dummy = !!key.dummy;
10098         tbl_data->level = key.level;
10099         tbl_data->id = key.id;
10100         tbl = &tbl_data->tbl;
10101         if (key.dummy)
10102                 return &tbl_data->entry;
10103         if (key.is_fdb)
10104                 domain = sh->fdb_domain;
10105         else if (key.is_egress)
10106                 domain = sh->tx_domain;
10107         else
10108                 domain = sh->rx_domain;
10109         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10110         if (ret) {
10111                 rte_flow_error_set(error, ENOMEM,
10112                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10113                                    NULL, "cannot create flow table object");
10114                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10115                 return NULL;
10116         }
10117         if (key.level != 0) {
10118                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10119                                         (tbl->obj, &tbl_data->jump.action);
10120                 if (ret) {
10121                         rte_flow_error_set(error, ENOMEM,
10122                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10123                                            NULL,
10124                                            "cannot create flow jump action");
10125                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10126                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10127                         return NULL;
10128                 }
10129         }
10130         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10131               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10132               key.level, key.id);
10133         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10134                                               flow_dv_matcher_create_cb,
10135                                               flow_dv_matcher_match_cb,
10136                                               flow_dv_matcher_remove_cb,
10137                                               flow_dv_matcher_clone_cb,
10138                                               flow_dv_matcher_clone_free_cb);
10139         if (!tbl_data->matchers) {
10140                 rte_flow_error_set(error, ENOMEM,
10141                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10142                                    NULL,
10143                                    "cannot create tbl matcher list");
10144                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10145                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10146                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10147                 return NULL;
10148         }
10149         return &tbl_data->entry;
10150 }
10151
10152 int
10153 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10154                      void *cb_ctx)
10155 {
10156         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10157         struct mlx5_flow_tbl_data_entry *tbl_data =
10158                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10159         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10160
10161         return tbl_data->level != key.level ||
10162                tbl_data->id != key.id ||
10163                tbl_data->dummy != key.dummy ||
10164                tbl_data->is_transfer != !!key.is_fdb ||
10165                tbl_data->is_egress != !!key.is_egress;
10166 }
10167
10168 struct mlx5_list_entry *
10169 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10170                       void *cb_ctx)
10171 {
10172         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10173         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10174         struct mlx5_flow_tbl_data_entry *tbl_data;
10175         struct rte_flow_error *error = ctx->error;
10176         uint32_t idx = 0;
10177
10178         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10179         if (!tbl_data) {
10180                 rte_flow_error_set(error, ENOMEM,
10181                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10182                                    NULL,
10183                                    "cannot allocate flow table data entry");
10184                 return NULL;
10185         }
10186         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10187         tbl_data->idx = idx;
10188         return &tbl_data->entry;
10189 }
10190
10191 void
10192 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10193 {
10194         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10195         struct mlx5_flow_tbl_data_entry *tbl_data =
10196                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10197
10198         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10199 }
10200
10201 /**
10202  * Get a flow table.
10203  *
10204  * @param[in, out] dev
10205  *   Pointer to rte_eth_dev structure.
10206  * @param[in] table_level
10207  *   Table level to use.
10208  * @param[in] egress
10209  *   Direction of the table.
10210  * @param[in] transfer
10211  *   E-Switch or NIC flow.
10212  * @param[in] dummy
10213  *   Dummy entry for dv API.
10214  * @param[in] table_id
10215  *   Table id to use.
10216  * @param[out] error
10217  *   pointer to error structure.
10218  *
10219  * @return
10220  *   Returns the table resource on success, NULL in case of failure.
10221  */
10222 struct mlx5_flow_tbl_resource *
10223 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10224                          uint32_t table_level, uint8_t egress,
10225                          uint8_t transfer,
10226                          bool external,
10227                          const struct mlx5_flow_tunnel *tunnel,
10228                          uint32_t group_id, uint8_t dummy,
10229                          uint32_t table_id,
10230                          struct rte_flow_error *error)
10231 {
10232         struct mlx5_priv *priv = dev->data->dev_private;
10233         union mlx5_flow_tbl_key table_key = {
10234                 {
10235                         .level = table_level,
10236                         .id = table_id,
10237                         .reserved = 0,
10238                         .dummy = !!dummy,
10239                         .is_fdb = !!transfer,
10240                         .is_egress = !!egress,
10241                 }
10242         };
10243         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10244                 .tunnel = tunnel,
10245                 .group_id = group_id,
10246                 .external = external,
10247         };
10248         struct mlx5_flow_cb_ctx ctx = {
10249                 .dev = dev,
10250                 .error = error,
10251                 .data = &table_key.v64,
10252                 .data2 = &tt_prm,
10253         };
10254         struct mlx5_list_entry *entry;
10255         struct mlx5_flow_tbl_data_entry *tbl_data;
10256
10257         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10258         if (!entry) {
10259                 rte_flow_error_set(error, ENOMEM,
10260                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10261                                    "cannot get table");
10262                 return NULL;
10263         }
10264         DRV_LOG(DEBUG, "table_level %u table_id %u "
10265                 "tunnel %u group %u registered.",
10266                 table_level, table_id,
10267                 tunnel ? tunnel->tunnel_id : 0, group_id);
10268         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10269         return &tbl_data->tbl;
10270 }
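/*
 * Usage sketch (illustrative, error handling elided): fetching the NIC Rx
 * table for group 1, roughly as the jump/dest-table translation does:
 *
 *     struct mlx5_flow_tbl_resource *tbl;
 *
 *     tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *                                    1, 0, 0, error);
 *     if (!tbl)
 *             return -rte_errno;
 *     ...
 *     flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
 *
 * The entry is reference counted: repeated gets with the same key return
 * the same table until every user has released it.
 */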
10271
10272 void
10273 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10274 {
10275         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10276         struct mlx5_flow_tbl_data_entry *tbl_data =
10277                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10278
10279         MLX5_ASSERT(entry && sh);
10280         if (tbl_data->jump.action)
10281                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10282         if (tbl_data->tbl.obj)
10283                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10284         if (tbl_data->tunnel_offload && tbl_data->external) {
10285                 struct mlx5_list_entry *he;
10286                 struct mlx5_hlist *tunnel_grp_hash;
10287                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10288                 union tunnel_tbl_key tunnel_key = {
10289                         .tunnel_id = tbl_data->tunnel ?
10290                                         tbl_data->tunnel->tunnel_id : 0,
10291                         .group = tbl_data->group_id
10292                 };
10293                 uint32_t table_level = tbl_data->level;
10294                 struct mlx5_flow_cb_ctx ctx = {
10295                         .data = (void *)&tunnel_key.val,
10296                 };
10297
10298                 tunnel_grp_hash = tbl_data->tunnel ?
10299                                         tbl_data->tunnel->groups :
10300                                         thub->groups;
10301                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10302                 if (he)
10303                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10304                 DRV_LOG(DEBUG,
10305                         "table_level %u id %u tunnel %u group %u released.",
10306                         table_level,
10307                         tbl_data->id,
10308                         tbl_data->tunnel ?
10309                         tbl_data->tunnel->tunnel_id : 0,
10310                         tbl_data->group_id);
10311         }
10312         mlx5_list_destroy(tbl_data->matchers);
10313         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10314 }
10315
10316 /**
10317  * Release a flow table.
10318  *
10319  * @param[in] sh
10320  *   Pointer to device shared structure.
10321  * @param[in] tbl
10322  *   Table resource to be released.
10323  *
10324  * @return
10325  *   Returns 0 if the table was released, 1 otherwise.
10326  */
10327 static int
10328 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10329                              struct mlx5_flow_tbl_resource *tbl)
10330 {
10331         struct mlx5_flow_tbl_data_entry *tbl_data =
10332                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10333
10334         if (!tbl)
10335                 return 0;
10336         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10337 }
10338
10339 int
10340 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10341                          struct mlx5_list_entry *entry, void *cb_ctx)
10342 {
10343         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10344         struct mlx5_flow_dv_matcher *ref = ctx->data;
10345         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10346                                                         entry);
10347
10348         return cur->crc != ref->crc ||
10349                cur->priority != ref->priority ||
10350                memcmp((const void *)cur->mask.buf,
10351                       (const void *)ref->mask.buf, ref->mask.size);
10352 }
10353
10354 struct mlx5_list_entry *
10355 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10356 {
10357         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10358         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10359         struct mlx5_flow_dv_matcher *ref = ctx->data;
10360         struct mlx5_flow_dv_matcher *resource;
10361         struct mlx5dv_flow_matcher_attr dv_attr = {
10362                 .type = IBV_FLOW_ATTR_NORMAL,
10363                 .match_mask = (void *)&ref->mask,
10364         };
10365         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10366                                                             typeof(*tbl), tbl);
10367         int ret;
10368
10369         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10370                                SOCKET_ID_ANY);
10371         if (!resource) {
10372                 rte_flow_error_set(ctx->error, ENOMEM,
10373                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10374                                    "cannot create matcher");
10375                 return NULL;
10376         }
10377         *resource = *ref;
10378         dv_attr.match_criteria_enable =
10379                 flow_dv_matcher_enable(resource->mask.buf);
10380         __flow_dv_adjust_buf_size(&ref->mask.size,
10381                                   dv_attr.match_criteria_enable);
10382         dv_attr.priority = ref->priority;
10383         if (tbl->is_egress)
10384                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10385         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10386                                                tbl->tbl.obj,
10387                                                &resource->matcher_object);
10388         if (ret) {
10389                 mlx5_free(resource);
10390                 rte_flow_error_set(ctx->error, ENOMEM,
10391                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10392                                    "cannot create matcher");
10393                 return NULL;
10394         }
10395         return &resource->entry;
10396 }
10397
10398 /**
10399  * Register the flow matcher.
10400  *
10401  * @param[in, out] dev
10402  *   Pointer to rte_eth_dev structure.
10403  * @param[in, out] ref
10404  *   Pointer to the flow matcher template.
10405  * @param[in, out] key
10406  *   Pointer to flow table key.
10407  * @param[in, out] dev_flow
10408  *   Pointer to the dev_flow.
10409  * @param[out] error
10410  *   pointer to error structure.
10411  *
10412  * @return
10413  *   0 on success, a negative errno value otherwise and rte_errno is set.
10414  */
10415 static int
10416 flow_dv_matcher_register(struct rte_eth_dev *dev,
10417                          struct mlx5_flow_dv_matcher *ref,
10418                          union mlx5_flow_tbl_key *key,
10419                          struct mlx5_flow *dev_flow,
10420                          const struct mlx5_flow_tunnel *tunnel,
10421                          uint32_t group_id,
10422                          struct rte_flow_error *error)
10423 {
10424         struct mlx5_list_entry *entry;
10425         struct mlx5_flow_dv_matcher *resource;
10426         struct mlx5_flow_tbl_resource *tbl;
10427         struct mlx5_flow_tbl_data_entry *tbl_data;
10428         struct mlx5_flow_cb_ctx ctx = {
10429                 .error = error,
10430                 .data = ref,
10431         };
10432         /*
10433          * The tunnel offload API requires this registration for cases when
10434          * a tunnel match rule was inserted before the tunnel set rule.
10435          */
10436         tbl = flow_dv_tbl_resource_get(dev, key->level,
10437                                        key->is_egress, key->is_fdb,
10438                                        dev_flow->external, tunnel,
10439                                        group_id, 0, key->id, error);
10440         if (!tbl)
10441                 return -rte_errno;      /* No need to refill the error info */
10442         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10443         ref->tbl = tbl;
10444         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10445         if (!entry) {
10446                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10447                 return rte_flow_error_set(error, ENOMEM,
10448                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10449                                           "cannot allocate ref memory");
10450         }
10451         resource = container_of(entry, typeof(*resource), entry);
10452         dev_flow->handle->dvh.matcher = resource;
10453         return 0;
10454 }
10455
10456 struct mlx5_list_entry *
10457 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10458 {
10459         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10460         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10461         struct mlx5_flow_dv_tag_resource *entry;
10462         uint32_t idx = 0;
10463         int ret;
10464
10465         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10466         if (!entry) {
10467                 rte_flow_error_set(ctx->error, ENOMEM,
10468                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10469                                    "cannot allocate resource memory");
10470                 return NULL;
10471         }
10472         entry->idx = idx;
10473         entry->tag_id = *(uint32_t *)(ctx->data);
10474         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10475                                                   &entry->action);
10476         if (ret) {
10477                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10478                 rte_flow_error_set(ctx->error, ENOMEM,
10479                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10480                                    NULL, "cannot create action");
10481                 return NULL;
10482         }
10483         return &entry->entry;
10484 }
10485
10486 int
10487 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10488                      void *cb_ctx)
10489 {
10490         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10491         struct mlx5_flow_dv_tag_resource *tag =
10492                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10493
10494         return *(uint32_t *)(ctx->data) != tag->tag_id;
10495 }
10496
10497 struct mlx5_list_entry *
10498 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10499                      void *cb_ctx)
10500 {
10501         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10502         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10503         struct mlx5_flow_dv_tag_resource *entry;
10504         uint32_t idx = 0;
10505
10506         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10507         if (!entry) {
10508                 rte_flow_error_set(ctx->error, ENOMEM,
10509                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10510                                    "cannot allocate tag resource memory");
10511                 return NULL;
10512         }
10513         memcpy(entry, oentry, sizeof(*entry));
10514         entry->idx = idx;
10515         return &entry->entry;
10516 }
10517
10518 void
10519 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10520 {
10521         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10522         struct mlx5_flow_dv_tag_resource *tag =
10523                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10524
10525         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10526 }
10527
10528 /**
10529  * Find existing tag resource or create and register a new one.
10530  *
10531  * @param[in, out] dev
10532  *   Pointer to rte_eth_dev structure.
10533  * @param[in, out] tag_be24
10534  *   Tag value in big endian, right-shifted by 8 bits.
10535  * @param[in, out] dev_flow
10536  *   Pointer to the dev_flow.
10537  * @param[out] error
10538  *   pointer to error structure.
10539  *
10540  * @return
10541  *   0 on success, a negative errno value otherwise and rte_errno is set.
10542  */
10543 static int
10544 flow_dv_tag_resource_register
10545                         (struct rte_eth_dev *dev,
10546                          uint32_t tag_be24,
10547                          struct mlx5_flow *dev_flow,
10548                          struct rte_flow_error *error)
10549 {
10550         struct mlx5_priv *priv = dev->data->dev_private;
10551         struct mlx5_flow_dv_tag_resource *resource;
10552         struct mlx5_list_entry *entry;
10553         struct mlx5_flow_cb_ctx ctx = {
10554                                         .error = error,
10555                                         .data = &tag_be24,
10556                                         };
10557         struct mlx5_hlist *tag_table;
10558
10559         tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10560                                       "tags",
10561                                       MLX5_TAGS_HLIST_ARRAY_SIZE,
10562                                       false, false, priv->sh,
10563                                       flow_dv_tag_create_cb,
10564                                       flow_dv_tag_match_cb,
10565                                       flow_dv_tag_remove_cb,
10566                                       flow_dv_tag_clone_cb,
10567                                       flow_dv_tag_clone_free_cb);
10568         if (unlikely(!tag_table))
10569                 return -rte_errno;
10570         entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10571         if (entry) {
10572                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10573                                         entry);
10574                 dev_flow->handle->dvh.rix_tag = resource->idx;
10575                 dev_flow->dv.tag_resource = resource;
10576                 return 0;
10577         }
10578         return -rte_errno;
10579 }
10580
10581 void
10582 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10583 {
10584         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10585         struct mlx5_flow_dv_tag_resource *tag =
10586                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10587
10588         MLX5_ASSERT(tag && sh && tag->action);
10589         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10590         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10591         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10592 }
10593
10594 /**
10595  * Release the tag.
10596  *
10597  * @param dev
10598  *   Pointer to Ethernet device.
10599  * @param tag_idx
10600  *   Tag index.
10601  *
10602  * @return
10603  *   1 while a reference on it exists, 0 when freed.
10604  */
10605 static int
10606 flow_dv_tag_release(struct rte_eth_dev *dev,
10607                     uint32_t tag_idx)
10608 {
10609         struct mlx5_priv *priv = dev->data->dev_private;
10610         struct mlx5_flow_dv_tag_resource *tag;
10611
10612         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10613         if (!tag)
10614                 return 0;
10615         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10616                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10617         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10618 }
10619
10620 /**
10621  * Translate action PORT_ID / REPRESENTED_PORT to vport.
10622  *
10623  * @param[in] dev
10624  *   Pointer to rte_eth_dev structure.
10625  * @param[in] action
10626  *   Pointer to action PORT_ID / REPRESENTED_PORT.
10627  * @param[out] dst_port_id
10628  *   The target port ID.
10629  * @param[out] error
10630  *   Pointer to the error structure.
10631  *
10632  * @return
10633  *   0 on success, a negative errno value otherwise and rte_errno is set.
10634  */
10635 static int
10636 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10637                                  const struct rte_flow_action *action,
10638                                  uint32_t *dst_port_id,
10639                                  struct rte_flow_error *error)
10640 {
10641         uint32_t port;
10642         struct mlx5_priv *priv;
10643
10644         switch (action->type) {
10645         case RTE_FLOW_ACTION_TYPE_PORT_ID: {
10646                 const struct rte_flow_action_port_id *conf;
10647
10648                 conf = (const struct rte_flow_action_port_id *)action->conf;
10649                 port = conf->original ? dev->data->port_id : conf->id;
10650                 break;
10651         }
10652         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
10653                 const struct rte_flow_action_ethdev *ethdev;
10654
10655                 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
10656                 port = ethdev->port_id;
10657                 break;
10658         }
10659         default:
10660                 MLX5_ASSERT(false);
10661                 return rte_flow_error_set(error, EINVAL,
10662                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
10663                                           "unknown E-Switch action");
10664         }
10665
10666         priv = mlx5_port_to_eswitch_info(port, false);
10667         if (!priv)
10668                 return rte_flow_error_set(error, -rte_errno,
10669                                           RTE_FLOW_ERROR_TYPE_ACTION,
10670                                           NULL,
10671                                           "No eswitch info was found for port");
10672 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10673         /*
10674          * This parameter is transferred to
10675          * mlx5dv_dr_action_create_dest_ib_port().
10676          */
10677         *dst_port_id = priv->dev_port;
10678 #else
10679         /*
10680          * Legacy mode, no LAG configurations is supported.
10681          * This parameter is transferred to
10682          * mlx5dv_dr_action_create_dest_vport().
10683          */
10684         *dst_port_id = priv->vport_id;
10685 #endif
10686         return 0;
10687 }
10688
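/*
 * Illustrative sketch (assumed caller, not driver code): a
 * REPRESENTED_PORT action resolving to a destination vport through the
 * helper above. `conf`, `act`, `vport` and `err` are local examples.
 *
 *     const struct rte_flow_action_ethdev conf = { .port_id = 1 };
 *     const struct rte_flow_action act = {
 *         .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
 *         .conf = &conf,
 *     };
 *     uint32_t vport;
 *     struct rte_flow_error err;
 *
 *     if (flow_dv_translate_action_port_id(dev, &act, &vport, &err) == 0)
 *         DRV_LOG(DEBUG, "destination vport/IB port %u", vport);
 */
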
10689 /**
10690  * Create a counter with aging configuration.
10691  *
10692  * @param[in] dev
10693  *   Pointer to rte_eth_dev structure.
10694  * @param[in] dev_flow
10695  *   Pointer to the mlx5_flow.
10696  * @param[in] count
10697  *   Pointer to the counter action configuration.
10698  * @param[in] age
10699  *   Pointer to the aging action configuration.
10700  *
10701  * @return
10702  *   Index to flow counter on success, 0 otherwise.
10703  */
10704 static uint32_t
10705 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10706                                 struct mlx5_flow *dev_flow,
10707                                 const struct rte_flow_action_count *count
10708                                         __rte_unused,
10709                                 const struct rte_flow_action_age *age)
10710 {
10711         uint32_t counter;
10712         struct mlx5_age_param *age_param;
10713
10714         counter = flow_dv_counter_alloc(dev, !!age);
10715         if (!counter || age == NULL)
10716                 return counter;
10717         age_param = flow_dv_counter_idx_get_age(dev, counter);
10718         age_param->context = age->context ? age->context :
10719                 (void *)(uintptr_t)(dev_flow->flow_idx);
10720         age_param->timeout = age->timeout;
10721         age_param->port_id = dev->data->port_id;
10722         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10723         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10724         return counter;
10725 }
10726
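/*
 * Illustrative sketch (values assumed): attaching aging to a counter.
 * Leaving `context` NULL makes the flow index the reported aging
 * context, as implemented above.
 *
 *     const struct rte_flow_action_age age = {
 *         .timeout = 10,
 *         .context = NULL,
 *     };
 *     uint32_t cnt = flow_dv_translate_create_counter(dev, dev_flow,
 *                                                     NULL, &age);
 *
 * A zero return means counter allocation failed.
 */
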
10727 /**
10728  * Add Tx queue matcher.
10729  *
10730  * @param[in] dev
10731  *   Pointer to the dev struct.
10732  * @param[in, out] matcher
10733  *   Flow matcher.
10734  * @param[in, out] key
10735  *   Flow matcher value.
10736  * @param[in] item
10737  *   Flow pattern to translate.
10740  */
10741 static void
10742 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10743                                 void *matcher, void *key,
10744                                 const struct rte_flow_item *item)
10745 {
10746         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10747         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10748         void *misc_m =
10749                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10750         void *misc_v =
10751                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10752         struct mlx5_txq_ctrl *txq;
10753         uint32_t queue, mask;
10754
10755         queue_m = (const void *)item->mask;
10756         queue_v = (const void *)item->spec;
10757         if (!queue_v)
10758                 return;
10759         txq = mlx5_txq_get(dev, queue_v->queue);
10760         if (!txq)
10761                 return;
10762         if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
10763                 queue = txq->obj->sq->id;
10764         else
10765                 queue = txq->obj->sq_obj.sq->id;
10766         mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
10767         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
10768         MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
10769         mlx5_txq_release(dev, queue_v->queue);
10770 }
10771
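/*
 * Illustrative sketch: matching traffic sent from Tx queue 3 with an
 * exact mask. The item type is PMD-internal, so this is how the driver
 * itself would build the pattern, not a public rte_flow example.
 *
 *     struct mlx5_rte_flow_item_tx_queue spec = { .queue = 3 };
 *     struct mlx5_rte_flow_item_tx_queue mask = { .queue = UINT32_MAX };
 *     struct rte_flow_item item = {
 *         .type = (enum rte_flow_item_type)
 *                 MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
 *         .spec = &spec,
 *         .mask = &mask,
 *     };
 *
 *     flow_dv_translate_item_tx_queue(dev, matcher, key, &item);
 */
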
10772 /**
10773  * Set the hash fields according to the @p flow information.
10774  *
10775  * @param[in] dev_flow
10776  *   Pointer to the mlx5_flow.
10777  * @param[in] rss_desc
10778  *   Pointer to the mlx5_flow_rss_desc.
10779  */
10780 static void
10781 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10782                        struct mlx5_flow_rss_desc *rss_desc)
10783 {
10784         uint64_t items = dev_flow->handle->layers;
10785         int rss_inner = 0;
10786         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10787
10788         dev_flow->hash_fields = 0;
10789 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10790         if (rss_desc->level >= 2)
10791                 rss_inner = 1;
10792 #endif
10793         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10794             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10795                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10796                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10797                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10798                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
10799                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10800                         else
10801                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10802                 }
10803         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10804                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10805                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10806                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
10807                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10808                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
10809                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10810                         else
10811                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10812                 }
10813         }
10814         if (dev_flow->hash_fields == 0)
10815                 /*
10816                  * There is no match between the RSS types and the
10817                  * L3 protocol (IPv4/IPv6) defined in the flow rule.
10818                  */
10819                 return;
10820         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10821             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10822                 if (rss_types & RTE_ETH_RSS_UDP) {
10823                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
10824                                 dev_flow->hash_fields |=
10825                                                 IBV_RX_HASH_SRC_PORT_UDP;
10826                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
10827                                 dev_flow->hash_fields |=
10828                                                 IBV_RX_HASH_DST_PORT_UDP;
10829                         else
10830                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10831                 }
10832         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10833                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10834                 if (rss_types & RTE_ETH_RSS_TCP) {
10835                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
10836                                 dev_flow->hash_fields |=
10837                                                 IBV_RX_HASH_SRC_PORT_TCP;
10838                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
10839                                 dev_flow->hash_fields |=
10840                                                 IBV_RX_HASH_DST_PORT_TCP;
10841                         else
10842                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10843                 }
10844         }
10845         if (rss_inner)
10846                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10847 }
10848
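/*
 * Worked example (assumed inputs): for a rule whose detected layers are
 * outer IPv4 + UDP and whose RSS request is
 * RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_UDP at level < 2, the logic above
 * yields
 *
 *     dev_flow->hash_fields ==
 *             MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH;
 *
 * i.e. both IPv4 addresses and both UDP ports feed the hash. Adding
 * RTE_ETH_RSS_L3_SRC_ONLY narrows the L3 part to IBV_RX_HASH_SRC_IPV4,
 * and an L3 mismatch leaves hash_fields at 0, which makes
 * flow_dv_hrxq_prepare() below fall back to a single queue.
 */
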
10849 /**
10850  * Prepare an Rx Hash queue.
10851  *
10852  * @param dev
10853  *   Pointer to Ethernet device.
10854  * @param[in] dev_flow
10855  *   Pointer to the mlx5_flow.
10856  * @param[in] rss_desc
10857  *   Pointer to the mlx5_flow_rss_desc.
10858  * @param[out] hrxq_idx
10859  *   Hash Rx queue index.
10860  *
10861  * @return
10862  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10863  */
10864 static struct mlx5_hrxq *
10865 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10866                      struct mlx5_flow *dev_flow,
10867                      struct mlx5_flow_rss_desc *rss_desc,
10868                      uint32_t *hrxq_idx)
10869 {
10870         struct mlx5_priv *priv = dev->data->dev_private;
10871         struct mlx5_flow_handle *dh = dev_flow->handle;
10872         struct mlx5_hrxq *hrxq;
10873
10874         MLX5_ASSERT(rss_desc->queue_num);
10875         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10876         rss_desc->hash_fields = dev_flow->hash_fields;
10877         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10878         rss_desc->shared_rss = 0;
10879         if (rss_desc->hash_fields == 0)
10880                 rss_desc->queue_num = 1;
10881         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10882         if (!*hrxq_idx)
10883                 return NULL;
10884         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10885                               *hrxq_idx);
10886         return hrxq;
10887 }
10888
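/*
 * Illustrative sketch mirroring the QUEUE sub-action handling further
 * below: preparing a fate queue for one Rx queue. `rss_desc` is the
 * per-thread workspace descriptor and `queue_index` is assumed.
 *
 *     uint32_t hrxq_idx;
 *     struct mlx5_hrxq *hrxq;
 *
 *     rss_desc->queue_num = 1;
 *     rss_desc->queue[0] = queue_index;
 *     hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
 *     if (!hrxq)
 *         return rte_flow_error_set(error, rte_errno,
 *                                   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 *                                   "cannot create fate queue");
 */
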
10889 /**
10890  * Release sample sub action resource.
10891  *
10892  * @param[in, out] dev
10893  *   Pointer to rte_eth_dev structure.
10894  * @param[in] act_res
10895  *   Pointer to sample sub action resource.
10896  */
10897 static void
10898 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10899                                    struct mlx5_flow_sub_actions_idx *act_res)
10900 {
10901         if (act_res->rix_hrxq) {
10902                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10903                 act_res->rix_hrxq = 0;
10904         }
10905         if (act_res->rix_encap_decap) {
10906                 flow_dv_encap_decap_resource_release(dev,
10907                                                      act_res->rix_encap_decap);
10908                 act_res->rix_encap_decap = 0;
10909         }
10910         if (act_res->rix_port_id_action) {
10911                 flow_dv_port_id_action_resource_release(dev,
10912                                                 act_res->rix_port_id_action);
10913                 act_res->rix_port_id_action = 0;
10914         }
10915         if (act_res->rix_tag) {
10916                 flow_dv_tag_release(dev, act_res->rix_tag);
10917                 act_res->rix_tag = 0;
10918         }
10919         if (act_res->rix_jump) {
10920                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10921                 act_res->rix_jump = 0;
10922         }
10923 }
10924
10925 int
10926 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
10927                         struct mlx5_list_entry *entry, void *cb_ctx)
10928 {
10929         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10930         struct rte_eth_dev *dev = ctx->dev;
10931         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
10932         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
10933                                                               typeof(*resource),
10934                                                               entry);
10935
10936         if (ctx_resource->ratio == resource->ratio &&
10937             ctx_resource->ft_type == resource->ft_type &&
10938             ctx_resource->ft_id == resource->ft_id &&
10939             ctx_resource->set_action == resource->set_action &&
10940             !memcmp((void *)&ctx_resource->sample_act,
10941                     (void *)&resource->sample_act,
10942                     sizeof(struct mlx5_flow_sub_actions_list))) {
10943                 /*
10944                  * Existing sample action should release the prepared
10945                  * sub-actions reference counter.
10946                  */
10947                 flow_dv_sample_sub_actions_release(dev,
10948                                                    &ctx_resource->sample_idx);
10949                 return 0;
10950         }
10951         return 1;
10952 }
10953
10954 struct mlx5_list_entry *
10955 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
10956 {
10957         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10958         struct rte_eth_dev *dev = ctx->dev;
10959         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
10960         void **sample_dv_actions = ctx_resource->sub_actions;
10961         struct mlx5_flow_dv_sample_resource *resource;
10962         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10963         struct mlx5_priv *priv = dev->data->dev_private;
10964         struct mlx5_dev_ctx_shared *sh = priv->sh;
10965         struct mlx5_flow_tbl_resource *tbl;
10966         uint32_t idx = 0;
10967         const uint32_t next_ft_step = 1;
10968         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
10969         uint8_t is_egress = 0;
10970         uint8_t is_transfer = 0;
10971         struct rte_flow_error *error = ctx->error;
10972
10973         /* Register new sample resource. */
10974         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10975         if (!resource) {
10976                 rte_flow_error_set(error, ENOMEM,
10977                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10978                                           NULL,
10979                                           "cannot allocate resource memory");
10980                 return NULL;
10981         }
10982         *resource = *ctx_resource;
10983         /* Create normal path table level */
10984         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10985                 is_transfer = 1;
10986         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10987                 is_egress = 1;
10988         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10989                                         is_egress, is_transfer,
10990                                         true, NULL, 0, 0, 0, error);
10991         if (!tbl) {
10992                 rte_flow_error_set(error, ENOMEM,
10993                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10994                                           NULL,
10995                                           "failed to create normal path table "
10996                                           "for sample");
10997                 goto error;
10998         }
10999         resource->normal_path_tbl = tbl;
11000         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11001                 if (!sh->default_miss_action) {
11002                         rte_flow_error_set(error, ENOMEM,
11003                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11004                                                 NULL,
11005                                                 "default miss action was not "
11006                                                 "created");
11007                         goto error;
11008                 }
11009                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11010                                                 sh->default_miss_action;
11011         }
11012         /* Create a DR sample action */
11013         sampler_attr.sample_ratio = resource->ratio;
11014         sampler_attr.default_next_table = tbl->obj;
11015         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11016         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11017                                                         &sample_dv_actions[0];
11018         sampler_attr.action = resource->set_action;
11019         if (mlx5_os_flow_dr_create_flow_action_sampler
11020                         (&sampler_attr, &resource->verbs_action)) {
11021                 rte_flow_error_set(error, ENOMEM,
11022                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11023                                         NULL, "cannot create sample action");
11024                 goto error;
11025         }
11026         resource->idx = idx;
11027         resource->dev = dev;
11028         return &resource->entry;
11029 error:
11030         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11031                 flow_dv_sample_sub_actions_release(dev,
11032                                                    &resource->sample_idx);
11033         if (resource->normal_path_tbl)
11034                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11035                                 resource->normal_path_tbl);
11036         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11037         return NULL;
11039 }
11040
11041 struct mlx5_list_entry *
11042 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11043                          struct mlx5_list_entry *entry __rte_unused,
11044                          void *cb_ctx)
11045 {
11046         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11047         struct rte_eth_dev *dev = ctx->dev;
11048         struct mlx5_flow_dv_sample_resource *resource;
11049         struct mlx5_priv *priv = dev->data->dev_private;
11050         struct mlx5_dev_ctx_shared *sh = priv->sh;
11051         uint32_t idx = 0;
11052
11053         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11054         if (!resource) {
11055                 rte_flow_error_set(ctx->error, ENOMEM,
11056                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11057                                           NULL,
11058                                           "cannot allocate resource memory");
11059                 return NULL;
11060         }
11061         memcpy(resource, entry, sizeof(*resource));
11062         resource->idx = idx;
11063         resource->dev = dev;
11064         return &resource->entry;
11065 }
11066
11067 void
11068 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11069                              struct mlx5_list_entry *entry)
11070 {
11071         struct mlx5_flow_dv_sample_resource *resource =
11072                                   container_of(entry, typeof(*resource), entry);
11073         struct rte_eth_dev *dev = resource->dev;
11074         struct mlx5_priv *priv = dev->data->dev_private;
11075
11076         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11077 }
11078
11079 /**
11080  * Find existing sample resource or create and register a new one.
11081  *
11082  * @param[in, out] dev
11083  *   Pointer to rte_eth_dev structure.
11084  * @param[in] ref
11085  *   Pointer to sample resource reference.
11086  * @param[in, out] dev_flow
11087  *   Pointer to the dev_flow.
11088  * @param[out] error
11089  *   Pointer to the error structure.
11090  *
11091  * @return
11092  *   0 on success, a negative errno value otherwise and rte_errno is set.
11093  */
11094 static int
11095 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11096                          struct mlx5_flow_dv_sample_resource *ref,
11097                          struct mlx5_flow *dev_flow,
11098                          struct rte_flow_error *error)
11099 {
11100         struct mlx5_flow_dv_sample_resource *resource;
11101         struct mlx5_list_entry *entry;
11102         struct mlx5_priv *priv = dev->data->dev_private;
11103         struct mlx5_flow_cb_ctx ctx = {
11104                 .dev = dev,
11105                 .error = error,
11106                 .data = ref,
11107         };
11108
11109         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11110         if (!entry)
11111                 return -rte_errno;
11112         resource = container_of(entry, typeof(*resource), entry);
11113         dev_flow->handle->dvh.rix_sample = resource->idx;
11114         dev_flow->dv.sample_res = resource;
11115         return 0;
11116 }
11117
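/*
 * Registration sketch (hypothetical reference values): the callbacks
 * above plug into the generic mlx5_list, so registering an equal
 * reference reuses the existing entry (its sub-action references are
 * dropped by flow_dv_sample_match_cb()) instead of creating a new DR
 * sampler.
 *
 *     struct mlx5_flow_dv_sample_resource ref = {
 *         .ratio = 2,
 *         .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
 *         .ft_id = dev_flow->dv.group,
 *     };
 *
 *     if (flow_dv_sample_resource_register(dev, &ref, dev_flow, error))
 *         return -rte_errno;
 */
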
11118 int
11119 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11120                             struct mlx5_list_entry *entry, void *cb_ctx)
11121 {
11122         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11123         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11124         struct rte_eth_dev *dev = ctx->dev;
11125         struct mlx5_flow_dv_dest_array_resource *resource =
11126                                   container_of(entry, typeof(*resource), entry);
11127         uint32_t idx = 0;
11128
11129         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11130             ctx_resource->ft_type == resource->ft_type &&
11131             !memcmp((void *)resource->sample_act,
11132                     (void *)ctx_resource->sample_act,
11133                    (ctx_resource->num_of_dest *
11134                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11135                 /*
11136                  * Existing sample action should release the prepared
11137                  * sub-actions reference counter.
11138                  */
11139                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11140                         flow_dv_sample_sub_actions_release(dev,
11141                                         &ctx_resource->sample_idx[idx]);
11142                 return 0;
11143         }
11144         return 1;
11145 }
11146
11147 struct mlx5_list_entry *
11148 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11149 {
11150         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11151         struct rte_eth_dev *dev = ctx->dev;
11152         struct mlx5_flow_dv_dest_array_resource *resource;
11153         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11154         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11155         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11156         struct mlx5_priv *priv = dev->data->dev_private;
11157         struct mlx5_dev_ctx_shared *sh = priv->sh;
11158         struct mlx5_flow_sub_actions_list *sample_act;
11159         struct mlx5dv_dr_domain *domain;
11160         uint32_t idx = 0, res_idx = 0;
11161         struct rte_flow_error *error = ctx->error;
11162         uint64_t action_flags;
11163         int ret;
11164
11165         /* Register new destination array resource. */
11166         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11167                                             &res_idx);
11168         if (!resource) {
11169                 rte_flow_error_set(error, ENOMEM,
11170                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11171                                           NULL,
11172                                           "cannot allocate resource memory");
11173                 return NULL;
11174         }
11175         *resource = *ctx_resource;
11176         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11177                 domain = sh->fdb_domain;
11178         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11179                 domain = sh->rx_domain;
11180         else
11181                 domain = sh->tx_domain;
11182         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11183                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11184                                  mlx5_malloc(MLX5_MEM_ZERO,
11185                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11186                                  0, SOCKET_ID_ANY);
11187                 if (!dest_attr[idx]) {
11188                         rte_flow_error_set(error, ENOMEM,
11189                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11190                                            NULL,
11191                                            "cannot allocate resource memory");
11192                         goto error;
11193                 }
11194                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11195                 sample_act = &ctx_resource->sample_act[idx];
11196                 action_flags = sample_act->action_flags;
11197                 switch (action_flags) {
11198                 case MLX5_FLOW_ACTION_QUEUE:
11199                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11200                         break;
11201                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11202                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11203                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11204                         dest_attr[idx]->dest_reformat->reformat =
11205                                         sample_act->dr_encap_action;
11206                         dest_attr[idx]->dest_reformat->dest =
11207                                         sample_act->dr_port_id_action;
11208                         break;
11209                 case MLX5_FLOW_ACTION_PORT_ID:
11210                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11211                         break;
11212                 case MLX5_FLOW_ACTION_JUMP:
11213                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11214                         break;
11215                 default:
11216                         rte_flow_error_set(error, EINVAL,
11217                                            RTE_FLOW_ERROR_TYPE_ACTION,
11218                                            NULL,
11219                                            "unsupported actions type");
11220                         goto error;
11221                 }
11222         }
11223         /* Create a dest array action. */
11224         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11225                                                 (domain,
11226                                                  resource->num_of_dest,
11227                                                  dest_attr,
11228                                                  &resource->action);
11229         if (ret) {
11230                 rte_flow_error_set(error, ENOMEM,
11231                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11232                                    NULL,
11233                                    "cannot create destination array action");
11234                 goto error;
11235         }
11236         resource->idx = res_idx;
11237         resource->dev = dev;
11238         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11239                 mlx5_free(dest_attr[idx]);
11240         return &resource->entry;
11241 error:
11242         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11243                 flow_dv_sample_sub_actions_release(dev,
11244                                                    &resource->sample_idx[idx]);
11245                 if (dest_attr[idx])
11246                         mlx5_free(dest_attr[idx]);
11247         }
11248         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11249         return NULL;
11250 }
11251
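/*
 * Note on the combined PORT_ID | ENCAP case above: the DR API expects
 * one MLX5DV_DR_ACTION_DEST_REFORMAT entry that bundles the packet
 * reformat (encap) with the destination vport, rather than two separate
 * destinations, so the mirrored copy is encapsulated on its way to the
 * port. Schematically, for destination i:
 *
 *     dest_attr[i]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
 *     dest_attr[i]->dest_reformat->reformat = dr_encap_action;
 *     dest_attr[i]->dest_reformat->dest = dr_port_id_action;
 */
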
11252 struct mlx5_list_entry *
11253 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11254                             struct mlx5_list_entry *entry __rte_unused,
11255                             void *cb_ctx)
11256 {
11257         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11258         struct rte_eth_dev *dev = ctx->dev;
11259         struct mlx5_flow_dv_dest_array_resource *resource;
11260         struct mlx5_priv *priv = dev->data->dev_private;
11261         struct mlx5_dev_ctx_shared *sh = priv->sh;
11262         uint32_t res_idx = 0;
11263         struct rte_flow_error *error = ctx->error;
11264
11265         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11266                                       &res_idx);
11267         if (!resource) {
11268                 rte_flow_error_set(error, ENOMEM,
11269                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11270                                           NULL,
11271                                           "cannot allocate dest-array memory");
11272                 return NULL;
11273         }
11274         memcpy(resource, entry, sizeof(*resource));
11275         resource->idx = res_idx;
11276         resource->dev = dev;
11277         return &resource->entry;
11278 }
11279
11280 void
11281 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11282                                  struct mlx5_list_entry *entry)
11283 {
11284         struct mlx5_flow_dv_dest_array_resource *resource =
11285                         container_of(entry, typeof(*resource), entry);
11286         struct rte_eth_dev *dev = resource->dev;
11287         struct mlx5_priv *priv = dev->data->dev_private;
11288
11289         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11290 }
11291
11292 /**
11293  * Find existing destination array resource or create and register a new one.
11294  *
11295  * @param[in, out] dev
11296  *   Pointer to rte_eth_dev structure.
11297  * @param[in] ref
11298  *   Pointer to destination array resource reference.
11299  * @param[in, out] dev_flow
11300  *   Pointer to the dev_flow.
11301  * @param[out] error
11302  *   Pointer to the error structure.
11303  *
11304  * @return
11305  *   0 on success, a negative errno value otherwise and rte_errno is set.
11306  */
11307 static int
11308 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11309                          struct mlx5_flow_dv_dest_array_resource *ref,
11310                          struct mlx5_flow *dev_flow,
11311                          struct rte_flow_error *error)
11312 {
11313         struct mlx5_flow_dv_dest_array_resource *resource;
11314         struct mlx5_priv *priv = dev->data->dev_private;
11315         struct mlx5_list_entry *entry;
11316         struct mlx5_flow_cb_ctx ctx = {
11317                 .dev = dev,
11318                 .error = error,
11319                 .data = ref,
11320         };
11321
11322         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11323         if (!entry)
11324                 return -rte_errno;
11325         resource = container_of(entry, typeof(*resource), entry);
11326         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11327         dev_flow->dv.dest_array_res = resource;
11328         return 0;
11329 }
11330
11331 /**
11332  * Convert Sample action to DV specification.
11333  *
11334  * @param[in] dev
11335  *   Pointer to rte_eth_dev structure.
11336  * @param[in] action
11337  *   Pointer to sample action structure.
11338  * @param[in, out] dev_flow
11339  *   Pointer to the mlx5_flow.
11340  * @param[in] attr
11341  *   Pointer to the flow attributes.
11342  * @param[in, out] num_of_dest
11343  *   Pointer to the num of destination.
11344  * @param[in, out] sample_actions
11345  *   Pointer to sample actions list.
11346  * @param[in, out] res
11347  *   Pointer to sample resource.
11348  * @param[out] error
11349  *   Pointer to the error structure.
11350  *
11351  * @return
11352  *   0 on success, a negative errno value otherwise and rte_errno is set.
11353  */
11354 static int
11355 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11356                                 const struct rte_flow_action_sample *action,
11357                                 struct mlx5_flow *dev_flow,
11358                                 const struct rte_flow_attr *attr,
11359                                 uint32_t *num_of_dest,
11360                                 void **sample_actions,
11361                                 struct mlx5_flow_dv_sample_resource *res,
11362                                 struct rte_flow_error *error)
11363 {
11364         struct mlx5_priv *priv = dev->data->dev_private;
11365         const struct rte_flow_action *sub_actions;
11366         struct mlx5_flow_sub_actions_list *sample_act;
11367         struct mlx5_flow_sub_actions_idx *sample_idx;
11368         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11369         struct rte_flow *flow = dev_flow->flow;
11370         struct mlx5_flow_rss_desc *rss_desc;
11371         uint64_t action_flags = 0;
11372
11373         MLX5_ASSERT(wks);
11374         rss_desc = &wks->rss_desc;
11375         sample_act = &res->sample_act;
11376         sample_idx = &res->sample_idx;
11377         res->ratio = action->ratio;
11378         sub_actions = action->actions;
11379         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11380                 int type = sub_actions->type;
11381                 uint32_t pre_rix = 0;
11382                 void *pre_r;
11383                 switch (type) {
11384                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11385                 {
11386                         const struct rte_flow_action_queue *queue;
11387                         struct mlx5_hrxq *hrxq;
11388                         uint32_t hrxq_idx;
11389
11390                         queue = sub_actions->conf;
11391                         rss_desc->queue_num = 1;
11392                         rss_desc->queue[0] = queue->index;
11393                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11394                                                     rss_desc, &hrxq_idx);
11395                         if (!hrxq)
11396                                 return rte_flow_error_set
11397                                         (error, rte_errno,
11398                                          RTE_FLOW_ERROR_TYPE_ACTION,
11399                                          NULL,
11400                                          "cannot create fate queue");
11401                         sample_act->dr_queue_action = hrxq->action;
11402                         sample_idx->rix_hrxq = hrxq_idx;
11403                         sample_actions[sample_act->actions_num++] =
11404                                                 hrxq->action;
11405                         (*num_of_dest)++;
11406                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11407                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11408                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11409                         dev_flow->handle->fate_action =
11410                                         MLX5_FLOW_FATE_QUEUE;
11411                         break;
11412                 }
11413                 case RTE_FLOW_ACTION_TYPE_RSS:
11414                 {
11415                         struct mlx5_hrxq *hrxq;
11416                         uint32_t hrxq_idx;
11417                         const struct rte_flow_action_rss *rss;
11418                         const uint8_t *rss_key;
11419
11420                         rss = sub_actions->conf;
11421                         memcpy(rss_desc->queue, rss->queue,
11422                                rss->queue_num * sizeof(uint16_t));
11423                         rss_desc->queue_num = rss->queue_num;
11424                         /* NULL RSS key indicates default RSS key. */
11425                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11426                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11427                         /*
11428                          * rss->level and rss->types should be set in advance
11429                          * when expanding items for RSS.
11430                          */
11431                         flow_dv_hashfields_set(dev_flow, rss_desc);
11432                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11433                                                     rss_desc, &hrxq_idx);
11434                         if (!hrxq)
11435                                 return rte_flow_error_set
11436                                         (error, rte_errno,
11437                                          RTE_FLOW_ERROR_TYPE_ACTION,
11438                                          NULL,
11439                                          "cannot create fate queue");
11440                         sample_act->dr_queue_action = hrxq->action;
11441                         sample_idx->rix_hrxq = hrxq_idx;
11442                         sample_actions[sample_act->actions_num++] =
11443                                                 hrxq->action;
11444                         (*num_of_dest)++;
11445                         action_flags |= MLX5_FLOW_ACTION_RSS;
11446                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11447                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11448                         dev_flow->handle->fate_action =
11449                                         MLX5_FLOW_FATE_QUEUE;
11450                         break;
11451                 }
11452                 case RTE_FLOW_ACTION_TYPE_MARK:
11453                 {
11454                         uint32_t tag_be = mlx5_flow_mark_set
11455                                 (((const struct rte_flow_action_mark *)
11456                                 (sub_actions->conf))->id);
11457
11458                         dev_flow->handle->mark = 1;
11459                         pre_rix = dev_flow->handle->dvh.rix_tag;
11460                         /* Save the mark resource before sample */
11461                         pre_r = dev_flow->dv.tag_resource;
11462                         if (flow_dv_tag_resource_register(dev, tag_be,
11463                                                   dev_flow, error))
11464                                 return -rte_errno;
11465                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11466                         sample_act->dr_tag_action =
11467                                 dev_flow->dv.tag_resource->action;
11468                         sample_idx->rix_tag =
11469                                 dev_flow->handle->dvh.rix_tag;
11470                         sample_actions[sample_act->actions_num++] =
11471                                                 sample_act->dr_tag_action;
11472                         /* Recover the mark resource after sample */
11473                         dev_flow->dv.tag_resource = pre_r;
11474                         dev_flow->handle->dvh.rix_tag = pre_rix;
11475                         action_flags |= MLX5_FLOW_ACTION_MARK;
11476                         break;
11477                 }
11478                 case RTE_FLOW_ACTION_TYPE_COUNT:
11479                 {
11480                         if (!flow->counter) {
11481                                 flow->counter =
11482                                         flow_dv_translate_create_counter(dev,
11483                                                 dev_flow, sub_actions->conf,
11484                                                 0);
11485                                 if (!flow->counter)
11486                                         return rte_flow_error_set
11487                                                 (error, rte_errno,
11488                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11489                                                 NULL,
11490                                                 "cannot create counter"
11491                                                 " object.");
11492                         }
11493                         sample_act->dr_cnt_action =
11494                                   (flow_dv_counter_get_by_idx(dev,
11495                                   flow->counter, NULL))->action;
11496                         sample_actions[sample_act->actions_num++] =
11497                                                 sample_act->dr_cnt_action;
11498                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11499                         break;
11500                 }
11501                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11502                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11503                 {
11504                         struct mlx5_flow_dv_port_id_action_resource
11505                                         port_id_resource;
11506                         uint32_t port_id = 0;
11507
11508                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11509                         /* Save the port id resource before sample */
11510                         pre_rix = dev_flow->handle->rix_port_id_action;
11511                         pre_r = dev_flow->dv.port_id_action;
11512                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11513                                                              &port_id, error))
11514                                 return -rte_errno;
11515                         port_id_resource.port_id = port_id;
11516                         if (flow_dv_port_id_action_resource_register
11517                             (dev, &port_id_resource, dev_flow, error))
11518                                 return -rte_errno;
11519                         sample_act->dr_port_id_action =
11520                                 dev_flow->dv.port_id_action->action;
11521                         sample_idx->rix_port_id_action =
11522                                 dev_flow->handle->rix_port_id_action;
11523                         sample_actions[sample_act->actions_num++] =
11524                                                 sample_act->dr_port_id_action;
11525                         /* Recover the port id resource after sample */
11526                         dev_flow->dv.port_id_action = pre_r;
11527                         dev_flow->handle->rix_port_id_action = pre_rix;
11528                         (*num_of_dest)++;
11529                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11530                         break;
11531                 }
11532                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11533                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11534                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11535                         /* Save the encap resource before sample */
11536                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11537                         pre_r = dev_flow->dv.encap_decap;
11538                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11539                                                            dev_flow,
11540                                                            attr->transfer,
11541                                                            error))
11542                                 return -rte_errno;
11543                         sample_act->dr_encap_action =
11544                                 dev_flow->dv.encap_decap->action;
11545                         sample_idx->rix_encap_decap =
11546                                 dev_flow->handle->dvh.rix_encap_decap;
11547                         sample_actions[sample_act->actions_num++] =
11548                                                 sample_act->dr_encap_action;
11549                         /* Recover the encap resource after sample */
11550                         dev_flow->dv.encap_decap = pre_r;
11551                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11552                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11553                         break;
11554                 default:
11555                         return rte_flow_error_set(error, EINVAL,
11556                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11557                                 NULL,
11558                                 "unsupported action for sampler");
11559                 }
11560         }
11561         sample_act->action_flags = action_flags;
11562         res->ft_id = dev_flow->dv.group;
11563         if (attr->transfer) {
11564                 union {
11565                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11566                         uint64_t set_action;
11567                 } action_ctx = { .set_action = 0 };
11568
11569                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11570                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11571                          MLX5_MODIFICATION_TYPE_SET);
11572                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11573                          MLX5_MODI_META_REG_C_0);
11574                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11575                          priv->vport_meta_tag);
11576                 res->set_action = action_ctx.set_action;
11577         } else if (attr->ingress) {
11578                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11579         } else {
11580                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11581         }
11582         return 0;
11583 }
11584
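/*
 * Illustrative rte_flow usage this translation serves (values assumed):
 * sample one of every two packets to queue 0 and count the sampled
 * copies.
 *
 *     struct rte_flow_action_queue q = { .index = 0 };
 *     struct rte_flow_action sub[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *         { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *         .ratio = 2,
 *         .actions = sub,
 *     };
 *
 * The loop above walks `sub`, fills the DR sub-action array and
 * prepares `res` for flow_dv_sample_resource_register().
 */
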
11585 /**
11586  * Convert Sample action to DV specification.
11587  *
11588  * @param[in] dev
11589  *   Pointer to rte_eth_dev structure.
11590  * @param[in, out] dev_flow
11591  *   Pointer to the mlx5_flow.
11592  * @param[in] num_of_dest
11593  *   The num of destination.
11594  * @param[in, out] res
11595  *   Pointer to sample resource.
11596  * @param[in, out] mdest_res
11597  *   Pointer to destination array resource.
11598  * @param[in] sample_actions
11599  *   Pointer to sample path actions list.
11600  * @param[in] action_flags
11601  *   Holds the actions detected until now.
11602  * @param[out] error
11603  *   Pointer to the error structure.
11604  *
11605  * @return
11606  *   0 on success, a negative errno value otherwise and rte_errno is set.
11607  */
11608 static int
11609 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11610                              struct mlx5_flow *dev_flow,
11611                              uint32_t num_of_dest,
11612                              struct mlx5_flow_dv_sample_resource *res,
11613                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11614                              void **sample_actions,
11615                              uint64_t action_flags,
11616                              struct rte_flow_error *error)
11617 {
11618         /* Update the normal path action resource into the last index of the array. */
11619         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11620         struct mlx5_flow_sub_actions_list *sample_act =
11621                                         &mdest_res->sample_act[dest_index];
11622         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11623         struct mlx5_flow_rss_desc *rss_desc;
11624         uint32_t normal_idx = 0;
11625         struct mlx5_hrxq *hrxq;
11626         uint32_t hrxq_idx;
11627
11628         MLX5_ASSERT(wks);
11629         rss_desc = &wks->rss_desc;
11630         if (num_of_dest > 1) {
11631                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11632                         /* Handle QP action for mirroring */
11633                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11634                                                     rss_desc, &hrxq_idx);
11635                         if (!hrxq)
11636                                 return rte_flow_error_set
11637                                      (error, rte_errno,
11638                                       RTE_FLOW_ERROR_TYPE_ACTION,
11639                                       NULL,
11640                                       "cannot create rx queue");
11641                         normal_idx++;
11642                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11643                         sample_act->dr_queue_action = hrxq->action;
11644                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11645                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11646                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11647                 }
11648                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11649                         normal_idx++;
11650                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11651                                 dev_flow->handle->dvh.rix_encap_decap;
11652                         sample_act->dr_encap_action =
11653                                 dev_flow->dv.encap_decap->action;
11654                         dev_flow->handle->dvh.rix_encap_decap = 0;
11655                 }
11656                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11657                         normal_idx++;
11658                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11659                                 dev_flow->handle->rix_port_id_action;
11660                         sample_act->dr_port_id_action =
11661                                 dev_flow->dv.port_id_action->action;
11662                         dev_flow->handle->rix_port_id_action = 0;
11663                 }
11664                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11665                         normal_idx++;
11666                         mdest_res->sample_idx[dest_index].rix_jump =
11667                                 dev_flow->handle->rix_jump;
11668                         sample_act->dr_jump_action =
11669                                 dev_flow->dv.jump->action;
11670                         dev_flow->handle->rix_jump = 0;
11671                 }
11672                 sample_act->actions_num = normal_idx;
11673                 /* Update the sample action resource into the first index of the array. */
11674                 mdest_res->ft_type = res->ft_type;
11675                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11676                                 sizeof(struct mlx5_flow_sub_actions_idx));
11677                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11678                                 sizeof(struct mlx5_flow_sub_actions_list));
11679                 mdest_res->num_of_dest = num_of_dest;
11680                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11681                                                          dev_flow, error))
11682                         return rte_flow_error_set(error, EINVAL,
11683                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11684                                                   NULL, "can't create sample "
11685                                                   "action");
11686         } else {
11687                 res->sub_actions = sample_actions;
11688                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11689                         return rte_flow_error_set(error, EINVAL,
11690                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11691                                                   NULL,
11692                                                   "can't create sample action");
11693         }
11694         return 0;
11695 }
11696
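/*
 * Decision sketch: with a single destination the flow registers a plain
 * DR sampler through flow_dv_sample_resource_register(); with more than
 * one (mirroring) the sampler sub-actions land in slot 0 of `mdest_res`
 * while the normal-path fate is collected into the last slot, and a
 * destination-array action is registered instead:
 *
 *     mdest_res->sample_idx[0] <- res->sample_idx
 *     mdest_res->sample_idx[MLX5_MAX_DEST_NUM - 1] <- normal-path fate
 */
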
11697 /**
11698  * Remove an ASO age action from age actions list.
11699  *
11700  * @param[in] dev
11701  *   Pointer to the Ethernet device structure.
11702  * @param[in] age
11703  *   Pointer to the aso age action handler.
11704  */
11705 static void
11706 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11707                                 struct mlx5_aso_age_action *age)
11708 {
11709         struct mlx5_age_info *age_info;
11710         struct mlx5_age_param *age_param = &age->age_params;
11711         struct mlx5_priv *priv = dev->data->dev_private;
11712         uint16_t expected = AGE_CANDIDATE;
11713
11714         age_info = GET_PORT_AGE_INFO(priv);
11715         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11716                                          AGE_FREE, false, __ATOMIC_RELAXED,
11717                                          __ATOMIC_RELAXED)) {
11718                 /*
11719                  * We need the lock even if it is an age timeout,
11720                  * since the age action may still be in progress.
11721                  */
11722                 rte_spinlock_lock(&age_info->aged_sl);
11723                 LIST_REMOVE(age, next);
11724                 rte_spinlock_unlock(&age_info->aged_sl);
11725                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11726         }
11727 }
11728
11729 /**
11730  * Release an ASO age action.
11731  *
11732  * @param[in] dev
11733  *   Pointer to the Ethernet device structure.
11734  * @param[in] age_idx
11735  *   Index of ASO age action to release.
11739  *
11740  * @return
11741  *   0 when age action was removed, otherwise the number of references.
11742  */
11743 static int
11744 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11745 {
11746         struct mlx5_priv *priv = dev->data->dev_private;
11747         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11748         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11749         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11750
11751         if (!ret) {
11752                 flow_dv_aso_age_remove_from_age(dev, age);
11753                 rte_spinlock_lock(&mng->free_sl);
11754                 LIST_INSERT_HEAD(&mng->free, age, next);
11755                 rte_spinlock_unlock(&mng->free_sl);
11756         }
11757         return ret;
11758 }
11759
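/*
 * Usage sketch: dropping the last reference returns the action to the
 * management free list, from where flow_dv_aso_age_alloc() below can
 * hand it out again without touching the DevX object.
 *
 *     if (flow_dv_aso_age_release(dev, age_idx) == 0)
 *         DRV_LOG(DEBUG, "ASO age action recycled");
 *
 * `age_idx` is assumed to be the index stored at allocation time.
 */
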
11760 /**
11761  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11762  *
11763  * @param[in] dev
11764  *   Pointer to the Ethernet device structure.
11765  *
11766  * @return
11767  *   0 on success, otherwise negative errno value and rte_errno is set.
11768  */
11769 static int
11770 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11771 {
11772         struct mlx5_priv *priv = dev->data->dev_private;
11773         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11774         void *old_pools = mng->pools;
11775         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11776         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11777         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11778
11779         if (!pools) {
11780                 rte_errno = ENOMEM;
11781                 return -ENOMEM;
11782         }
11783         if (old_pools) {
11784                 memcpy(pools, old_pools,
11785                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11786                 mlx5_free(old_pools);
11787         } else {
11788                 /* First ASO flow hit allocation - starting ASO data-path. */
11789                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11790
11791                 if (ret) {
11792                         mlx5_free(pools);
11793                         return ret;
11794                 }
11795         }
11796         mng->n = resize;
11797         mng->pools = pools;
11798         return 0;
11799 }
11800
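/*
 * Growth sketch: the pools array grows in MLX5_CNT_CONTAINER_RESIZE
 * steps, so the pointer-array reallocation cost is amortized across
 * many pool creations, e.g. (step value assumed):
 *
 *     mng->n: 0 -> 64 -> 128 -> ...
 *
 * Only the very first resize starts the ASO flow-hit polling queue;
 * later ones just allocate a larger array and copy the old pointers.
 */
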
11801 /**
11802  * Create and initialize a new ASO aging pool.
11803  *
11804  * @param[in] dev
11805  *   Pointer to the Ethernet device structure.
11806  * @param[out] age_free
11807  *   Where to put the pointer of a new age action.
11808  *
11809  * @return
11810  *   The age actions pool pointer and @p age_free is set on success,
11811  *   NULL otherwise and rte_errno is set.
11812  */
11813 static struct mlx5_aso_age_pool *
11814 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11815                         struct mlx5_aso_age_action **age_free)
11816 {
11817         struct mlx5_priv *priv = dev->data->dev_private;
11818         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11819         struct mlx5_aso_age_pool *pool = NULL;
11820         struct mlx5_devx_obj *obj = NULL;
11821         uint32_t i;
11822
11823         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
11824                                                     priv->sh->cdev->pdn);
11825         if (!obj) {
11826                 rte_errno = ENODATA;
11827                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11828                 return NULL;
11829         }
11830         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11831         if (!pool) {
11832                 claim_zero(mlx5_devx_cmd_destroy(obj));
11833                 rte_errno = ENOMEM;
11834                 return NULL;
11835         }
11836         pool->flow_hit_aso_obj = obj;
11837         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11838         rte_spinlock_lock(&mng->resize_sl);
11839         pool->index = mng->next;
11840         /* Resize pools array if there is no room for the new pool in it. */
11841         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11842                 claim_zero(mlx5_devx_cmd_destroy(obj));
11843                 mlx5_free(pool);
11844                 rte_spinlock_unlock(&mng->resize_sl);
11845                 return NULL;
11846         }
11847         mng->pools[pool->index] = pool;
11848         mng->next++;
11849         rte_spinlock_unlock(&mng->resize_sl);
11850         /* Assign the first action in the new pool, the rest go to the free list. */
11851         *age_free = &pool->actions[0];
11852         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11853                 pool->actions[i].offset = i;
11854                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11855         }
11856         return pool;
11857 }
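/*
 * A minimal sketch of the free-list seeding pattern used above, with
 * hypothetical names (item, N, first), for illustration only; it relies on
 * the <sys/queue.h> LIST macros already used in this file:
 *
 *     struct item { LIST_ENTRY(item) next; uint16_t offset; };
 *     LIST_HEAD(, item) free_list = LIST_HEAD_INITIALIZER(free_list);
 *     struct item pool[N];
 *
 *     first = &pool[0];                  // handed straight to the caller
 *     for (i = 1; i < N; i++) {
 *             pool[i].offset = i;        // position inside the pool
 *             LIST_INSERT_HEAD(&free_list, &pool[i], next);
 *     }
 */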
11858
11859 /**
11860  * Allocate an ASO aging bit.
11861  *
11862  * @param[in] dev
11863  *   Pointer to the Ethernet device structure.
11864  * @param[out] error
11865  *   Pointer to the error structure.
11866  *
11867  * @return
11868  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11869  */
11870 static uint32_t
11871 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11872 {
11873         struct mlx5_priv *priv = dev->data->dev_private;
11874         const struct mlx5_aso_age_pool *pool;
11875         struct mlx5_aso_age_action *age_free = NULL;
11876         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11877
11878         MLX5_ASSERT(mng);
11879         /* Try to get the next free age action bit. */
11880         rte_spinlock_lock(&mng->free_sl);
11881         age_free = LIST_FIRST(&mng->free);
11882         if (age_free) {
11883                 LIST_REMOVE(age_free, next);
11884         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11885                 rte_spinlock_unlock(&mng->free_sl);
11886                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11887                                    NULL, "failed to create ASO age pool");
11888                 return 0; /* 0 is an error. */
11889         }
11890         rte_spinlock_unlock(&mng->free_sl);
11891         pool = container_of
11892           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11893                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11894                                                                        actions);
11895         if (!age_free->dr_action) {
11896                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11897                                                  error);
11898
11899                 if (reg_c < 0) {
11900                         rte_flow_error_set(error, rte_errno,
11901                                            RTE_FLOW_ERROR_TYPE_ACTION,
11902                                            NULL, "failed to get reg_c "
11903                                            "for ASO flow hit");
11904                         return 0; /* 0 is an error. */
11905                 }
11906 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11907                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11908                                 (priv->sh->rx_domain,
11909                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11910                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11911                                  (reg_c - REG_C_0));
11912 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11913                 if (!age_free->dr_action) {
11914                         rte_errno = errno;
11915                         rte_spinlock_lock(&mng->free_sl);
11916                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11917                         rte_spinlock_unlock(&mng->free_sl);
11918                         rte_flow_error_set(error, rte_errno,
11919                                            RTE_FLOW_ERROR_TYPE_ACTION,
11920                                            NULL, "failed to create ASO "
11921                                            "flow hit action");
11922                         return 0; /* 0 is an error. */
11923                 }
11924         }
11925         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11926         return pool->index | ((age_free->offset + 1) << 16);
11927 }
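/*
 * The returned index packs the pool index in the lower 16 bits and
 * (offset + 1) in the upper bits, so that 0 stays a pure error value
 * (assuming the pool index remains below 2^16). A decoding sketch with
 * hypothetical names:
 *
 *     uint32_t pool_idx = age_idx & UINT16_MAX;
 *     uint32_t offset = (age_idx >> 16) - 1;   // undo the +1 bias
 */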
11928
11929 /**
11930  * Initialize flow ASO age parameters.
11931  *
11932  * @param[in] dev
11933  *   Pointer to rte_eth_dev structure.
11934  * @param[in] age_idx
11935  *   Index of ASO age action.
11936  * @param[in] context
11937  *   Pointer to flow counter age context.
11938  * @param[in] timeout
11939  *   Aging timeout in seconds.
11940  *
11941  */
11942 static void
11943 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11944                             uint32_t age_idx,
11945                             void *context,
11946                             uint32_t timeout)
11947 {
11948         struct mlx5_aso_age_action *aso_age;
11949
11950         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11951         MLX5_ASSERT(aso_age);
11952         aso_age->age_params.context = context;
11953         aso_age->age_params.timeout = timeout;
11954         aso_age->age_params.port_id = dev->data->port_id;
11955         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11956                          __ATOMIC_RELAXED);
11957         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11958                          __ATOMIC_RELAXED);
11959 }
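/*
 * Typical usage of the two helpers above when attaching an AGE action
 * (an illustrative sketch with error handling trimmed; action_ctx and
 * timeout_sec are hypothetical placeholders for the aging context and the
 * timeout taken from struct rte_flow_action_age):
 *
 *     uint32_t age_idx = flow_dv_aso_age_alloc(dev, error);
 *     if (!age_idx)
 *             return 0;      // rte_errno and *error already set
 *     flow_dv_aso_age_params_init(dev, age_idx, action_ctx, timeout_sec);
 */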
11960
11961 static void
11962 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11963                                const struct rte_flow_item_integrity *value,
11964                                void *headers_m, void *headers_v)
11965 {
11966         if (mask->l4_ok) {
11967                 /* The application l4_ok filter aggregates all hardware l4
11968                  * filters; therefore hw l4_checksum_ok must be implicitly added here.
11969                  */
11970                 struct rte_flow_item_integrity local_item;
11971
11972                 local_item.l4_csum_ok = 1;
11973                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11974                          local_item.l4_csum_ok);
11975                 if (value->l4_ok) {
11976                         /* An application l4_ok = 1 match sets both hw
11977                          * flags l4_ok and l4_checksum_ok to 1.
11978                          */
11979                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11980                                  l4_checksum_ok, local_item.l4_csum_ok);
11981                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11982                                  mask->l4_ok);
11983                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11984                                  value->l4_ok);
11985                 } else {
11986                         /* An application l4_ok = 0 match uses only the
11987                          * hw flag l4_checksum_ok = 0.
11988                          */
11989                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11990                                  l4_checksum_ok, 0);
11991                 }
11992         } else if (mask->l4_csum_ok) {
11993                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11994                          mask->l4_csum_ok);
11995                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
11996                          value->l4_csum_ok);
11997         }
11998 }
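/*
 * For reference, the application-side item that exercises the l4_ok logic
 * above looks roughly like this (a sketch using the public rte_flow API):
 *
 *     struct rte_flow_item_integrity spec = { .l4_ok = 1 };
 *     struct rte_flow_item_integrity mask = { .l4_ok = 1 };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 *
 * With spec.l4_ok = 1 the translation sets both hw l4_ok and
 * l4_checksum_ok; with spec.l4_ok = 0 it matches l4_checksum_ok = 0 only.
 */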
11999
12000 static void
12001 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12002                                const struct rte_flow_item_integrity *value,
12003                                void *headers_m, void *headers_v, bool is_ipv4)
12004 {
12005         if (mask->l3_ok) {
12006                 /* The application l3_ok filter aggregates all hardware l3
12007                  * filters; therefore hw ipv4_checksum_ok must be implicitly added here.
12008                  */
12009                 struct rte_flow_item_integrity local_item;
12010
12011                 local_item.ipv4_csum_ok = !!is_ipv4;
12012                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12013                          local_item.ipv4_csum_ok);
12014                 if (value->l3_ok) {
12015                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12016                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
12017                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
12018                                  mask->l3_ok);
12019                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12020                                  value->l3_ok);
12021                 } else {
12022                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12023                                  ipv4_checksum_ok, 0);
12024                 }
12025         } else if (mask->ipv4_csum_ok) {
12026                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12027                          mask->ipv4_csum_ok);
12028                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12029                          value->ipv4_csum_ok);
12030         }
12031 }
12032
12033 static void
12034 set_integrity_bits(void *headers_m, void *headers_v,
12035                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12036 {
12037         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12038         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12039
12040         /* Integrity item validation guarantees a non-NULL spec pointer. */
12041         MLX5_ASSERT(spec != NULL);
12042         if (!mask)
12043                 mask = &rte_flow_item_integrity_mask;
12044         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12045                                        is_l3_ip4);
12046         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12047 }
12048
12049 static void
12050 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12051                                       const
12052                                       struct rte_flow_item *integrity_items[2],
12053                                       uint64_t pattern_flags)
12054 {
12055         void *headers_m, *headers_v;
12056         bool is_l3_ip4;
12057
12058         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12059                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12060                                          inner_headers);
12061                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12062                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12063                             0;
12064                 set_integrity_bits(headers_m, headers_v,
12065                                    integrity_items[1], is_l3_ip4);
12066         }
12067         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12068                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12069                                          outer_headers);
12070                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12071                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12072                             0;
12073                 set_integrity_bits(headers_m, headers_v,
12074                                    integrity_items[0], is_l3_ip4);
12075         }
12076 }
12077
12078 static void
12079 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12080                                  const struct rte_flow_item *integrity_items[2],
12081                                  uint64_t *last_item)
12082 {
12083         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12084
12085         /* Integrity item validation guarantees a non-NULL spec pointer. */
12086         MLX5_ASSERT(spec != NULL);
12087         if (spec->level > 1) {
12088                 integrity_items[1] = item;
12089                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12090         } else {
12091                 integrity_items[0] = item;
12092                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12093         }
12094 }
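/*
 * The level field of the integrity item selects the header set: 0 and 1
 * refer to the outer headers, anything above 1 to the inner ones, e.g.:
 *
 *     struct rte_flow_item_integrity inner_spec = {
 *             .level = 2,    // inner headers of a tunneled packet
 *             .l3_ok = 1,
 *     };
 *
 * This mirrors the split into integrity_items[0]/integrity_items[1]
 * performed above.
 */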
12095
12096 /**
12097  * Prepares DV flow counter with aging configuration.
12098  * Gets it by index when it exists, creates a new one when it doesn't.
12099  *
12100  * @param[in] dev
12101  *   Pointer to rte_eth_dev structure.
12102  * @param[in] dev_flow
12103  *   Pointer to the mlx5_flow.
12104  * @param[in, out] flow
12105  *   Pointer to the sub flow.
12106  * @param[in] count
12107  *   Pointer to the counter action configuration.
12108  * @param[in] age
12109  *   Pointer to the aging action configuration.
12110  * @param[out] error
12111  *   Pointer to the error structure.
12112  *
12113  * @return
12114  *   Pointer to the counter on success, NULL otherwise and rte_errno is set.
12115  */
12116 static struct mlx5_flow_counter *
12117 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12118                         struct mlx5_flow *dev_flow,
12119                         struct rte_flow *flow,
12120                         const struct rte_flow_action_count *count,
12121                         const struct rte_flow_action_age *age,
12122                         struct rte_flow_error *error)
12123 {
12124         if (!flow->counter) {
12125                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12126                                                                  count, age);
12127                 if (!flow->counter) {
12128                         rte_flow_error_set(error, rte_errno,
12129                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12130                                            "cannot create counter object.");
12131                         return NULL;
12132                 }
12133         }
12134         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12135 }
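/*
 * Note that the helper above is idempotent with respect to the rte flow:
 * the counter is created once and then looked up by index, so sub-flows of
 * the same flow share it. A hedged usage sketch (cnt is hypothetical):
 *
 *     cnt = flow_dv_prepare_counter(dev, dev_flow, flow, count, age, error);
 *     if (cnt == NULL)
 *             return -rte_errno;   // *error describes the failure
 */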
12136
12137 /*
12138  * Release an ASO CT action on its owner device.
12139  *
12140  * @param[in] dev
12141  *   Pointer to the Ethernet device structure.
12142  * @param[in] idx
12143  *   Index of ASO CT action to release.
12144  *
12145  * @return
12146  *   0 when CT action was removed, otherwise the number of references.
12147  */
12148 static inline int
12149 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12150 {
12151         struct mlx5_priv *priv = dev->data->dev_private;
12152         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12153         uint32_t ret;
12154         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12155         enum mlx5_aso_ct_state state =
12156                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12157
12158         /* Cannot release when CT is in the ASO SQ. */
12159         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12160                 return -1;
12161         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12162         if (!ret) {
12163                 if (ct->dr_action_orig) {
12164 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12165                         claim_zero(mlx5_glue->destroy_flow_action
12166                                         (ct->dr_action_orig));
12167 #endif
12168                         ct->dr_action_orig = NULL;
12169                 }
12170                 if (ct->dr_action_rply) {
12171 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12172                         claim_zero(mlx5_glue->destroy_flow_action
12173                                         (ct->dr_action_rply));
12174 #endif
12175                         ct->dr_action_rply = NULL;
12176                 }
12177                 /* Clear the state to FREE; not needed at the first allocation. */
12178                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12179                 rte_spinlock_lock(&mng->ct_sl);
12180                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12181                 rte_spinlock_unlock(&mng->ct_sl);
12182         }
12183         return (int)ret;
12184 }
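/*
 * Release is refused while the hardware may still write the object: a CT
 * action in ASO_CONNTRACK_WAIT or ASO_CONNTRACK_QUERY state sits in the
 * ASO SQ. A caller therefore treats a negative return as "retry later"
 * (an illustrative sketch, matching the EAGAIN path below):
 *
 *     int ret = flow_dv_aso_ct_dev_release(dev, idx);
 *     if (ret < 0)
 *             return -EAGAIN;   // still in flight, try again later
 */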
12185
12186 static inline int
12187 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12188                        struct rte_flow_error *error)
12189 {
12190         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12191         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12192         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12193         int ret;
12194
12195         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12196         if (dev->data->dev_started != 1)
12197                 return rte_flow_error_set(error, EAGAIN,
12198                                           RTE_FLOW_ERROR_TYPE_ACTION,
12199                                           NULL,
12200                                           "Indirect CT action cannot be destroyed when the port is stopped");
12201         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12202         if (ret < 0)
12203                 return rte_flow_error_set(error, EAGAIN,
12204                                           RTE_FLOW_ERROR_TYPE_ACTION,
12205                                           NULL,
12206                                           "Current state prevents indirect CT action from being destroyed");
12207         return ret;
12208 }
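/*
 * The 32-bit indirect CT handle encodes both the owner port and the
 * per-device action index; the two macros used above undo that packing:
 *
 *     uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
 *     uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
 *
 * This is what lets one port release an action created on its peer.
 */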
12209
12210 /*
12211  * Resize the ASO CT pools array by 64 pools.
12212  *
12213  * @param[in] dev
12214  *   Pointer to the Ethernet device structure.
12215  *
12216  * @return
12217  *   0 on success, otherwise negative errno value and rte_errno is set.
12218  */
12219 static int
12220 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12221 {
12222         struct mlx5_priv *priv = dev->data->dev_private;
12223         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12224         void *old_pools = mng->pools;
12225         /* Magic number for now; should be replaced with a macro. */
12226         uint32_t resize = mng->n + 64;
12227         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12228         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12229
12230         if (!pools) {
12231                 rte_errno = ENOMEM;
12232                 return -rte_errno;
12233         }
12234         rte_rwlock_write_lock(&mng->resize_rwl);
12235         /* ASO SQ/QP was already initialized at startup. */
12236         if (old_pools) {
12237                 /* Realloc could be an alternative choice. */
12238                 rte_memcpy(pools, old_pools,
12239                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12240                 mlx5_free(old_pools);
12241         }
12242         mng->n = resize;
12243         mng->pools = pools;
12244         rte_rwlock_write_unlock(&mng->resize_rwl);
12245         return 0;
12246 }
12247
12248 /*
12249  * Create and initialize a new ASO CT pool.
12250  *
12251  * @param[in] dev
12252  *   Pointer to the Ethernet device structure.
12253  * @param[out] ct_free
12254  *   Where to put the pointer of a new CT action.
12255  *
12256  * @return
12257  *   Pointer to the new CT actions pool, with @p ct_free set, on success,
12258  *   NULL otherwise and rte_errno is set.
12259  */
12260 static struct mlx5_aso_ct_pool *
12261 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12262                        struct mlx5_aso_ct_action **ct_free)
12263 {
12264         struct mlx5_priv *priv = dev->data->dev_private;
12265         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12266         struct mlx5_aso_ct_pool *pool = NULL;
12267         struct mlx5_devx_obj *obj = NULL;
12268         uint32_t i;
12269         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12270
12271         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12272                                                           priv->sh->cdev->pdn,
12273                                                           log_obj_size);
12274         if (!obj) {
12275                 rte_errno = ENODATA;
12276                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12277                 return NULL;
12278         }
12279         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12280         if (!pool) {
12281                 rte_errno = ENOMEM;
12282                 claim_zero(mlx5_devx_cmd_destroy(obj));
12283                 return NULL;
12284         }
12285         pool->devx_obj = obj;
12286         pool->index = mng->next;
12287         /* Resize pools array if there is no room for the new pool in it. */
12288         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12289                 claim_zero(mlx5_devx_cmd_destroy(obj));
12290                 mlx5_free(pool);
12291                 return NULL;
12292         }
12293         mng->pools[pool->index] = pool;
12294         mng->next++;
12295         /* Assign the first action in the new pool, the rest go to the free list. */
12296         *ct_free = &pool->actions[0];
12297         /* The caller holds the lock, so the list operation is safe here. */
12298         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12299                 /* refcnt is 0 when allocating the memory. */
12300                 pool->actions[i].offset = i;
12301                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12302         }
12303         return pool;
12304 }
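/*
 * The DevX object is sized by a log2 of the actions count, i.e. the pool
 * holds exactly 2^log_obj_size CT actions. For example, assuming
 * MLX5_ASO_CT_ACTIONS_PER_POOL == 64:
 *
 *     uint32_t log_obj_size = rte_log2_u32(64);   // == 6
 *     uint32_t n_actions = 1u << log_obj_size;    // == 64
 */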
12305
12306 /*
12307  * Allocate an ASO CT action from the free list.
12308  *
12309  * @param[in] dev
12310  *   Pointer to the Ethernet device structure.
12311  * @param[out] error
12312  *   Pointer to the error structure.
12313  *
12314  * @return
12315  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12316  */
12317 static uint32_t
12318 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12319 {
12320         struct mlx5_priv *priv = dev->data->dev_private;
12321         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12322         struct mlx5_aso_ct_action *ct = NULL;
12323         struct mlx5_aso_ct_pool *pool;
12324         int reg_c;
12325         uint32_t ct_idx;
12326
12327         MLX5_ASSERT(mng);
12328         if (!priv->sh->devx) {
12329                 rte_errno = ENOTSUP;
12330                 return 0;
12331         }
12332         /* Get a free CT action; if none, a new pool will be created. */
12333         rte_spinlock_lock(&mng->ct_sl);
12334         ct = LIST_FIRST(&mng->free_cts);
12335         if (ct) {
12336                 LIST_REMOVE(ct, next);
12337         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12338                 rte_spinlock_unlock(&mng->ct_sl);
12339                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12340                                    NULL, "failed to create ASO CT pool");
12341                 return 0;
12342         }
12343         rte_spinlock_unlock(&mng->ct_sl);
12344         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12345         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12346         /* 0: inactive, 1: created, 2+: used by flows. */
12347         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12348         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
              /* The register index may be negative on failure; bail out early. */
              if (reg_c < 0) {
                      flow_dv_aso_ct_dev_release(dev, ct_idx);
                      return 0;
              }
12349         if (!ct->dr_action_orig) {
12350 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12351                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12352                         (priv->sh->rx_domain, pool->devx_obj->obj,
12353                          ct->offset,
12354                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12355                          reg_c - REG_C_0);
12356 #else
12357                 RTE_SET_USED(reg_c);
12358 #endif
12359                 if (!ct->dr_action_orig) {
12360                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12361                         rte_flow_error_set(error, rte_errno,
12362                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12363                                            "failed to create ASO CT action");
12364                         return 0;
12365                 }
12366         }
12367         if (!ct->dr_action_rply) {
12368 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12369                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12370                         (priv->sh->rx_domain, pool->devx_obj->obj,
12371                          ct->offset,
12372                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12373                          reg_c - REG_C_0);
12374 #endif
12375                 if (!ct->dr_action_rply) {
12376                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12377                         rte_flow_error_set(error, rte_errno,
12378                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12379                                            "failed to create ASO CT action");
12380                         return 0;
12381                 }
12382         }
12383         return ct_idx;
12384 }
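/*
 * The CT action reference count encodes its lifecycle: 0 means inactive
 * (sitting in a pool), 1 means created through the indirect action API,
 * 2 or more means referenced by flows. A sketch of how a flow takes and
 * drops a reference, using the GCC/Clang atomic builtins already used in
 * this file:
 *
 *     __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);  // attach
 *     ...
 *     __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);  // detach
 */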
12385
12386 /*
12387  * Create a conntrack object with context and actions by using the ASO mechanism.
12388  *
12389  * @param[in] dev
12390  *   Pointer to rte_eth_dev structure.
12391  * @param[in] pro
12392  *   Pointer to conntrack information profile.
12393  * @param[out] error
12394  *   Pointer to the error structure.
12395  *
12396  * @return
12397  *   Index to conntrack object on success, 0 otherwise.
12398  */
12399 static uint32_t
12400 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12401                                    const struct rte_flow_action_conntrack *pro,
12402                                    struct rte_flow_error *error)
12403 {
12404         struct mlx5_priv *priv = dev->data->dev_private;
12405         struct mlx5_dev_ctx_shared *sh = priv->sh;
12406         struct mlx5_aso_ct_action *ct;
12407         uint32_t idx;
12408
12409         if (!sh->ct_aso_en)
12410                 return rte_flow_error_set(error, ENOTSUP,
12411                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12412                                           "Connection tracking is not supported");
12413         idx = flow_dv_aso_ct_alloc(dev, error);
12414         if (!idx)
12415                 return rte_flow_error_set(error, rte_errno,
12416                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12417                                           "Failed to allocate CT object");
12418         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12419         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12420                 return rte_flow_error_set(error, EBUSY,
12421                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12422                                           "Failed to update CT");
12423         ct->is_original = !!pro->is_original_dir;
12424         ct->peer = pro->peer_port;
12425         return idx;
12426 }
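/*
 * On the application side, the conntrack context above is normally created
 * through the indirect action API. A minimal sketch (most profile fields
 * trimmed; peer_port_id and port_id are placeholders):
 *
 *     struct rte_flow_action_conntrack profile = {
 *             .peer_port = peer_port_id,
 *             .is_original_dir = 1,
 *             .enable = 1,
 *     };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
 *             .conf = &profile,
 *     };
 *     struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *     struct rte_flow_error err;
 *     struct rte_flow_action_handle *handle =
 *             rte_flow_action_handle_create(port_id, &conf, &action, &err);
 */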
12427
12428 /**
12429  * Fill the flow with DV spec, lock-free
12430  * (the mutex should be acquired by the caller).
12431  *
12432  * @param[in] dev
12433  *   Pointer to rte_eth_dev structure.
12434  * @param[in, out] dev_flow
12435  *   Pointer to the sub flow.
12436  * @param[in] attr
12437  *   Pointer to the flow attributes.
12438  * @param[in] items
12439  *   Pointer to the list of items.
12440  * @param[in] actions
12441  *   Pointer to the list of actions.
12442  * @param[out] error
12443  *   Pointer to the error structure.
12444  *
12445  * @return
12446  *   0 on success, a negative errno value otherwise and rte_errno is set.
12447  */
12448 static int
12449 flow_dv_translate(struct rte_eth_dev *dev,
12450                   struct mlx5_flow *dev_flow,
12451                   const struct rte_flow_attr *attr,
12452                   const struct rte_flow_item items[],
12453                   const struct rte_flow_action actions[],
12454                   struct rte_flow_error *error)
12455 {
12456         struct mlx5_priv *priv = dev->data->dev_private;
12457         struct mlx5_dev_config *dev_conf = &priv->config;
12458         struct rte_flow *flow = dev_flow->flow;
12459         struct mlx5_flow_handle *handle = dev_flow->handle;
12460         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12461         struct mlx5_flow_rss_desc *rss_desc;
12462         uint64_t item_flags = 0;
12463         uint64_t last_item = 0;
12464         uint64_t action_flags = 0;
12465         struct mlx5_flow_dv_matcher matcher = {
12466                 .mask = {
12467                         .size = sizeof(matcher.mask.buf),
12468                 },
12469         };
12470         int actions_n = 0;
12471         bool actions_end = false;
12472         union {
12473                 struct mlx5_flow_dv_modify_hdr_resource res;
12474                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12475                             sizeof(struct mlx5_modification_cmd) *
12476                             (MLX5_MAX_MODIFY_NUM + 1)];
12477         } mhdr_dummy;
12478         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12479         const struct rte_flow_action_count *count = NULL;
12480         const struct rte_flow_action_age *non_shared_age = NULL;
12481         union flow_dv_attr flow_attr = { .attr = 0 };
12482         uint32_t tag_be;
12483         union mlx5_flow_tbl_key tbl_key;
12484         uint32_t modify_action_position = UINT32_MAX;
12485         void *match_mask = matcher.mask.buf;
12486         void *match_value = dev_flow->dv.value.buf;
12487         uint8_t next_protocol = 0xff;
12488         struct rte_vlan_hdr vlan = { 0 };
12489         struct mlx5_flow_dv_dest_array_resource mdest_res;
12490         struct mlx5_flow_dv_sample_resource sample_res;
12491         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12492         const struct rte_flow_action_sample *sample = NULL;
12493         struct mlx5_flow_sub_actions_list *sample_act;
12494         uint32_t sample_act_pos = UINT32_MAX;
12495         uint32_t age_act_pos = UINT32_MAX;
12496         uint32_t num_of_dest = 0;
12497         int tmp_actions_n = 0;
12498         uint32_t table;
12499         int ret = 0;
12500         const struct mlx5_flow_tunnel *tunnel = NULL;
12501         struct flow_grp_info grp_info = {
12502                 .external = !!dev_flow->external,
12503                 .transfer = !!attr->transfer,
12504                 .fdb_def_rule = !!priv->fdb_def_rule,
12505                 .skip_scale = dev_flow->skip_scale &
12506                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12507                 .std_tbl_fix = true,
12508         };
12509         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12510
12511         if (!wks)
12512                 return rte_flow_error_set(error, ENOMEM,
12513                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12514                                           NULL,
12515                                           "failed to push flow workspace");
12516         rss_desc = &wks->rss_desc;
12517         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12518         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12519         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12520                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12521         /* Update the normal path action resource at the last index of the array. */
12522         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12523         if (is_tunnel_offload_active(dev)) {
12524                 if (dev_flow->tunnel) {
12525                         RTE_VERIFY(dev_flow->tof_type ==
12526                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12527                         tunnel = dev_flow->tunnel;
12528                 } else {
12529                         tunnel = mlx5_get_tof(items, actions,
12530                                               &dev_flow->tof_type);
12531                         dev_flow->tunnel = tunnel;
12532                 }
12533                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12534                                         (dev, attr, tunnel, dev_flow->tof_type);
12535         }
12538         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12539                                        &grp_info, error);
12540         if (ret)
12541                 return ret;
12542         dev_flow->dv.group = table;
12543         if (attr->transfer)
12544                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12545         /* The number of actions must be reset to 0 in case of a dirty stack. */
12546         mhdr_res->actions_num = 0;
12547         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12548                 /*
12549                  * Do not add a decap action if the match rule drops the
12550                  * packet; HW rejects rules with both decap and drop.
12551                  *
12552                  * If a tunnel match rule was inserted before the matching
12553                  * tunnel set rule, the flow table used in the match rule
12554                  * must be registered. The current implementation handles
12555                  * that in flow_dv_match_register() at the end of the function.
12556                  */
12557                 bool add_decap = true;
12558                 const struct rte_flow_action *ptr = actions;
12559
12560                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12561                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12562                                 add_decap = false;
12563                                 break;
12564                         }
12565                 }
12566                 if (add_decap) {
12567                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12568                                                            attr->transfer,
12569                                                            error))
12570                                 return -rte_errno;
12571                         dev_flow->dv.actions[actions_n++] =
12572                                         dev_flow->dv.encap_decap->action;
12573                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12574                 }
12575         }
12576         for (; !actions_end ; actions++) {
12577                 const struct rte_flow_action_queue *queue;
12578                 const struct rte_flow_action_rss *rss;
12579                 const struct rte_flow_action *action = actions;
12580                 const uint8_t *rss_key;
12581                 struct mlx5_flow_tbl_resource *tbl;
12582                 struct mlx5_aso_age_action *age_act;
12583                 struct mlx5_flow_counter *cnt_act;
12584                 uint32_t port_id = 0;
12585                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12586                 int action_type = actions->type;
12587                 const struct rte_flow_action *found_action = NULL;
12588                 uint32_t jump_group = 0;
12589                 uint32_t owner_idx;
12590                 struct mlx5_aso_ct_action *ct;
12591
12592                 if (!mlx5_flow_os_action_supported(action_type))
12593                         return rte_flow_error_set(error, ENOTSUP,
12594                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12595                                                   actions,
12596                                                   "action not supported");
12597                 switch (action_type) {
12598                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12599                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12600                         break;
12601                 case RTE_FLOW_ACTION_TYPE_VOID:
12602                         break;
12603                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12604                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12605                         if (flow_dv_translate_action_port_id(dev, action,
12606                                                              &port_id, error))
12607                                 return -rte_errno;
12608                         port_id_resource.port_id = port_id;
12609                         MLX5_ASSERT(!handle->rix_port_id_action);
12610                         if (flow_dv_port_id_action_resource_register
12611                             (dev, &port_id_resource, dev_flow, error))
12612                                 return -rte_errno;
12613                         dev_flow->dv.actions[actions_n++] =
12614                                         dev_flow->dv.port_id_action->action;
12615                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12616                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12617                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12618                         num_of_dest++;
12619                         break;
12620                 case RTE_FLOW_ACTION_TYPE_FLAG:
12621                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12622                         dev_flow->handle->mark = 1;
12623                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12624                                 struct rte_flow_action_mark mark = {
12625                                         .id = MLX5_FLOW_MARK_DEFAULT,
12626                                 };
12627
12628                                 if (flow_dv_convert_action_mark(dev, &mark,
12629                                                                 mhdr_res,
12630                                                                 error))
12631                                         return -rte_errno;
12632                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12633                                 break;
12634                         }
12635                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12636                         /*
12637                          * Only one FLAG or MARK is supported per device flow
12638                          * right now, so the pointer to the tag resource must be
12639                          * zero before the registration process.
12640                          */
12641                         MLX5_ASSERT(!handle->dvh.rix_tag);
12642                         if (flow_dv_tag_resource_register(dev, tag_be,
12643                                                           dev_flow, error))
12644                                 return -rte_errno;
12645                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12646                         dev_flow->dv.actions[actions_n++] =
12647                                         dev_flow->dv.tag_resource->action;
12648                         break;
12649                 case RTE_FLOW_ACTION_TYPE_MARK:
12650                         action_flags |= MLX5_FLOW_ACTION_MARK;
12651                         dev_flow->handle->mark = 1;
12652                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12653                                 const struct rte_flow_action_mark *mark =
12654                                         (const struct rte_flow_action_mark *)
12655                                                 actions->conf;
12656
12657                                 if (flow_dv_convert_action_mark(dev, mark,
12658                                                                 mhdr_res,
12659                                                                 error))
12660                                         return -rte_errno;
12661                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12662                                 break;
12663                         }
12664                         /* Fall-through */
12665                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12666                         /* Legacy (non-extensive) MARK action. */
12667                         tag_be = mlx5_flow_mark_set
12668                               (((const struct rte_flow_action_mark *)
12669                                (actions->conf))->id);
12670                         MLX5_ASSERT(!handle->dvh.rix_tag);
12671                         if (flow_dv_tag_resource_register(dev, tag_be,
12672                                                           dev_flow, error))
12673                                 return -rte_errno;
12674                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12675                         dev_flow->dv.actions[actions_n++] =
12676                                         dev_flow->dv.tag_resource->action;
12677                         break;
12678                 case RTE_FLOW_ACTION_TYPE_SET_META:
12679                         if (flow_dv_convert_action_set_meta
12680                                 (dev, mhdr_res, attr,
12681                                  (const struct rte_flow_action_set_meta *)
12682                                   actions->conf, error))
12683                                 return -rte_errno;
12684                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12685                         break;
12686                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12687                         if (flow_dv_convert_action_set_tag
12688                                 (dev, mhdr_res,
12689                                  (const struct rte_flow_action_set_tag *)
12690                                   actions->conf, error))
12691                                 return -rte_errno;
12692                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12693                         break;
12694                 case RTE_FLOW_ACTION_TYPE_DROP:
12695                         action_flags |= MLX5_FLOW_ACTION_DROP;
12696                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12697                         break;
12698                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12699                         queue = actions->conf;
12700                         rss_desc->queue_num = 1;
12701                         rss_desc->queue[0] = queue->index;
12702                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12703                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12704                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12705                         num_of_dest++;
12706                         break;
12707                 case RTE_FLOW_ACTION_TYPE_RSS:
12708                         rss = actions->conf;
12709                         memcpy(rss_desc->queue, rss->queue,
12710                                rss->queue_num * sizeof(uint16_t));
12711                         rss_desc->queue_num = rss->queue_num;
12712                         /* NULL RSS key indicates default RSS key. */
12713                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12714                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12715                         /*
12716                          * rss->level and rss->types should be set in advance
12717                          * when expanding items for RSS.
12718                          */
12719                         action_flags |= MLX5_FLOW_ACTION_RSS;
12720                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12721                                 MLX5_FLOW_FATE_SHARED_RSS :
12722                                 MLX5_FLOW_FATE_QUEUE;
12723                         break;
12724                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12725                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12726                         age_act = flow_aso_age_get_by_idx(dev, owner_idx);
12727                         if (flow->age == 0) {
12728                                 flow->age = owner_idx;
12729                                 __atomic_fetch_add(&age_act->refcnt, 1,
12730                                                    __ATOMIC_RELAXED);
12731                         }
12732                         age_act_pos = actions_n++;
12733                         action_flags |= MLX5_FLOW_ACTION_AGE;
12734                         break;
12735                 case RTE_FLOW_ACTION_TYPE_AGE:
12736                         non_shared_age = action->conf;
12737                         age_act_pos = actions_n++;
12738                         action_flags |= MLX5_FLOW_ACTION_AGE;
12739                         break;
12740                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12741                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12742                         cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
12743                                                              NULL);
12744                         MLX5_ASSERT(cnt_act != NULL);
12745                         /*
12746                          * When creating a meter drop flow in the drop table,
12747                          * the counter should not overwrite the rte flow counter.
12748                          */
12749                         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
12750                             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
12751                                 dev_flow->dv.actions[actions_n++] =
12752                                                         cnt_act->action;
12753                         } else {
12754                                 if (flow->counter == 0) {
12755                                         flow->counter = owner_idx;
12756                                         __atomic_fetch_add
12757                                                 (&cnt_act->shared_info.refcnt,
12758                                                  1, __ATOMIC_RELAXED);
12759                                 }
12760                                 /* Save the information first; it will be applied later. */
12761                                 action_flags |= MLX5_FLOW_ACTION_COUNT;
12762                         }
12763                         break;
12764                 case RTE_FLOW_ACTION_TYPE_COUNT:
12765                         if (!priv->sh->devx) {
12766                                 return rte_flow_error_set
12767                                               (error, ENOTSUP,
12768                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12769                                                NULL,
12770                                                "count action not supported");
12771                         }
12772                         /* Save the information first; it will be applied later. */
12773                         count = action->conf;
12774                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12775                         break;
12776                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12777                         dev_flow->dv.actions[actions_n++] =
12778                                                 priv->sh->pop_vlan_action;
12779                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12780                         break;
12781                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12782                         if (!(action_flags &
12783                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12784                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12785                         vlan.eth_proto = rte_be_to_cpu_16
12786                              ((((const struct rte_flow_action_of_push_vlan *)
12787                                                    actions->conf)->ethertype));
12788                         found_action = mlx5_flow_find_action
12789                                         (actions + 1,
12790                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12791                         if (found_action)
12792                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12793                         found_action = mlx5_flow_find_action
12794                                         (actions + 1,
12795                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12796                         if (found_action)
12797                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12798                         if (flow_dv_create_action_push_vlan
12799                                             (dev, attr, &vlan, dev_flow, error))
12800                                 return -rte_errno;
12801                         dev_flow->dv.actions[actions_n++] =
12802                                         dev_flow->dv.push_vlan_res->action;
12803                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12804                         break;
12805                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12806                         /* The of_push_vlan action already handled this action. */
12807                         MLX5_ASSERT(action_flags &
12808                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12809                         break;
12810                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12811                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12812                                 break;
12813                         flow_dev_get_vlan_info_from_items(items, &vlan);
12814                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12815                         /* With no VLAN push, this is a modify header action. */
12816                         if (flow_dv_convert_action_modify_vlan_vid
12817                                                 (mhdr_res, actions, error))
12818                                 return -rte_errno;
12819                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12820                         break;
12821                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12822                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12823                         if (flow_dv_create_action_l2_encap(dev, actions,
12824                                                            dev_flow,
12825                                                            attr->transfer,
12826                                                            error))
12827                                 return -rte_errno;
12828                         dev_flow->dv.actions[actions_n++] =
12829                                         dev_flow->dv.encap_decap->action;
12830                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12831                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12832                                 sample_act->action_flags |=
12833                                                         MLX5_FLOW_ACTION_ENCAP;
12834                         break;
12835                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12836                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12837                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12838                                                            attr->transfer,
12839                                                            error))
12840                                 return -rte_errno;
12841                         dev_flow->dv.actions[actions_n++] =
12842                                         dev_flow->dv.encap_decap->action;
12843                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12844                         break;
12845                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12846                         /* Handle encap with preceding decap. */
12847                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12848                                 if (flow_dv_create_action_raw_encap
12849                                         (dev, actions, dev_flow, attr, error))
12850                                         return -rte_errno;
12851                                 dev_flow->dv.actions[actions_n++] =
12852                                         dev_flow->dv.encap_decap->action;
12853                         } else {
12854                                 /* Handle encap without preceding decap. */
12855                                 if (flow_dv_create_action_l2_encap
12856                                     (dev, actions, dev_flow, attr->transfer,
12857                                      error))
12858                                         return -rte_errno;
12859                                 dev_flow->dv.actions[actions_n++] =
12860                                         dev_flow->dv.encap_decap->action;
12861                         }
12862                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12863                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12864                                 sample_act->action_flags |=
12865                                                         MLX5_FLOW_ACTION_ENCAP;
12866                         break;
12867                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12868                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12869                                 ;
12870                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12871                                 if (flow_dv_create_action_l2_decap
12872                                     (dev, dev_flow, attr->transfer, error))
12873                                         return -rte_errno;
12874                                 dev_flow->dv.actions[actions_n++] =
12875                                         dev_flow->dv.encap_decap->action;
12876                         }
12877                         /* If decap is followed by encap, handle it at encap. */
12878                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12879                         break;
12880                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12881                         dev_flow->dv.actions[actions_n++] =
12882                                 (void *)(uintptr_t)action->conf;
12883                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12884                         break;
12885                 case RTE_FLOW_ACTION_TYPE_JUMP:
12886                         jump_group = ((const struct rte_flow_action_jump *)
12887                                                         action->conf)->group;
12888                         grp_info.std_tbl_fix = 0;
12889                         if (dev_flow->skip_scale &
12890                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12891                                 grp_info.skip_scale = 1;
12892                         else
12893                                 grp_info.skip_scale = 0;
12894                         ret = mlx5_flow_group_to_table(dev, tunnel,
12895                                                        jump_group,
12896                                                        &table,
12897                                                        &grp_info, error);
12898                         if (ret)
12899                                 return ret;
12900                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12901                                                        attr->transfer,
12902                                                        !!dev_flow->external,
12903                                                        tunnel, jump_group, 0,
12904                                                        0, error);
12905                         if (!tbl)
12906                                 return rte_flow_error_set
12907                                                 (error, errno,
12908                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12909                                                  NULL,
12910                                                  "cannot create jump action.");
12911                         if (flow_dv_jump_tbl_resource_register
12912                             (dev, tbl, dev_flow, error)) {
12913                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12914                                 return rte_flow_error_set
12915                                                 (error, errno,
12916                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12917                                                  NULL,
12918                                                  "cannot create jump action.");
12919                         }
12920                         dev_flow->dv.actions[actions_n++] =
12921                                         dev_flow->dv.jump->action;
12922                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12923                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12924                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12925                         num_of_dest++;
12926                         break;
12927                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12928                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12929                         if (flow_dv_convert_action_modify_mac
12930                                         (mhdr_res, actions, error))
12931                                 return -rte_errno;
12932                         action_flags |= actions->type ==
12933                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12934                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12935                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12936                         break;
12937                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12938                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12939                         if (flow_dv_convert_action_modify_ipv4
12940                                         (mhdr_res, actions, error))
12941                                 return -rte_errno;
12942                         action_flags |= actions->type ==
12943                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12944                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12945                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12946                         break;
12947                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12948                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12949                         if (flow_dv_convert_action_modify_ipv6
12950                                         (mhdr_res, actions, error))
12951                                 return -rte_errno;
12952                         action_flags |= actions->type ==
12953                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12954                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12955                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12956                         break;
12957                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12958                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12959                         if (flow_dv_convert_action_modify_tp
12960                                         (mhdr_res, actions, items,
12961                                          &flow_attr, dev_flow, !!(action_flags &
12962                                          MLX5_FLOW_ACTION_DECAP), error))
12963                                 return -rte_errno;
12964                         action_flags |= actions->type ==
12965                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12966                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12967                                         MLX5_FLOW_ACTION_SET_TP_DST;
12968                         break;
12969                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12970                         if (flow_dv_convert_action_modify_dec_ttl
12971                                         (mhdr_res, items, &flow_attr, dev_flow,
12972                                          !!(action_flags &
12973                                          MLX5_FLOW_ACTION_DECAP), error))
12974                                 return -rte_errno;
12975                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12976                         break;
12977                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12978                         if (flow_dv_convert_action_modify_ttl
12979                                         (mhdr_res, actions, items, &flow_attr,
12980                                          dev_flow, !!(action_flags &
12981                                          MLX5_FLOW_ACTION_DECAP), error))
12982                                 return -rte_errno;
12983                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12984                         break;
12985                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12986                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12987                         if (flow_dv_convert_action_modify_tcp_seq
12988                                         (mhdr_res, actions, error))
12989                                 return -rte_errno;
12990                         action_flags |= actions->type ==
12991                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12992                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12993                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12994                         break;
12996                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12997                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12998                         if (flow_dv_convert_action_modify_tcp_ack
12999                                         (mhdr_res, actions, error))
13000                                 return -rte_errno;
13001                         action_flags |= actions->type ==
13002                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13003                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13004                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13005                         break;
13006                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13007                         if (flow_dv_convert_action_set_reg
13008                                         (mhdr_res, actions, error))
13009                                 return -rte_errno;
13010                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13011                         break;
13012                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13013                         if (flow_dv_convert_action_copy_mreg
13014                                         (dev, mhdr_res, actions, error))
13015                                 return -rte_errno;
13016                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13017                         break;
13018                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13019                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13020                         dev_flow->handle->fate_action =
13021                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13022                         break;
13023                 case RTE_FLOW_ACTION_TYPE_METER:
13024                         if (!wks->fm)
13025                                 return rte_flow_error_set(error, rte_errno,
13026                                         RTE_FLOW_ERROR_TYPE_ACTION,
13027                                         NULL, "Failed to get meter in flow.");
13028                         /* Set the meter action. */
13029                         dev_flow->dv.actions[actions_n++] =
13030                                 wks->fm->meter_action;
13031                         action_flags |= MLX5_FLOW_ACTION_METER;
13032                         break;
13033                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13034                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13035                                                               actions, error))
13036                                 return -rte_errno;
13037                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13038                         break;
13039                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13040                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13041                                                               actions, error))
13042                                 return -rte_errno;
13043                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13044                         break;
13045                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13046                         sample_act_pos = actions_n;
13047                         sample = (const struct rte_flow_action_sample *)
13048                                  action->conf;
13049                         actions_n++;
13050                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13051                         /* Put the encap action into the group when combined with a port id action. */
13052                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13053                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13054                                 sample_act->action_flags |=
13055                                                         MLX5_FLOW_ACTION_ENCAP;
13056                         break;
13057                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13058                         if (flow_dv_convert_action_modify_field
13059                                         (dev, mhdr_res, actions, attr, error))
13060                                 return -rte_errno;
13061                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13062                         break;
13063                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13064                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13065                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13066                         if (!ct)
13067                                 return rte_flow_error_set(error, EINVAL,
13068                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13069                                                 NULL,
13070                                                 "Failed to get CT object.");
13071                         if (mlx5_aso_ct_available(priv->sh, ct))
13072                                 return rte_flow_error_set(error, rte_errno,
13073                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13074                                                 NULL,
13075                                                 "CT is unavailable.");
13076                         if (ct->is_original)
13077                                 dev_flow->dv.actions[actions_n] =
13078                                                         ct->dr_action_orig;
13079                         else
13080                                 dev_flow->dv.actions[actions_n] =
13081                                                         ct->dr_action_rply;
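                        /* Only the first sub-flow takes a reference on the CT object. */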
13082                         if (flow->ct == 0) {
13083                                 flow->indirect_type =
13084                                                 MLX5_INDIRECT_ACTION_TYPE_CT;
13085                                 flow->ct = owner_idx;
13086                                 __atomic_fetch_add(&ct->refcnt, 1,
13087                                                    __ATOMIC_RELAXED);
13088                         }
13089                         actions_n++;
13090                         action_flags |= MLX5_FLOW_ACTION_CT;
13091                         break;
13092                 case RTE_FLOW_ACTION_TYPE_END:
13093                         actions_end = true;
13094                         if (mhdr_res->actions_num) {
13095                                 /* Create the modify header action if needed. */
13096                                 if (flow_dv_modify_hdr_resource_register
13097                                         (dev, mhdr_res, dev_flow, error))
13098                                         return -rte_errno;
13099                                 dev_flow->dv.actions[modify_action_position] =
13100                                         handle->dvh.modify_hdr->action;
13101                         }
13102                         /*
13103                          * Handle the AGE and COUNT actions with a single HW
13104                          * counter when they are not shared.
13105                          */
13106                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13107                                 if ((non_shared_age && count) ||
13108                                     !(priv->sh->flow_hit_aso_en &&
13109                                       (attr->group || attr->transfer))) {
13110                                         /* Create age tracking by counters. */
13111                                         cnt_act = flow_dv_prepare_counter
13112                                                                 (dev, dev_flow,
13113                                                                  flow, count,
13114                                                                  non_shared_age,
13115                                                                  error);
13116                                         if (!cnt_act)
13117                                                 return -rte_errno;
13118                                         dev_flow->dv.actions[age_act_pos] =
13119                                                                 cnt_act->action;
13120                                         break;
13121                                 }
13122                                 if (!flow->age && non_shared_age) {
13123                                         flow->age = flow_dv_aso_age_alloc
13124                                                                 (dev, error);
13125                                         if (!flow->age)
13126                                                 return -rte_errno;
13127                                         flow_dv_aso_age_params_init
13128                                                     (dev, flow->age,
13129                                                      non_shared_age->context ?
13130                                                      non_shared_age->context :
13131                                                      (void *)(uintptr_t)
13132                                                      (dev_flow->flow_idx),
13133                                                      non_shared_age->timeout);
13134                                 }
13135                                 age_act = flow_aso_age_get_by_idx(dev,
13136                                                                   flow->age);
13137                                 dev_flow->dv.actions[age_act_pos] =
13138                                                              age_act->dr_action;
13139                         }
13140                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13141                                 /*
13142                                  * Create one count action, to be used
13143                                  * by all sub-flows.
13144                                  */
13145                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13146                                                                   flow, count,
13147                                                                   NULL, error);
13148                                 if (!cnt_act)
13149                                         return -rte_errno;
13150                                 dev_flow->dv.actions[actions_n++] =
13151                                                                 cnt_act->action;
13152                         }
13153                 default:
13154                         break;
13155                 }
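                /*
                 * Reserve one action slot for the modify header action the
                 * first time a header-modify action is seen; the slot is
                 * filled in the RTE_FLOW_ACTION_TYPE_END case above.
                 */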
13156                 if (mhdr_res->actions_num &&
13157                     modify_action_position == UINT32_MAX)
13158                         modify_action_position = actions_n++;
13159         }
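        /* Translate the pattern items into the matcher mask/value buffers. */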
13160         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13161                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13162                 int item_type = items->type;
13163
13164                 if (!mlx5_flow_os_item_supported(item_type))
13165                         return rte_flow_error_set(error, ENOTSUP,
13166                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13167                                                   NULL, "item not supported");
13168                 switch (item_type) {
13169                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13170                         flow_dv_translate_item_port_id
13171                                 (dev, match_mask, match_value, items, attr);
13172                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13173                         break;
13174                 case RTE_FLOW_ITEM_TYPE_ETH:
13175                         flow_dv_translate_item_eth(match_mask, match_value,
13176                                                    items, tunnel,
13177                                                    dev_flow->dv.group);
13178                         matcher.priority = action_flags &
13179                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13180                                         !dev_flow->external ?
13181                                         MLX5_PRIORITY_MAP_L3 :
13182                                         MLX5_PRIORITY_MAP_L2;
13183                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13184                                              MLX5_FLOW_LAYER_OUTER_L2;
13185                         break;
13186                 case RTE_FLOW_ITEM_TYPE_VLAN:
13187                         flow_dv_translate_item_vlan(dev_flow,
13188                                                     match_mask, match_value,
13189                                                     items, tunnel,
13190                                                     dev_flow->dv.group);
13191                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13192                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13193                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13194                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13195                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13196                         break;
13197                 case RTE_FLOW_ITEM_TYPE_IPV4:
13198                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13199                                                   &item_flags, &tunnel);
13200                         flow_dv_translate_item_ipv4(match_mask, match_value,
13201                                                     items, tunnel,
13202                                                     dev_flow->dv.group);
13203                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13204                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13205                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13206                         if (items->mask != NULL &&
13207                             ((const struct rte_flow_item_ipv4 *)
13208                              items->mask)->hdr.next_proto_id) {
13209                                 next_protocol =
13210                                         ((const struct rte_flow_item_ipv4 *)
13211                                          (items->spec))->hdr.next_proto_id;
13212                                 next_protocol &=
13213                                         ((const struct rte_flow_item_ipv4 *)
13214                                          (items->mask))->hdr.next_proto_id;
13215                         } else {
13216                                 /* Reset for inner layer. */
13217                                 next_protocol = 0xff;
13218                         }
13219                         break;
13220                 case RTE_FLOW_ITEM_TYPE_IPV6:
13221                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13222                                                   &item_flags, &tunnel);
13223                         flow_dv_translate_item_ipv6(match_mask, match_value,
13224                                                     items, tunnel,
13225                                                     dev_flow->dv.group);
13226                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13227                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13228                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13229                         if (items->mask != NULL &&
13230                             ((const struct rte_flow_item_ipv6 *)
13231                              items->mask)->hdr.proto) {
13232                                 next_protocol =
13233                                         ((const struct rte_flow_item_ipv6 *)
13234                                          items->spec)->hdr.proto;
13235                                 next_protocol &=
13236                                         ((const struct rte_flow_item_ipv6 *)
13237                                          items->mask)->hdr.proto;
13238                         } else {
13239                                 /* Reset for inner layer. */
13240                                 next_protocol = 0xff;
13241                         }
13242                         break;
13243                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13244                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13245                                                              match_value,
13246                                                              items, tunnel);
13247                         last_item = tunnel ?
13248                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13249                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13250                         if (items->mask != NULL &&
13251                             ((const struct rte_flow_item_ipv6_frag_ext *)
13252                              items->mask)->hdr.next_header) {
13253                                 next_protocol =
13254                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13255                                  items->spec)->hdr.next_header;
13256                                 next_protocol &=
13257                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13258                                  items->mask)->hdr.next_header;
13259                         } else {
13260                                 /* Reset for inner layer. */
13261                                 next_protocol = 0xff;
13262                         }
13263                         break;
13264                 case RTE_FLOW_ITEM_TYPE_TCP:
13265                         flow_dv_translate_item_tcp(match_mask, match_value,
13266                                                    items, tunnel);
13267                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13268                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13269                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13270                         break;
13271                 case RTE_FLOW_ITEM_TYPE_UDP:
13272                         flow_dv_translate_item_udp(match_mask, match_value,
13273                                                    items, tunnel);
13274                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13275                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13276                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13277                         break;
13278                 case RTE_FLOW_ITEM_TYPE_GRE:
13279                         flow_dv_translate_item_gre(match_mask, match_value,
13280                                                    items, tunnel);
13281                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13282                         last_item = MLX5_FLOW_LAYER_GRE;
13283                         break;
13284                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13285                         flow_dv_translate_item_gre_key(match_mask,
13286                                                        match_value, items);
13287                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13288                         break;
13289                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13290                         flow_dv_translate_item_nvgre(match_mask, match_value,
13291                                                      items, tunnel);
13292                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13293                         last_item = MLX5_FLOW_LAYER_GRE;
13294                         break;
13295                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13296                         flow_dv_translate_item_vxlan(dev, attr,
13297                                                      match_mask, match_value,
13298                                                      items, tunnel);
13299                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13300                         last_item = MLX5_FLOW_LAYER_VXLAN;
13301                         break;
13302                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13303                         flow_dv_translate_item_vxlan_gpe(match_mask,
13304                                                          match_value, items,
13305                                                          tunnel);
13306                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13307                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13308                         break;
13309                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13310                         flow_dv_translate_item_geneve(match_mask, match_value,
13311                                                       items, tunnel);
13312                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13313                         last_item = MLX5_FLOW_LAYER_GENEVE;
13314                         break;
13315                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13316                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13317                                                           match_value,
13318                                                           items, error);
13319                         if (ret)
13320                                 return rte_flow_error_set(error, -ret,
13321                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13322                                         "cannot create GENEVE TLV option");
13323                         flow->geneve_tlv_option = 1;
13324                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13325                         break;
13326                 case RTE_FLOW_ITEM_TYPE_MPLS:
13327                         flow_dv_translate_item_mpls(match_mask, match_value,
13328                                                     items, last_item, tunnel);
13329                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13330                         last_item = MLX5_FLOW_LAYER_MPLS;
13331                         break;
13332                 case RTE_FLOW_ITEM_TYPE_MARK:
13333                         flow_dv_translate_item_mark(dev, match_mask,
13334                                                     match_value, items);
13335                         last_item = MLX5_FLOW_ITEM_MARK;
13336                         break;
13337                 case RTE_FLOW_ITEM_TYPE_META:
13338                         flow_dv_translate_item_meta(dev, match_mask,
13339                                                     match_value, attr, items);
13340                         last_item = MLX5_FLOW_ITEM_METADATA;
13341                         break;
13342                 case RTE_FLOW_ITEM_TYPE_ICMP:
13343                         flow_dv_translate_item_icmp(match_mask, match_value,
13344                                                     items, tunnel);
13345                         last_item = MLX5_FLOW_LAYER_ICMP;
13346                         break;
13347                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13348                         flow_dv_translate_item_icmp6(match_mask, match_value,
13349                                                       items, tunnel);
13350                         last_item = MLX5_FLOW_LAYER_ICMP6;
13351                         break;
13352                 case RTE_FLOW_ITEM_TYPE_TAG:
13353                         flow_dv_translate_item_tag(dev, match_mask,
13354                                                    match_value, items);
13355                         last_item = MLX5_FLOW_ITEM_TAG;
13356                         break;
13357                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13358                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13359                                                         match_value, items);
13360                         last_item = MLX5_FLOW_ITEM_TAG;
13361                         break;
13362                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13363                         flow_dv_translate_item_tx_queue(dev, match_mask,
13364                                                         match_value,
13365                                                         items);
13366                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13367                         break;
13368                 case RTE_FLOW_ITEM_TYPE_GTP:
13369                         flow_dv_translate_item_gtp(match_mask, match_value,
13370                                                    items, tunnel);
13371                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13372                         last_item = MLX5_FLOW_LAYER_GTP;
13373                         break;
13374                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13375                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13376                                                           match_value,
13377                                                           items);
13378                         if (ret)
13379                                 return rte_flow_error_set(error, -ret,
13380                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13381                                         "cannot create GTP PSC item");
13382                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13383                         break;
13384                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13385                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13386                                 /* Create it only the first time it is used. */
13387                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13388                                 if (ret)
13389                                         return rte_flow_error_set
13390                                                 (error, -ret,
13391                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13392                                                 NULL,
13393                                                 "cannot create eCPRI parser");
13394                         }
13395                         flow_dv_translate_item_ecpri(dev, match_mask,
13396                                                      match_value, items,
13397                                                      last_item);
13398                         /* No other protocol should follow eCPRI layer. */
13399                         last_item = MLX5_FLOW_LAYER_ECPRI;
13400                         break;
13401                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13402                         flow_dv_translate_item_integrity(items, integrity_items,
13403                                                          &last_item);
13404                         break;
13405                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13406                         flow_dv_translate_item_aso_ct(dev, match_mask,
13407                                                       match_value, items);
13408                         break;
13409                 default:
13410                         break;
13411                 }
13412                 item_flags |= last_item;
13413         }
13414         /*
13415          * When E-Switch mode is enabled, we have two cases where we need to
13416          * set the source port manually.
13417          * The first is the case of a NIC steering rule, and the second is
13418          * an E-Switch rule where no port_id item was found. In both cases
13419          * the source port is set according to the current port in use.
13420          */
13421         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13422             (priv->representor || priv->master)) {
13423                 if (flow_dv_translate_item_port_id(dev, match_mask,
13424                                                    match_value, NULL, attr))
13425                         return -rte_errno;
13426         }
13427         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
13428                 flow_dv_translate_item_integrity_post(match_mask, match_value,
13429                                                       integrity_items,
13430                                                       item_flags);
13431         }
13432 #ifdef RTE_LIBRTE_MLX5_DEBUG
13433         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13434                                               dev_flow->dv.value.buf));
13435 #endif
13436         /*
13437          * Layers may already be initialized from the prefix flow if this dev_flow
13438          * is the suffix flow.
13439          */
13440         handle->layers |= item_flags;
13441         if (action_flags & MLX5_FLOW_ACTION_RSS)
13442                 flow_dv_hashfields_set(dev_flow, rss_desc);
13443         /* If the sample action contains an RSS action, the Sample/Mirror
13444          * resource should be registered after the hash fields are updated.
13445          */
13446         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13447                 ret = flow_dv_translate_action_sample(dev,
13448                                                       sample,
13449                                                       dev_flow, attr,
13450                                                       &num_of_dest,
13451                                                       sample_actions,
13452                                                       &sample_res,
13453                                                       error);
13454                 if (ret < 0)
13455                         return ret;
13456                 ret = flow_dv_create_action_sample(dev,
13457                                                    dev_flow,
13458                                                    num_of_dest,
13459                                                    &sample_res,
13460                                                    &mdest_res,
13461                                                    sample_actions,
13462                                                    action_flags,
13463                                                    error);
13464                 if (ret < 0)
13465                         return rte_flow_error_set
13466                                                 (error, rte_errno,
13467                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13468                                                 NULL,
13469                                                 "cannot create sample action");
13470                 if (num_of_dest > 1) {
13471                         dev_flow->dv.actions[sample_act_pos] =
13472                         dev_flow->dv.dest_array_res->action;
13473                 } else {
13474                         dev_flow->dv.actions[sample_act_pos] =
13475                         dev_flow->dv.sample_res->verbs_action;
13476                 }
13477         }
13478         /*
13479          * For multiple destinations (sample action with ratio=1), the encap
13480          * action and port id action will be combined into a group action,
13481          * so the original actions must be removed from the flow and only
13482          * the sample action used instead.
13483          */
13484         if (num_of_dest > 1 &&
13485             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13486                 int i;
13487                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13488
13489                 for (i = 0; i < actions_n; i++) {
13490                         if ((sample_act->dr_encap_action &&
13491                                 sample_act->dr_encap_action ==
13492                                 dev_flow->dv.actions[i]) ||
13493                                 (sample_act->dr_port_id_action &&
13494                                 sample_act->dr_port_id_action ==
13495                                 dev_flow->dv.actions[i]) ||
13496                                 (sample_act->dr_jump_action &&
13497                                 sample_act->dr_jump_action ==
13498                                 dev_flow->dv.actions[i]))
13499                                 continue;
13500                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13501                 }
13502                 memcpy((void *)dev_flow->dv.actions,
13503                                 (void *)temp_actions,
13504                                 tmp_actions_n * sizeof(void *));
13505                 actions_n = tmp_actions_n;
13506         }
13507         dev_flow->dv.actions_n = actions_n;
13508         dev_flow->act_flags = action_flags;
13509         if (wks->skip_matcher_reg)
13510                 return 0;
13511         /* Register matcher. */
13512         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13513                                     matcher.mask.size);
13514         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13515                                                      matcher.priority,
13516                                                      dev_flow->external);
13517         /*
13518          * When creating a meter drop flow in the drop table using the
13519          * original 5-tuple match, the matcher priority should be lower
13520          * than the mtr_id matcher.
13521          */
13522         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13523             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13524             matcher.priority <= MLX5_REG_BITS)
13525                 matcher.priority += MLX5_REG_BITS;
13526         /* The reserved field does not need to be set to 0 here. */
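        /* Build the table key that identifies the target flow table. */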
13527         tbl_key.is_fdb = attr->transfer;
13528         tbl_key.is_egress = attr->egress;
13529         tbl_key.level = dev_flow->dv.group;
13530         tbl_key.id = dev_flow->dv.table_id;
13531         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13532                                      tunnel, attr->group, error))
13533                 return -rte_errno;
13534         return 0;
13535 }
13536
13537 /**
13538  * Set the hash RX queue index by hash fields (see enum ibv_rx_hash_fields).
13540  *
13541  * @param[in, out] action
13542  *   Shared RSS action holding hash RX queue objects.
13543  * @param[in] hash_fields
13544  *   Defines combination of packet fields to participate in RX hash.
13547  * @param[in] hrxq_idx
13548  *   Hash RX queue index to set.
13549  *
13550  * @return
13551  *   0 on success, -1 when the hash fields combination is not supported.
13552  */
13553 static int
13554 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13555                               const uint64_t hash_fields,
13556                               uint32_t hrxq_idx)
13557 {
13558         uint32_t *hrxqs = action->hrxq;
13559
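        /*
         * Slot layout of action->hrxq[], one entry per hash variant:
         * [0] IPv4, [1] IPv4/TCP, [2] IPv4/UDP,
         * [3] IPv6, [4] IPv6/TCP, [5] IPv6/UDP, [6] no hash.
         */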
13560         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13561         case MLX5_RSS_HASH_IPV4:
13562                 /* fall-through. */
13563         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13564                 /* fall-through. */
13565         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13566                 hrxqs[0] = hrxq_idx;
13567                 return 0;
13568         case MLX5_RSS_HASH_IPV4_TCP:
13569                 /* fall-through. */
13570         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13571                 /* fall-through. */
13572         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13573                 hrxqs[1] = hrxq_idx;
13574                 return 0;
13575         case MLX5_RSS_HASH_IPV4_UDP:
13576                 /* fall-through. */
13577         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13578                 /* fall-through. */
13579         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13580                 hrxqs[2] = hrxq_idx;
13581                 return 0;
13582         case MLX5_RSS_HASH_IPV6:
13583                 /* fall-through. */
13584         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13585                 /* fall-through. */
13586         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13587                 hrxqs[3] = hrxq_idx;
13588                 return 0;
13589         case MLX5_RSS_HASH_IPV6_TCP:
13590                 /* fall-through. */
13591         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13592                 /* fall-through. */
13593         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13594                 hrxqs[4] = hrxq_idx;
13595                 return 0;
13596         case MLX5_RSS_HASH_IPV6_UDP:
13597                 /* fall-through. */
13598         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13599                 /* fall-through. */
13600         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13601                 hrxqs[5] = hrxq_idx;
13602                 return 0;
13603         case MLX5_RSS_HASH_NONE:
13604                 hrxqs[6] = hrxq_idx;
13605                 return 0;
13606         default:
13607                 return -1;
13608         }
13609 }
13610
13611 /**
13612  * Look up the hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13614  *
13615  * @param[in] dev
13616  *   Pointer to the Ethernet device structure.
13617  * @param[in] idx
13618  *   Shared RSS action ID holding hash RX queue objects.
13619  * @param[in] hash_fields
13620  *   Defines combination of packet fields to participate in RX hash.
13623  *
13624  * @return
13625  *   Valid hash RX queue index, otherwise 0.
13626  */
13627 static uint32_t
13628 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13629                                  const uint64_t hash_fields)
13630 {
13631         struct mlx5_priv *priv = dev->data->dev_private;
13632         struct mlx5_shared_action_rss *shared_rss =
13633             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13634         const uint32_t *hrxqs = shared_rss->hrxq;
13635
13636         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13637         case MLX5_RSS_HASH_IPV4:
13638                 /* fall-through. */
13639         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13640                 /* fall-through. */
13641         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13642                 return hrxqs[0];
13643         case MLX5_RSS_HASH_IPV4_TCP:
13644                 /* fall-through. */
13645         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13646                 /* fall-through. */
13647         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13648                 return hrxqs[1];
13649         case MLX5_RSS_HASH_IPV4_UDP:
13650                 /* fall-through. */
13651         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13652                 /* fall-through. */
13653         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13654                 return hrxqs[2];
13655         case MLX5_RSS_HASH_IPV6:
13656                 /* fall-through. */
13657         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13658                 /* fall-through. */
13659         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13660                 return hrxqs[3];
13661         case MLX5_RSS_HASH_IPV6_TCP:
13662                 /* fall-through. */
13663         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13664                 /* fall-through. */
13665         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13666                 return hrxqs[4];
13667         case MLX5_RSS_HASH_IPV6_UDP:
13668                 /* fall-through. */
13669         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13670                 /* fall-through. */
13671         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13672                 return hrxqs[5];
13673         case MLX5_RSS_HASH_NONE:
13674                 return hrxqs[6];
13675         default:
13676                 return 0;
13677         }
13679 }
13680
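/*
 * Illustrative sketch (not part of the driver flow) of how the two helpers
 * above pair up: __flow_dv_action_rss_hrxq_set() stores one hash RX queue
 * index per hash variant in the shared RSS action, and
 * __flow_dv_action_rss_hrxq_lookup() retrieves it later by the same hash
 * fields key. Assuming a populated shared RSS action `shared_rss` at pool
 * index `idx` and a valid queue index `hrxq_idx` (all hypothetical here):
 *
 *	__flow_dv_action_rss_hrxq_set(shared_rss, MLX5_RSS_HASH_IPV4_TCP,
 *				      hrxq_idx);
 *	...
 *	hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev, idx,
 *						    MLX5_RSS_HASH_IPV4_TCP);
 */
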
13681 /**
13682  * Apply the flow to the NIC, lock free (the mutex should be acquired by the caller).
13684  *
13685  * @param[in] dev
13686  *   Pointer to the Ethernet device structure.
13687  * @param[in, out] flow
13688  *   Pointer to flow structure.
13689  * @param[out] error
13690  *   Pointer to error structure.
13691  *
13692  * @return
13693  *   0 on success, a negative errno value otherwise and rte_errno is set.
13694  */
13695 static int
13696 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13697               struct rte_flow_error *error)
13698 {
13699         struct mlx5_flow_dv_workspace *dv;
13700         struct mlx5_flow_handle *dh;
13701         struct mlx5_flow_handle_dv *dv_h;
13702         struct mlx5_flow *dev_flow;
13703         struct mlx5_priv *priv = dev->data->dev_private;
13704         uint32_t handle_idx;
13705         int n;
13706         int err;
13707         int idx;
13708         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13709         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13710         uint8_t misc_mask;
13711
13712         MLX5_ASSERT(wks);
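        /* Walk all sub-flows created for this rte_flow, last to first. */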
13713         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13714                 dev_flow = &wks->flows[idx];
13715                 dv = &dev_flow->dv;
13716                 dh = dev_flow->handle;
13717                 dv_h = &dh->dvh;
13718                 n = dv->actions_n;
13719                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13720                         if (dv->transfer) {
13721                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13722                                 dv->actions[n++] = priv->sh->dr_drop_action;
13723                         } else {
13724 #ifdef HAVE_MLX5DV_DR
13725                                 /* DR supports drop action placeholder. */
13726                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13727                                 dv->actions[n++] = dv->group ?
13728                                         priv->sh->dr_drop_action :
13729                                         priv->root_drop_action;
13730 #else
13731                                 /* For DV we use the explicit drop queue. */
13732                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13733                                 dv->actions[n++] =
13734                                                 priv->drop_queue.hrxq->action;
13735 #endif
13736                         }
13737                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13738                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13739                         struct mlx5_hrxq *hrxq;
13740                         uint32_t hrxq_idx;
13741
13742                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13743                                                     &hrxq_idx);
13744                         if (!hrxq) {
13745                                 rte_flow_error_set
13746                                         (error, rte_errno,
13747                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13748                                          "cannot get hash queue");
13749                                 goto error;
13750                         }
13751                         dh->rix_hrxq = hrxq_idx;
13752                         dv->actions[n++] = hrxq->action;
13753                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13754                         struct mlx5_hrxq *hrxq = NULL;
13755                         uint32_t hrxq_idx;
13756
13757                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13758                                                 rss_desc->shared_rss,
13759                                                 dev_flow->hash_fields);
13760                         if (hrxq_idx)
13761                                 hrxq = mlx5_ipool_get
13762                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13763                                          hrxq_idx);
13764                         if (!hrxq) {
13765                                 rte_flow_error_set
13766                                         (error, rte_errno,
13767                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13768                                          "cannot get hash queue");
13769                                 goto error;
13770                         }
13771                         dh->rix_srss = rss_desc->shared_rss;
13772                         dv->actions[n++] = hrxq->action;
13773                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13774                         if (!priv->sh->default_miss_action) {
13775                                 rte_flow_error_set
13776                                         (error, rte_errno,
13777                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13778                                          "default miss action was not created.");
13779                                 goto error;
13780                         }
13781                         dv->actions[n++] = priv->sh->default_miss_action;
13782                 }
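                /*
                 * Adjust the match value buffer size to the misc parameter
                 * blocks actually enabled in the matcher.
                 */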
13783                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13784                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13785                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13786                                                (void *)&dv->value, n,
13787                                                dv->actions, &dh->drv_flow);
13788                 if (err) {
13789                         rte_flow_error_set
13790                                 (error, errno,
13791                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13792                                 NULL,
13793                                 (!priv->config.allow_duplicate_pattern &&
13794                                 errno == EEXIST) ?
13795                                 "duplicating pattern is not allowed" :
13796                                 "hardware refuses to create flow");
13797                         goto error;
13798                 }
13799                 if (priv->vmwa_context &&
13800                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13801                         /*
13802                          * The rule contains the VLAN pattern.
13803                          * For a VF we are going to create a VLAN
13804                          * interface to make the hypervisor set the
13805                          * correct e-Switch vport context.
13806                          */
13807                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13808                 }
13809         }
13810         return 0;
13811 error:
13812         err = rte_errno; /* Save rte_errno before cleanup. */
13813         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13814                        handle_idx, dh, next) {
13815                 /* hrxq is a union; don't clear it if the flag is not set. */
13816                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13817                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13818                         dh->rix_hrxq = 0;
13819                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13820                         dh->rix_srss = 0;
13821                 }
13822                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13823                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13824         }
13825         rte_errno = err; /* Restore rte_errno. */
13826         return -rte_errno;
13827 }
13828
13829 void
13830 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
13831                           struct mlx5_list_entry *entry)
13832 {
13833         struct mlx5_flow_dv_matcher *resource = container_of(entry,
13834                                                              typeof(*resource),
13835                                                              entry);
13836
13837         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
13838         mlx5_free(resource);
13839 }
13840
13841 /**
13842  * Release the flow matcher.
13843  *
13844  * @param dev
13845  *   Pointer to Ethernet device.
13846  * @param handle
13847  *   Pointer to the mlx5_flow_handle holding the matcher.
13848  *
13849  * @return
13850  *   1 while a reference on it exists, 0 when freed.
13851  */
13852 static int
13853 flow_dv_matcher_release(struct rte_eth_dev *dev,
13854                         struct mlx5_flow_handle *handle)
13855 {
13856         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13857         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13858                                                             typeof(*tbl), tbl);
13859         int ret;
13860
13861         MLX5_ASSERT(matcher->matcher_object);
13862         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
13863         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13864         return ret;
13865 }
13866
13867 void
13868 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13869 {
13870         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13871         struct mlx5_flow_dv_encap_decap_resource *res =
13872                                        container_of(entry, typeof(*res), entry);
13873
13874         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13875         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13876 }
13877
13878 /**
13879  * Release an encap/decap resource.
13880  *
13881  * @param dev
13882  *   Pointer to Ethernet device.
13883  * @param encap_decap_idx
13884  *   Index of encap decap resource.
13885  *
13886  * @return
13887  *   1 while a reference on it exists, 0 when freed.
13888  */
13889 static int
13890 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13891                                      uint32_t encap_decap_idx)
13892 {
13893         struct mlx5_priv *priv = dev->data->dev_private;
13894         struct mlx5_flow_dv_encap_decap_resource *resource;
13895
13896         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13897                                   encap_decap_idx);
13898         if (!resource)
13899                 return 0;
13900         MLX5_ASSERT(resource->action);
13901         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
13902 }
13903
13904 /**
13905  * Release a jump to table action resource.
13906  *
13907  * @param dev
13908  *   Pointer to Ethernet device.
13909  * @param rix_jump
13910  *   Index to the jump action resource.
13911  *
13912  * @return
13913  *   1 while a reference on it exists, 0 when freed.
13914  */
13915 static int
13916 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13917                                   uint32_t rix_jump)
13918 {
13919         struct mlx5_priv *priv = dev->data->dev_private;
13920         struct mlx5_flow_tbl_data_entry *tbl_data;
13921
13922         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13923                                   rix_jump);
13924         if (!tbl_data)
13925                 return 0;
13926         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13927 }
13928
13929 void
13930 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13931 {
13932         struct mlx5_flow_dv_modify_hdr_resource *res =
13933                 container_of(entry, typeof(*res), entry);
13934         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13935
13936         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13937         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
13938 }
13939
13940 /**
13941  * Release a modify-header resource.
13942  *
13943  * @param dev
13944  *   Pointer to Ethernet device.
13945  * @param handle
13946  *   Pointer to mlx5_flow_handle.
13947  *
13948  * @return
13949  *   1 while a reference on it exists, 0 when freed.
13950  */
13951 static int
13952 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13953                                     struct mlx5_flow_handle *handle)
13954 {
13955         struct mlx5_priv *priv = dev->data->dev_private;
13956         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13957
13958         MLX5_ASSERT(entry->action);
13959         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13960 }
13961
13962 void
13963 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13964 {
13965         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13966         struct mlx5_flow_dv_port_id_action_resource *resource =
13967                                   container_of(entry, typeof(*resource), entry);
13968
13969         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13970         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
13971 }
13972
13973 /**
13974  * Release port ID action resource.
13975  *
13976  * @param dev
13977  *   Pointer to Ethernet device.
13978  * @param port_id
13979  *   Index to port ID action resource.
13980  *
13981  * @return
13982  *   1 while a reference on it exists, 0 when freed.
13983  */
13984 static int
13985 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13986                                         uint32_t port_id)
13987 {
13988         struct mlx5_priv *priv = dev->data->dev_private;
13989         struct mlx5_flow_dv_port_id_action_resource *resource;
13990
13991         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13992         if (!resource)
13993                 return 0;
13994         MLX5_ASSERT(resource->action);
13995         return mlx5_list_unregister(priv->sh->port_id_action_list,
13996                                     &resource->entry);
13997 }
13998
13999 /**
14000  * Release shared RSS action resource.
14001  *
14002  * @param dev
14003  *   Pointer to Ethernet device.
14004  * @param srss
14005  *   Shared RSS action index.
14006  */
14007 static void
14008 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14009 {
14010         struct mlx5_priv *priv = dev->data->dev_private;
14011         struct mlx5_shared_action_rss *shared_rss;
14012
14013         shared_rss = mlx5_ipool_get
14014                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14015         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14016 }
14017
14018 void
14019 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14020 {
14021         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14022         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14023                         container_of(entry, typeof(*resource), entry);
14024
14025         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14026         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14027 }
14028
14029 /**
14030  * Release push vlan action resource.
14031  *
14032  * @param dev
14033  *   Pointer to Ethernet device.
14034  * @param handle
14035  *   Pointer to mlx5_flow_handle.
14036  *
14037  * @return
14038  *   1 while a reference on it exists, 0 when freed.
14039  */
14040 static int
14041 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14042                                           struct mlx5_flow_handle *handle)
14043 {
14044         struct mlx5_priv *priv = dev->data->dev_private;
14045         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14046         uint32_t idx = handle->dvh.rix_push_vlan;
14047
14048         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14049         if (!resource)
14050                 return 0;
14051         MLX5_ASSERT(resource->action);
14052         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14053                                     &resource->entry);
14054 }
14055
14056 /**
14057  * Release the fate resource.
14058  *
14059  * @param dev
14060  *   Pointer to Ethernet device.
14061  * @param handle
14062  *   Pointer to mlx5_flow_handle.
14063  */
14064 static void
14065 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14066                                struct mlx5_flow_handle *handle)
14067 {
14068         if (!handle->rix_fate)
14069                 return;
14070         switch (handle->fate_action) {
14071         case MLX5_FLOW_FATE_QUEUE:
14072                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14073                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14074                 break;
14075         case MLX5_FLOW_FATE_JUMP:
14076                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14077                 break;
14078         case MLX5_FLOW_FATE_PORT_ID:
14079                 flow_dv_port_id_action_resource_release(dev,
14080                                 handle->rix_port_id_action);
14081                 break;
14082         default:
14083                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14084                 break;
14085         }
14086         handle->rix_fate = 0;
14087 }
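/*
 * Editor's note (an assumption, not stated in this excerpt): the rix_*
 * fields consumed above (rix_hrxq, rix_jump and rix_port_id_action)
 * appear to overlay the same storage as the generic handle->rix_fate,
 * selected by handle->fate_action. A minimal sketch of that presumed
 * layout:
 *
 *     union {
 *         uint32_t rix_hrxq;           // MLX5_FLOW_FATE_QUEUE
 *         uint32_t rix_jump;           // MLX5_FLOW_FATE_JUMP
 *         uint32_t rix_port_id_action; // MLX5_FLOW_FATE_PORT_ID
 *         uint32_t rix_fate;           // generic view, 0 == no fate
 *     };
 *
 * Clearing rix_fate at the end therefore invalidates whichever member
 * was active, so a repeated release call becomes a harmless no-op via
 * the early return on !handle->rix_fate.
 */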
14088
14089 void
14090 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14091                          struct mlx5_list_entry *entry)
14092 {
14093         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14094                                                               typeof(*resource),
14095                                                               entry);
14096         struct rte_eth_dev *dev = resource->dev;
14097         struct mlx5_priv *priv = dev->data->dev_private;
14098
14099         if (resource->verbs_action)
14100                 claim_zero(mlx5_flow_os_destroy_flow_action
14101                                                       (resource->verbs_action));
14102         if (resource->normal_path_tbl)
14103                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14104                                              resource->normal_path_tbl);
14105         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14106         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14107         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14108 }
14109
14110 /**
14111  * Release a sample resource.
14112  *
14113  * @param dev
14114  *   Pointer to Ethernet device.
14115  * @param handle
14116  *   Pointer to mlx5_flow_handle.
14117  *
14118  * @return
14119  *   1 while a reference on it exists, 0 when freed.
14120  */
14121 static int
14122 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14123                                      struct mlx5_flow_handle *handle)
14124 {
14125         struct mlx5_priv *priv = dev->data->dev_private;
14126         struct mlx5_flow_dv_sample_resource *resource;
14127
14128         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14129                                   handle->dvh.rix_sample);
14130         if (!resource)
14131                 return 0;
14132         MLX5_ASSERT(resource->verbs_action);
14133         return mlx5_list_unregister(priv->sh->sample_action_list,
14134                                     &resource->entry);
14135 }
14136
14137 void
14138 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14139                              struct mlx5_list_entry *entry)
14140 {
14141         struct mlx5_flow_dv_dest_array_resource *resource =
14142                         container_of(entry, typeof(*resource), entry);
14143         struct rte_eth_dev *dev = resource->dev;
14144         struct mlx5_priv *priv = dev->data->dev_private;
14145         uint32_t i = 0;
14146
14147         MLX5_ASSERT(resource->action);
14148         if (resource->action)
14149                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14150         for (; i < resource->num_of_dest; i++)
14151                 flow_dv_sample_sub_actions_release(dev,
14152                                                    &resource->sample_idx[i]);
14153         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14154         DRV_LOG(DEBUG, "destination array resource %p: removed",
14155                 (void *)resource);
14156 }
14157
14158 /**
14159  * Release a destination array resource.
14160  *
14161  * @param dev
14162  *   Pointer to Ethernet device.
14163  * @param handle
14164  *   Pointer to mlx5_flow_handle.
14165  *
14166  * @return
14167  *   1 while a reference on it exists, 0 when freed.
14168  */
14169 static int
14170 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14171                                     struct mlx5_flow_handle *handle)
14172 {
14173         struct mlx5_priv *priv = dev->data->dev_private;
14174         struct mlx5_flow_dv_dest_array_resource *resource;
14175
14176         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14177                                   handle->dvh.rix_dest_array);
14178         if (!resource)
14179                 return 0;
14180         MLX5_ASSERT(resource->action);
14181         return mlx5_list_unregister(priv->sh->dest_array_list,
14182                                     &resource->entry);
14183 }
14184
14185 static void
14186 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14187 {
14188         struct mlx5_priv *priv = dev->data->dev_private;
14189         struct mlx5_dev_ctx_shared *sh = priv->sh;
14190         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14191                                 sh->geneve_tlv_option_resource;
14192         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14193         if (geneve_opt_resource) {
14194                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14195                                          __ATOMIC_RELAXED))) {
14196                         claim_zero(mlx5_devx_cmd_destroy
14197                                         (geneve_opt_resource->obj));
14198                         mlx5_free(sh->geneve_tlv_option_resource);
14199                         sh->geneve_tlv_option_resource = NULL;
14200                 }
14201         }
14202         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14203 }
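/*
 * The function above is the usual teardown pattern for a singleton,
 * reference-counted resource: take the lock, decrement, and destroy only
 * on the transition to zero. A minimal sketch of the same pattern for a
 * hypothetical resource (names below are illustrative only):
 *
 *     rte_spinlock_lock(&sh->res_sl);
 *     if (res && !__atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED)) {
 *         claim_zero(destroy_obj(res->obj)); // hypothetical destructor
 *         mlx5_free(res);
 *         sh->res = NULL;
 *     }
 *     rte_spinlock_unlock(&sh->res_sl);
 *
 * Relaxed ordering is sufficient because the spinlock already serializes
 * every access to the resource pointer and its counter.
 */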
14204
14205 /**
14206  * Remove the flow from the NIC but keep it in memory.
14207  * Lock free, (mutex should be acquired by caller).
14208  *
14209  * @param[in] dev
14210  *   Pointer to Ethernet device.
14211  * @param[in, out] flow
14212  *   Pointer to flow structure.
14213  */
14214 static void
14215 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14216 {
14217         struct mlx5_flow_handle *dh;
14218         uint32_t handle_idx;
14219         struct mlx5_priv *priv = dev->data->dev_private;
14220
14221         if (!flow)
14222                 return;
14223         handle_idx = flow->dev_handles;
14224         while (handle_idx) {
14225                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14226                                     handle_idx);
14227                 if (!dh)
14228                         return;
14229                 if (dh->drv_flow) {
14230                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14231                         dh->drv_flow = NULL;
14232                 }
14233                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14234                         flow_dv_fate_resource_release(dev, dh);
14235                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14236                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14237                 handle_idx = dh->next.next;
14238         }
14239 }
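/*
 * Device handles are chained by ipool indices rather than raw pointers,
 * so the walk above resolves every index back to memory before touching
 * it. The traversal idiom, in sketch form:
 *
 *     uint32_t idx = flow->dev_handles;   // 0 terminates the list
 *     while (idx) {
 *         dh = mlx5_ipool_get(pool, idx);
 *         if (!dh)
 *             break;                      // defensive: stale index
 *         ...release per-handle resources...
 *         idx = dh->next.next;            // index of the next handle
 *     }
 *
 * Index 0 doubles as the list terminator, matching the while (handle_idx)
 * test above.
 */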
14240
14241 /**
14242  * Remove the flow from the NIC and the memory.
14243  * Lock free, (mutex should be acquired by caller).
14244  *
14245  * @param[in] dev
14246  *   Pointer to the Ethernet device structure.
14247  * @param[in, out] flow
14248  *   Pointer to flow structure.
14249  */
14250 static void
14251 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14252 {
14253         struct mlx5_flow_handle *dev_handle;
14254         struct mlx5_priv *priv = dev->data->dev_private;
14255         struct mlx5_flow_meter_info *fm = NULL;
14256         uint32_t srss = 0;
14257
14258         if (!flow)
14259                 return;
14260         flow_dv_remove(dev, flow);
14261         if (flow->counter) {
14262                 flow_dv_counter_free(dev, flow->counter);
14263                 flow->counter = 0;
14264         }
14265         if (flow->meter) {
14266                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14267                 if (fm)
14268                         mlx5_flow_meter_detach(priv, fm);
14269                 flow->meter = 0;
14270         }
14271         /* Keep the current age handling by default. */
14272         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14273                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14274         else if (flow->age)
14275                 flow_dv_aso_age_release(dev, flow->age);
14276         if (flow->geneve_tlv_option) {
14277                 flow_dv_geneve_tlv_option_resource_release(dev);
14278                 flow->geneve_tlv_option = 0;
14279         }
14280         while (flow->dev_handles) {
14281                 uint32_t tmp_idx = flow->dev_handles;
14282
14283                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14284                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14285                 if (!dev_handle)
14286                         return;
14287                 flow->dev_handles = dev_handle->next.next;
14288                 if (dev_handle->dvh.matcher)
14289                         flow_dv_matcher_release(dev, dev_handle);
14290                 if (dev_handle->dvh.rix_sample)
14291                         flow_dv_sample_resource_release(dev, dev_handle);
14292                 if (dev_handle->dvh.rix_dest_array)
14293                         flow_dv_dest_array_resource_release(dev, dev_handle);
14294                 if (dev_handle->dvh.rix_encap_decap)
14295                         flow_dv_encap_decap_resource_release(dev,
14296                                 dev_handle->dvh.rix_encap_decap);
14297                 if (dev_handle->dvh.modify_hdr)
14298                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14299                 if (dev_handle->dvh.rix_push_vlan)
14300                         flow_dv_push_vlan_action_resource_release(dev,
14301                                                                   dev_handle);
14302                 if (dev_handle->dvh.rix_tag)
14303                         flow_dv_tag_release(dev,
14304                                             dev_handle->dvh.rix_tag);
14305                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14306                         flow_dv_fate_resource_release(dev, dev_handle);
14307                 else if (!srss)
14308                         srss = dev_handle->rix_srss;
14309                 if (fm && dev_handle->is_meter_flow_id &&
14310                     dev_handle->split_flow_id)
14311                         mlx5_ipool_free(fm->flow_ipool,
14312                                         dev_handle->split_flow_id);
14313                 else if (dev_handle->split_flow_id &&
14314                     !dev_handle->is_meter_flow_id)
14315                         mlx5_ipool_free(priv->sh->ipool
14316                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14317                                         dev_handle->split_flow_id);
14318                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14319                            tmp_idx);
14320         }
14321         if (srss)
14322                 flow_dv_shared_rss_action_release(dev, srss);
14323 }
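/*
 * Note on the loop above: a handle whose fate is MLX5_FLOW_FATE_SHARED_RSS
 * only records the action index in srss while iterating; the shared RSS
 * reference is dropped once, after all handles of the flow are freed.
 * Releasing it inside the loop could drop the flow's reference while a
 * later handle of the same flow still points at the shared action.
 */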
14324
14325 /**
14326  * Release array of hash RX queue objects.
14327  * Helper function.
14328  *
14329  * @param[in] dev
14330  *   Pointer to the Ethernet device structure.
14331  * @param[in, out] hrxqs
14332  *   Array of hash RX queue objects.
14333  *
14334  * @return
14335  *   Total number of references to hash RX queue objects in *hrxqs* array
14336  *   after this operation.
14337  */
14338 static int
14339 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14340                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14341 {
14342         size_t i;
14343         int remaining = 0;
14344
14345         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14346                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14347
14348                 if (!ret)
14349                         (*hrxqs)[i] = 0;
14350                 remaining += ret;
14351         }
14352         return remaining;
14353 }
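/*
 * __flow_dv_hrxqs_release() deliberately returns the sum of the remaining
 * references instead of a status code, so a caller can tell whether any
 * hrxq of the set is still pinned, as the shared RSS release path further
 * below does:
 *
 *     remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
 *     if (remaining)
 *         return -EBUSY;   // at least one hrxq is still referenced
 */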
14354
14355 /**
14356  * Release all hash RX queue objects representing shared RSS action.
14357  *
14358  * @param[in] dev
14359  *   Pointer to the Ethernet device structure.
14360  * @param[in, out] action
14361  *   Shared RSS action to remove hash RX queue objects from.
14362  *
14363  * @return
14364  *   Total number of references to hash RX queue objects stored in *action*
14365  *   after this operation.
14366  *   Expected to be 0 if no external references are held.
14367  */
14368 static int
14369 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14370                                  struct mlx5_shared_action_rss *shared_rss)
14371 {
14372         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14373 }
14374
14375 /**
14376  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14377  * user input.
14378  *
14379  * Only one hash value is available for each L3+L4 combination.
14380  * For example:
14381  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14382  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can
14383  * share the same slot in mlx5_rss_hash_fields.
14384  *
14385  * @param[in] rss
14386  *   Pointer to the shared action RSS conf.
14387  * @param[in, out] hash_field
14388  *   hash_field variable needed to be adjusted.
14389  *
14390  * @return
14391  *   void
14392  */
14393 static void
14394 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14395                                      uint64_t *hash_field)
14396 {
14397         uint64_t rss_types = rss->origin.types;
14398
14399         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14400         case MLX5_RSS_HASH_IPV4:
14401                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14402                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14403                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14404                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14405                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14406                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14407                         else
14408                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14409                 }
14410                 return;
14411         case MLX5_RSS_HASH_IPV6:
14412                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14413                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14414                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14415                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14416                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14417                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14418                         else
14419                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14420                 }
14421                 return;
14422         case MLX5_RSS_HASH_IPV4_UDP:
14423                 /* fall-through. */
14424         case MLX5_RSS_HASH_IPV6_UDP:
14425                 if (rss_types & RTE_ETH_RSS_UDP) {
14426                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14427                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14428                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14429                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14430                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14431                         else
14432                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14433                 }
14434                 return;
14435         case MLX5_RSS_HASH_IPV4_TCP:
14436                 /* fall-through. */
14437         case MLX5_RSS_HASH_IPV6_TCP:
14438                 if (rss_types & RTE_ETH_RSS_TCP) {
14439                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14440                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14441                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14442                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14443                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14444                         else
14445                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14446                 }
14447                 return;
14448         default:
14449                 return;
14450         }
14451 }
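/*
 * Worked example for the adjustment above, using names from this file:
 * with *hash_field == MLX5_RSS_HASH_IPV4 and rss->origin.types holding
 * an IPv4 layer type plus RTE_ETH_RSS_L3_SRC_ONLY, the IPv4 case clears
 * MLX5_RSS_HASH_IPV4 and sets IBV_RX_HASH_SRC_IPV4, so the pre-created
 * hrxq hashes on the source address only. Without an L3_*_ONLY flag the
 * slot keeps the symmetric MLX5_RSS_HASH_IPV4 value, which is why all
 * three variants can share one mlx5_rss_hash_fields slot.
 */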
14452
14453 /**
14454  * Setup shared RSS action.
14455  * Prepare set of hash RX queue objects sufficient to handle all valid
14456  * hash_fields combinations (see enum ibv_rx_hash_fields).
14457  *
14458  * @param[in] dev
14459  *   Pointer to the Ethernet device structure.
14460  * @param[in] action_idx
14461  *   Shared RSS action ipool index.
14462  * @param[in, out] action
14463  *   Partially initialized shared RSS action.
14464  * @param[out] error
14465  *   Perform verbose error reporting if not NULL. Initialized in case of
14466  *   error only.
14467  *
14468  * @return
14469  *   0 on success, otherwise negative errno value.
14470  */
14471 static int
14472 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14473                            uint32_t action_idx,
14474                            struct mlx5_shared_action_rss *shared_rss,
14475                            struct rte_flow_error *error)
14476 {
14477         struct mlx5_flow_rss_desc rss_desc = { 0 };
14478         size_t i;
14479         int err;
14480
14481         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14482                 return rte_flow_error_set(error, rte_errno,
14483                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14484                                           "cannot setup indirection table");
14485         }
14486         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14487         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14488         rss_desc.const_q = shared_rss->origin.queue;
14489         rss_desc.queue_num = shared_rss->origin.queue_num;
14490         /* Set non-zero value to indicate a shared RSS. */
14491         rss_desc.shared_rss = action_idx;
14492         rss_desc.ind_tbl = shared_rss->ind_tbl;
14493         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14494                 uint32_t hrxq_idx;
14495                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14496                 int tunnel = 0;
14497
14498                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14499                 if (shared_rss->origin.level > 1) {
14500                         hash_fields |= IBV_RX_HASH_INNER;
14501                         tunnel = 1;
14502                 }
14503                 rss_desc.tunnel = tunnel;
14504                 rss_desc.hash_fields = hash_fields;
14505                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14506                 if (!hrxq_idx) {
14507                         rte_flow_error_set
14508                                 (error, rte_errno,
14509                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14510                                  "cannot get hash queue");
14511                         goto error_hrxq_new;
14512                 }
14513                 err = __flow_dv_action_rss_hrxq_set
14514                         (shared_rss, hash_fields, hrxq_idx);
14515                 MLX5_ASSERT(!err);
14516         }
14517         return 0;
14518 error_hrxq_new:
14519         err = rte_errno;
14520         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14521         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14522                 shared_rss->ind_tbl = NULL;
14523         rte_errno = err;
14524         return -rte_errno;
14525 }
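/*
 * The setup loop above materializes one hrxq per mlx5_rss_hash_fields
 * entry up front, so later flow insertion reduces to a table lookup on
 * the (possibly adjusted) hash_fields value instead of creating queue
 * objects on the data path. Hedged sketch with a hypothetical lookup
 * helper (the real accessor is not shown in this excerpt):
 *
 *     uint64_t fields = mlx5_rss_hash_fields[i];
 *     __flow_dv_action_rss_l34_hash_adjust(shared_rss, &fields);
 *     uint32_t hrxq_idx = lookup_hrxq(shared_rss, fields); // hypothetical
 *     // non-zero by construction once setup succeeded, since every
 *     // valid combination was covered by the loop above.
 */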
14526
14527 /**
14528  * Create shared RSS action.
14529  *
14530  * @param[in] dev
14531  *   Pointer to the Ethernet device structure.
14532  * @param[in] conf
14533  *   Shared action configuration.
14534  * @param[in] rss
14535  *   RSS action specification used to create shared action.
14536  * @param[out] error
14537  *   Perform verbose error reporting if not NULL. Initialized in case of
14538  *   error only.
14539  *
14540  * @return
14541  *   A valid shared action ID in case of success, 0 otherwise and
14542  *   rte_errno is set.
14543  */
14544 static uint32_t
14545 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14546                             const struct rte_flow_indir_action_conf *conf,
14547                             const struct rte_flow_action_rss *rss,
14548                             struct rte_flow_error *error)
14549 {
14550         struct mlx5_priv *priv = dev->data->dev_private;
14551         struct mlx5_shared_action_rss *shared_rss = NULL;
14552         void *queue = NULL;
14553         struct rte_flow_action_rss *origin;
14554         const uint8_t *rss_key;
14555         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14556         uint32_t idx;
14557
14558         RTE_SET_USED(conf);
14559         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14560                             0, SOCKET_ID_ANY);
14561         shared_rss = mlx5_ipool_zmalloc
14562                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14563         if (!shared_rss || !queue) {
14564                 rte_flow_error_set(error, ENOMEM,
14565                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14566                                    "cannot allocate resource memory");
14567                 goto error_rss_init;
14568         }
14569         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14570                 rte_flow_error_set(error, E2BIG,
14571                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14572                                    "rss action number out of range");
14573                 goto error_rss_init;
14574         }
14575         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14576                                           sizeof(*shared_rss->ind_tbl),
14577                                           0, SOCKET_ID_ANY);
14578         if (!shared_rss->ind_tbl) {
14579                 rte_flow_error_set(error, ENOMEM,
14580                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14581                                    "cannot allocate resource memory");
14582                 goto error_rss_init;
14583         }
14584         memcpy(queue, rss->queue, queue_size);
14585         shared_rss->ind_tbl->queues = queue;
14586         shared_rss->ind_tbl->queues_n = rss->queue_num;
14587         origin = &shared_rss->origin;
14588         origin->func = rss->func;
14589         origin->level = rss->level;
14590         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14591         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14592         /* NULL RSS key indicates default RSS key. */
14593         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14594         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14595         origin->key = &shared_rss->key[0];
14596         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14597         origin->queue = queue;
14598         origin->queue_num = rss->queue_num;
14599         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14600                 goto error_rss_init;
14601         rte_spinlock_init(&shared_rss->action_rss_sl);
14602         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14603         rte_spinlock_lock(&priv->shared_act_sl);
14604         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14605                      &priv->rss_shared_actions, idx, shared_rss, next);
14606         rte_spinlock_unlock(&priv->shared_act_sl);
14607         return idx;
14608 error_rss_init:
14609         if (shared_rss) {
14610                 if (shared_rss->ind_tbl)
14611                         mlx5_free(shared_rss->ind_tbl);
14612                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14613                                 idx);
14614         }
14615         if (queue)
14616                 mlx5_free(queue);
14617         return 0;
14618 }
14619
14620 /**
14621  * Destroy the shared RSS action.
14622  * Release related hash RX queue objects.
14623  *
14624  * @param[in] dev
14625  *   Pointer to the Ethernet device structure.
14626  * @param[in] idx
14627  *   The shared RSS action object ID to be removed.
14628  * @param[out] error
14629  *   Perform verbose error reporting if not NULL. Initialized in case of
14630  *   error only.
14631  *
14632  * @return
14633  *   0 on success, otherwise negative errno value.
14634  */
14635 static int
14636 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14637                              struct rte_flow_error *error)
14638 {
14639         struct mlx5_priv *priv = dev->data->dev_private;
14640         struct mlx5_shared_action_rss *shared_rss =
14641             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14642         uint32_t old_refcnt = 1;
14643         int remaining;
14644         uint16_t *queue = NULL;
14645
14646         if (!shared_rss)
14647                 return rte_flow_error_set(error, EINVAL,
14648                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14649                                           "invalid shared action");
14650         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14651                                          0, 0, __ATOMIC_ACQUIRE,
14652                                          __ATOMIC_RELAXED))
14653                 return rte_flow_error_set(error, EBUSY,
14654                                           RTE_FLOW_ERROR_TYPE_ACTION,
14655                                           NULL,
14656                                           "shared rss has references");
14657         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14658         if (remaining)
14659                 return rte_flow_error_set(error, EBUSY,
14660                                           RTE_FLOW_ERROR_TYPE_ACTION,
14661                                           NULL,
14662                                           "shared rss hrxq has references");
14663         queue = shared_rss->ind_tbl->queues;
14664         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14665         if (remaining)
14666                 return rte_flow_error_set(error, EBUSY,
14667                                           RTE_FLOW_ERROR_TYPE_ACTION,
14668                                           NULL,
14669                                           "shared rss indirection table has"
14670                                           " references");
14671         mlx5_free(queue);
14672         rte_spinlock_lock(&priv->shared_act_sl);
14673         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14674                      &priv->rss_shared_actions, idx, shared_rss, next);
14675         rte_spinlock_unlock(&priv->shared_act_sl);
14676         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14677                         idx);
14678         return 0;
14679 }
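/*
 * The release above hinges on a compare-and-swap of the reference count
 * from 1 to 0: teardown proceeds only if the caller held the very last
 * reference. The idiom in isolation, assuming a uint32_t refcnt:
 *
 *     uint32_t expected = 1;
 *     if (!__atomic_compare_exchange_n(&obj->refcnt, &expected, 0,
 *                                      0, __ATOMIC_ACQUIRE,
 *                                      __ATOMIC_RELAXED))
 *         return -EBUSY;   // still referenced elsewhere
 *     // last reference confirmed; tearing down obj is now safe
 *
 * The acquire ordering on success ensures all prior uses of the object
 * by other threads are visible before teardown begins.
 */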
14680
14681 /**
14682  * Create indirect action, lock free,
14683  * (mutex should be acquired by caller).
14684  * Dispatcher for action type specific call.
14685  *
14686  * @param[in] dev
14687  *   Pointer to the Ethernet device structure.
14688  * @param[in] conf
14689  *   Shared action configuration.
14690  * @param[in] action
14691  *   Action specification used to create indirect action.
14692  * @param[out] error
14693  *   Perform verbose error reporting if not NULL. Initialized in case of
14694  *   error only.
14695  *
14696  * @return
14697  *   A valid shared action handle in case of success, NULL otherwise and
14698  *   rte_errno is set.
14699  */
14700 static struct rte_flow_action_handle *
14701 flow_dv_action_create(struct rte_eth_dev *dev,
14702                       const struct rte_flow_indir_action_conf *conf,
14703                       const struct rte_flow_action *action,
14704                       struct rte_flow_error *err)
14705 {
14706         struct mlx5_priv *priv = dev->data->dev_private;
14707         uint32_t age_idx = 0;
14708         uint32_t idx = 0;
14709         uint32_t ret = 0;
14710
14711         switch (action->type) {
14712         case RTE_FLOW_ACTION_TYPE_RSS:
14713                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14714                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14715                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14716                 break;
14717         case RTE_FLOW_ACTION_TYPE_AGE:
14718                 age_idx = flow_dv_aso_age_alloc(dev, err);
14719                 if (!age_idx) {
14720                         ret = -rte_errno;
14721                         break;
14722                 }
14723                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14724                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14725                 flow_dv_aso_age_params_init(dev, age_idx,
14726                                         ((const struct rte_flow_action_age *)
14727                                                 action->conf)->context ?
14728                                         ((const struct rte_flow_action_age *)
14729                                                 action->conf)->context :
14730                                         (void *)(uintptr_t)idx,
14731                                         ((const struct rte_flow_action_age *)
14732                                                 action->conf)->timeout);
14733                 ret = age_idx;
14734                 break;
14735         case RTE_FLOW_ACTION_TYPE_COUNT:
14736                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14737                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14738                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14739                 break;
14740         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14741                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14742                                                          err);
14743                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14744                 break;
14745         default:
14746                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14747                                    NULL, "action type not supported");
14748                 break;
14749         }
14750         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14751 }
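/*
 * Sketch of the handle encoding used above (all names from this file):
 * the returned "pointer" is a 32-bit cookie packing the action type in
 * the high bits and the per-type object index in the low bits:
 *
 *     idx = (type << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | obj_idx;
 *     return (struct rte_flow_action_handle *)(uintptr_t)idx;
 *
 * flow_dv_action_destroy() below reverses it:
 *
 *     act_idx = (uint32_t)(uintptr_t)handle;
 *     type    = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *     obj_idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 *
 * Conntrack handles are the exception: MLX5_INDIRECT_ACT_CT_GEN_IDX()
 * additionally embeds the owning port ID.
 */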
14752
14753 /**
14754  * Destroy the indirect action.
14755  * Release action related resources on the NIC and the memory.
14756  * Lock free, (mutex should be acquired by caller).
14757  * Dispatcher for action type specific call.
14758  *
14759  * @param[in] dev
14760  *   Pointer to the Ethernet device structure.
14761  * @param[in] handle
14762  *   The indirect action object handle to be removed.
14763  * @param[out] error
14764  *   Perform verbose error reporting if not NULL. Initialized in case of
14765  *   error only.
14766  *
14767  * @return
14768  *   0 on success, otherwise negative errno value.
14769  */
14770 static int
14771 flow_dv_action_destroy(struct rte_eth_dev *dev,
14772                        struct rte_flow_action_handle *handle,
14773                        struct rte_flow_error *error)
14774 {
14775         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14776         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14777         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14778         struct mlx5_flow_counter *cnt;
14779         uint32_t no_flow_refcnt = 1;
14780         int ret;
14781
14782         switch (type) {
14783         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14784                 return __flow_dv_action_rss_release(dev, idx, error);
14785         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14786                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14787                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14788                                                  &no_flow_refcnt, 1, false,
14789                                                  __ATOMIC_ACQUIRE,
14790                                                  __ATOMIC_RELAXED))
14791                         return rte_flow_error_set(error, EBUSY,
14792                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14793                                                   NULL,
14794                                                   "Indirect count action has references");
14795                 flow_dv_counter_free(dev, idx);
14796                 return 0;
14797         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14798                 ret = flow_dv_aso_age_release(dev, idx);
14799                 if (ret)
14800                         /*
14801                          * In this case, the last flow holding a reference
14802                          * will actually release the age action.
14803                          */
14804                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14805                                 " released with references %d.", idx, ret);
14806                 return 0;
14807         case MLX5_INDIRECT_ACTION_TYPE_CT:
14808                 ret = flow_dv_aso_ct_release(dev, idx, error);
14809                 if (ret < 0)
14810                         return ret;
14811                 if (ret > 0)
14812                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14813                                 "has references %d.", idx, ret);
14814                 return 0;
14815         default:
14816                 return rte_flow_error_set(error, ENOTSUP,
14817                                           RTE_FLOW_ERROR_TYPE_ACTION,
14818                                           NULL,
14819                                           "action type not supported");
14820         }
14821 }
14822
14823 /**
14824  * Updates in place shared RSS action configuration.
14825  *
14826  * @param[in] dev
14827  *   Pointer to the Ethernet device structure.
14828  * @param[in] idx
14829  *   The shared RSS action object ID to be updated.
14830  * @param[in] action_conf
14831  *   RSS action specification used to modify *shared_rss*.
14832  * @param[out] error
14833  *   Perform verbose error reporting if not NULL. Initialized in case of
14834  *   error only.
14835  *
14836  * @return
14837  *   0 on success, otherwise negative errno value.
14838  * @note Currently only update of the RSS queues is supported.
14839  */
14840 static int
14841 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14842                             const struct rte_flow_action_rss *action_conf,
14843                             struct rte_flow_error *error)
14844 {
14845         struct mlx5_priv *priv = dev->data->dev_private;
14846         struct mlx5_shared_action_rss *shared_rss =
14847             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14848         int ret = 0;
14849         void *queue = NULL;
14850         uint16_t *queue_old = NULL;
14851         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14852
14853         if (!shared_rss)
14854                 return rte_flow_error_set(error, EINVAL,
14855                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14856                                           "invalid shared action to update");
14857         if (priv->obj_ops.ind_table_modify == NULL)
14858                 return rte_flow_error_set(error, ENOTSUP,
14859                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14860                                           "cannot modify indirection table");
14861         queue = mlx5_malloc(MLX5_MEM_ZERO,
14862                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14863                             0, SOCKET_ID_ANY);
14864         if (!queue)
14865                 return rte_flow_error_set(error, ENOMEM,
14866                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14867                                           NULL,
14868                                           "cannot allocate resource memory");
14869         memcpy(queue, action_conf->queue, queue_size);
14870         MLX5_ASSERT(shared_rss->ind_tbl);
14871         rte_spinlock_lock(&shared_rss->action_rss_sl);
14872         queue_old = shared_rss->ind_tbl->queues;
14873         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14874                                         queue, action_conf->queue_num, true);
14875         if (ret) {
14876                 mlx5_free(queue);
14877                 ret = rte_flow_error_set(error, rte_errno,
14878                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14879                                           "cannot update indirection table");
14880         } else {
14881                 mlx5_free(queue_old);
14882                 shared_rss->origin.queue = queue;
14883                 shared_rss->origin.queue_num = action_conf->queue_num;
14884         }
14885         rte_spinlock_unlock(&shared_rss->action_rss_sl);
14886         return ret;
14887 }
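/*
 * Ownership note for the update above: the freshly allocated queue array
 * is handed to the indirection table only when mlx5_ind_table_obj_modify()
 * succeeds; on failure it is freed and the old array stays in place.
 *
 *     if (!mlx5_ind_table_obj_modify(dev, ind_tbl, queue, n, true)) {
 *         mlx5_free(queue_old);    // table now owns 'queue'
 *         origin->queue = queue;
 *     } else {
 *         mlx5_free(queue);        // roll back, 'queue_old' survives
 *     }
 *
 * Exactly one of the two buffers outlives the call, so neither a leak
 * nor a dangling queue pointer is possible.
 */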
14888
14889 /**
14890  * Updates in place conntrack context or direction.
14891  * Context update should be synchronized.
14892  *
14893  * @param[in] dev
14894  *   Pointer to the Ethernet device structure.
14895  * @param[in] idx
14896  *   The conntrack object ID to be updated.
14897  * @param[in] update
14898  *   Pointer to the structure of information to update.
14899  * @param[out] error
14900  *   Perform verbose error reporting if not NULL. Initialized in case of
14901  *   error only.
14902  *
14903  * @return
14904  *   0 on success, otherwise negative errno value.
14905  */
14906 static int
14907 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14908                            const struct rte_flow_modify_conntrack *update,
14909                            struct rte_flow_error *error)
14910 {
14911         struct mlx5_priv *priv = dev->data->dev_private;
14912         struct mlx5_aso_ct_action *ct;
14913         const struct rte_flow_action_conntrack *new_prf;
14914         int ret = 0;
14915         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14916         uint32_t dev_idx;
14917
14918         if (PORT_ID(priv) != owner)
14919                 return rte_flow_error_set(error, EACCES,
14920                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14921                                           NULL,
14922                                           "CT object owned by another port");
14923         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14924         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14925         if (!ct->refcnt)
14926                 return rte_flow_error_set(error, ENOMEM,
14927                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14928                                           NULL,
14929                                           "CT object is inactive");
14930         new_prf = &update->new_ct;
14931         if (update->direction)
14932                 ct->is_original = !!new_prf->is_original_dir;
14933         if (update->state) {
14934                 /* Only validate the profile when it needs to be updated. */
14935                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14936                 if (ret)
14937                         return ret;
14938                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14939                 if (ret)
14940                         return rte_flow_error_set(error, EIO,
14941                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14942                                         NULL,
14943                                         "Failed to send CT context update WQE");
14944                 /* Block until ready or a failure. */
14945                 ret = mlx5_aso_ct_available(priv->sh, ct);
14946                 if (ret)
14947                         rte_flow_error_set(error, rte_errno,
14948                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14949                                            NULL,
14950                                            "Timeout to get the CT update");
14951         }
14952         return ret;
14953 }
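/*
 * Hedged sketch of driving the CT update above from an application,
 * through the generic indirect action API (the rte_flow dispatch layer
 * is assumed; it is not part of this excerpt):
 *
 *     struct rte_flow_modify_conntrack upd = {
 *         .new_ct = new_profile,  // consulted only when .state is set
 *         .direction = 1,         // update ct->is_original
 *         .state = 1,             // validate and push new_ct via WQE
 *     };
 *     ret = rte_flow_action_handle_update(port_id, handle, &upd, &err);
 *
 * The WQE submission is asynchronous, hence the blocking wait in
 * mlx5_aso_ct_available() before the update is reported complete.
 */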
14954
14955 /**
14956  * Updates in place shared action configuration, lock free,
14957  * (mutex should be acquired by caller).
14958  *
14959  * @param[in] dev
14960  *   Pointer to the Ethernet device structure.
14961  * @param[in] handle
14962  *   The indirect action object handle to be updated.
14963  * @param[in] update
14964  *   Action specification used to modify the action pointed by *handle*.
14965  *   *update* could be of same type with the action pointed by the *handle*
14966  *   handle argument, or some other structures like a wrapper, depending on
14967  *   the indirect action type.
14968  * @param[out] error
14969  *   Perform verbose error reporting if not NULL. Initialized in case of
14970  *   error only.
14971  *
14972  * @return
14973  *   0 on success, otherwise negative errno value.
14974  */
14975 static int
14976 flow_dv_action_update(struct rte_eth_dev *dev,
14977                         struct rte_flow_action_handle *handle,
14978                         const void *update,
14979                         struct rte_flow_error *err)
14980 {
14981         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14982         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14983         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14984         const void *action_conf;
14985
14986         switch (type) {
14987         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14988                 action_conf = ((const struct rte_flow_action *)update)->conf;
14989                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14990         case MLX5_INDIRECT_ACTION_TYPE_CT:
14991                 return __flow_dv_action_ct_update(dev, idx, update, err);
14992         default:
14993                 return rte_flow_error_set(err, ENOTSUP,
14994                                           RTE_FLOW_ERROR_TYPE_ACTION,
14995                                           NULL,
14996                                           "action type update not supported");
14997         }
14998 }
14999
15000 /**
15001  * Destroy the meter sub policy table rules.
15002  * Lock free, (mutex should be acquired by caller).
15003  *
15004  * @param[in] dev
15005  *   Pointer to Ethernet device.
15006  * @param[in] sub_policy
15007  *   Pointer to meter sub policy table.
15008  */
15009 static void
15010 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15011                              struct mlx5_flow_meter_sub_policy *sub_policy)
15012 {
15013         struct mlx5_priv *priv = dev->data->dev_private;
15014         struct mlx5_flow_tbl_data_entry *tbl;
15015         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15016         struct mlx5_flow_meter_info *next_fm;
15017         struct mlx5_sub_policy_color_rule *color_rule;
15018         void *tmp;
15019         uint32_t i;
15020
15021         for (i = 0; i < RTE_COLORS; i++) {
15022                 next_fm = NULL;
15023                 if (i == RTE_COLOR_GREEN && policy &&
15024                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15025                         next_fm = mlx5_flow_meter_find(priv,
15026                                         policy->act_cnt[i].next_mtr_id, NULL);
15027                 RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15028                                    next_port, tmp) {
15029                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15030                         tbl = container_of(color_rule->matcher->tbl,
15031                                            typeof(*tbl), tbl);
15032                         mlx5_list_unregister(tbl->matchers,
15033                                              &color_rule->matcher->entry);
15034                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15035                                      color_rule, next_port);
15036                         mlx5_free(color_rule);
15037                         if (next_fm)
15038                                 mlx5_flow_meter_detach(priv, next_fm);
15039                 }
15040         }
15041         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15042                 if (sub_policy->rix_hrxq[i]) {
15043                         if (policy && !policy->is_hierarchy)
15044                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15045                         sub_policy->rix_hrxq[i] = 0;
15046                 }
15047                 if (sub_policy->jump_tbl[i]) {
15048                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15049                                                      sub_policy->jump_tbl[i]);
15050                         sub_policy->jump_tbl[i] = NULL;
15051                 }
15052         }
15053         if (sub_policy->tbl_rsc) {
15054                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15055                                              sub_policy->tbl_rsc);
15056                 sub_policy->tbl_rsc = NULL;
15057         }
15058 }
15059
15060 /**
15061  * Destroy policy rules, lock free,
15062  * (mutex should be acquired by caller).
15063  * Iterates over the sub-policy tables of all domains.
15064  *
15065  * @param[in] dev
15066  *   Pointer to the Ethernet device structure.
15067  * @param[in] mtr_policy
15068  *   Meter policy struct.
15069  */
15070 static void
15071 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15072                              struct mlx5_flow_meter_policy *mtr_policy)
15073 {
15074         uint32_t i, j;
15075         struct mlx5_flow_meter_sub_policy *sub_policy;
15076         uint16_t sub_policy_num;
15077
15078         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15079                 sub_policy_num = (mtr_policy->sub_policy_num >>
15080                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15081                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15082                 for (j = 0; j < sub_policy_num; j++) {
15083                         sub_policy = mtr_policy->sub_policys[i][j];
15084                         if (sub_policy)
15085                                 __flow_dv_destroy_sub_policy_rules(dev,
15086                                                                    sub_policy);
15087                 }
15088         }
15089 }
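/*
 * Worked example of the sub_policy_num packing above: the per-domain
 * sub-policy counts live in one word, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits
 * each, so the count for domain i is
 *
 *     n = (sub_policy_num >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
 *         MLX5_MTR_SUB_POLICY_NUM_MASK;
 *
 * Purely for illustration, with a 4-bit field a packed value of 0x21
 * would decode to one sub-policy in domain 0 and two in domain 1 (the
 * real field width is a driver constant, not restated here).
 */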
15090
15091 /**
15092  * Destroy policy action, lock free,
15093  * (mutex should be acquired by caller).
15094  * Dispatcher for action type specific call.
15095  *
15096  * @param[in] dev
15097  *   Pointer to the Ethernet device structure.
15098  * @param[in] mtr_policy
15099  *   Meter policy struct.
15100  */
15101 static void
15102 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15103                       struct mlx5_flow_meter_policy *mtr_policy)
15104 {
15105         struct rte_flow_action *rss_action;
15106         struct mlx5_flow_handle dev_handle;
15107         uint32_t i, j;
15108
15109         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15110                 if (mtr_policy->act_cnt[i].rix_mark) {
15111                         flow_dv_tag_release(dev,
15112                                 mtr_policy->act_cnt[i].rix_mark);
15113                         mtr_policy->act_cnt[i].rix_mark = 0;
15114                 }
15115                 if (mtr_policy->act_cnt[i].modify_hdr) {
15116                         dev_handle.dvh.modify_hdr =
15117                                 mtr_policy->act_cnt[i].modify_hdr;
15118                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15119                 }
15120                 switch (mtr_policy->act_cnt[i].fate_action) {
15121                 case MLX5_FLOW_FATE_SHARED_RSS:
15122                         rss_action = mtr_policy->act_cnt[i].rss;
15123                         mlx5_free(rss_action);
15124                         break;
15125                 case MLX5_FLOW_FATE_PORT_ID:
15126                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15127                                 flow_dv_port_id_action_resource_release(dev,
15128                                 mtr_policy->act_cnt[i].rix_port_id_action);
15129                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15130                         }
15131                         break;
15132                 case MLX5_FLOW_FATE_DROP:
15133                 case MLX5_FLOW_FATE_JUMP:
15134                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15135                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15136                                                 NULL;
15137                         break;
15138                 default:
15139                         /* Queue action: nothing to do. */
15140                         break;
15141                 }
15142         }
15143         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15144                 mtr_policy->dr_drop_action[j] = NULL;
15145 }
15146
15147 /**
15148  * Create policy action per domain, lock free,
15149  * (mutex should be acquired by caller).
15150  * Dispatcher for action type specific call.
15151  *
15152  * @param[in] dev
15153  *   Pointer to the Ethernet device structure.
15154  * @param[in] mtr_policy
15155  *   Meter policy struct.
15156  * @param[in] action
15157  *   Action specification used to create meter actions.
15158  * @param[out] error
15159  *   Perform verbose error reporting if not NULL. Initialized in case of
15160  *   error only.
15161  *
15162  * @return
15163  *   0 on success, otherwise negative errno value.
15164  */
15165 static int
15166 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15167                         struct mlx5_flow_meter_policy *mtr_policy,
15168                         const struct rte_flow_action *actions[RTE_COLORS],
15169                         enum mlx5_meter_domain domain,
15170                         struct rte_mtr_error *error)
15171 {
15172         struct mlx5_priv *priv = dev->data->dev_private;
15173         struct rte_flow_error flow_err;
15174         const struct rte_flow_action *act;
15175         uint64_t action_flags;
15176         struct mlx5_flow_handle dh;
15177         struct mlx5_flow dev_flow;
15178         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15179         int i, ret;
15180         uint8_t egress, transfer;
15181         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15182         union {
15183                 struct mlx5_flow_dv_modify_hdr_resource res;
15184                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15185                             sizeof(struct mlx5_modification_cmd) *
15186                             (MLX5_MAX_MODIFY_NUM + 1)];
15187         } mhdr_dummy;
15188         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15189
15190         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15191         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15192         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15193         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15194         memset(&port_id_action, 0,
15195                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15196         memset(mhdr_res, 0, sizeof(*mhdr_res));
15197         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15198                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15199                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15200         dev_flow.handle = &dh;
15201         dev_flow.dv.port_id_action = &port_id_action;
15202         dev_flow.external = true;
15203         for (i = 0; i < RTE_COLORS; i++) {
15204                 if (i < MLX5_MTR_RTE_COLORS)
15205                         act_cnt = &mtr_policy->act_cnt[i];
15206                 /* Skip the color policy actions creation. */
15207                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15208                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15209                         continue;
15210                 action_flags = 0;
15211                 for (act = actions[i];
15212                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15213                         switch (act->type) {
15214                         case RTE_FLOW_ACTION_TYPE_MARK:
15215                         {
15216                                 uint32_t tag_be = mlx5_flow_mark_set
15217                                         (((const struct rte_flow_action_mark *)
15218                                         (act->conf))->id);
15219
15220                                 if (i >= MLX5_MTR_RTE_COLORS)
15221                                         return -rte_mtr_error_set(error,
15222                                           ENOTSUP,
15223                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15224                                           NULL,
15225                                           "cannot create policy "
15226                                           "mark action for this color");
15227                                 dev_flow.handle->mark = 1;
15228                                 if (flow_dv_tag_resource_register(dev, tag_be,
15229                                                   &dev_flow, &flow_err))
15230                                         return -rte_mtr_error_set(error,
15231                                         ENOTSUP,
15232                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15233                                         NULL,
15234                                         "cannot setup policy mark action");
15235                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15236                                 act_cnt->rix_mark =
15237                                         dev_flow.handle->dvh.rix_tag;
15238                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15239                                 break;
15240                         }
15241                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15242                                 if (i >= MLX5_MTR_RTE_COLORS)
15243                                         return -rte_mtr_error_set(error,
15244                                           ENOTSUP,
15245                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15246                                           NULL,
15247                                           "cannot create policy "
15248                                           "set tag action for this color");
15249                                 if (flow_dv_convert_action_set_tag
15250                                 (dev, mhdr_res,
15251                                 (const struct rte_flow_action_set_tag *)
15252                                 act->conf,  &flow_err))
15253                                         return -rte_mtr_error_set(error,
15254                                         ENOTSUP,
15255                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15256                                         NULL, "cannot convert policy "
15257                                         "set tag action");
15258                                 if (!mhdr_res->actions_num)
15259                                         return -rte_mtr_error_set(error,
15260                                         ENOTSUP,
15261                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15262                                         NULL, "cannot find policy "
15263                                         "set tag action");
15264                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15265                                 break;
15266                         case RTE_FLOW_ACTION_TYPE_DROP:
15267                         {
15268                                 struct mlx5_flow_mtr_mng *mtrmng =
15269                                                 priv->sh->mtrmng;
15270                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15271
15272                                 /*
15273                                  * Create the drop table with
15274                                  * METER DROP level.
15275                                  */
15276                                 if (!mtrmng->drop_tbl[domain]) {
15277                                         mtrmng->drop_tbl[domain] =
15278                                         flow_dv_tbl_resource_get(dev,
15279                                         MLX5_FLOW_TABLE_LEVEL_METER,
15280                                         egress, transfer, false, NULL, 0,
15281                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15282                                         if (!mtrmng->drop_tbl[domain])
15283                                                 return -rte_mtr_error_set
15284                                         (error, ENOTSUP,
15285                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15286                                         NULL,
15287                                         "Failed to create meter drop table");
15288                                 }
15289                                 tbl_data = container_of
15290                                 (mtrmng->drop_tbl[domain],
15291                                 struct mlx5_flow_tbl_data_entry, tbl);
15292                                 if (i < MLX5_MTR_RTE_COLORS) {
15293                                         act_cnt->dr_jump_action[domain] =
15294                                                 tbl_data->jump.action;
15295                                         act_cnt->fate_action =
15296                                                 MLX5_FLOW_FATE_DROP;
15297                                 }
15298                                 if (i == RTE_COLOR_RED)
15299                                         mtr_policy->dr_drop_action[domain] =
15300                                                 tbl_data->jump.action;
15301                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15302                                 break;
15303                         }
15304                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15305                         {
15306                                 if (i >= MLX5_MTR_RTE_COLORS)
15307                                         return -rte_mtr_error_set(error,
15308                                         ENOTSUP,
15309                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15310                                         NULL, "cannot create policy "
15311                                         "fate queue for this color");
15312                                 act_cnt->queue =
15313                                 ((const struct rte_flow_action_queue *)
15314                                         (act->conf))->index;
15315                                 act_cnt->fate_action =
15316                                         MLX5_FLOW_FATE_QUEUE;
15317                                 dev_flow.handle->fate_action =
15318                                         MLX5_FLOW_FATE_QUEUE;
15319                                 mtr_policy->is_queue = 1;
15320                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15321                                 break;
15322                         }
15323                         case RTE_FLOW_ACTION_TYPE_RSS:
15324                         {
15325                                 int rss_size;
15326
15327                                 if (i >= MLX5_MTR_RTE_COLORS)
15328                                         return -rte_mtr_error_set(error,
15329                                           ENOTSUP,
15330                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15331                                           NULL,
15332                                           "cannot create policy "
15333                                           "rss action for this color");
15334                                 /*
15335                                  * Save RSS conf into policy struct
15336                                  * for translate stage.
15337                                  */
15338                                 rss_size = (int)rte_flow_conv
15339                                         (RTE_FLOW_CONV_OP_ACTION,
15340                                         NULL, 0, act, &flow_err);
15341                                 if (rss_size <= 0)
15342                                         return -rte_mtr_error_set(error,
15343                                           ENOTSUP,
15344                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15345                                           NULL, "Get the wrong "
15346                                           "rss action struct size");
15347                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15348                                                 rss_size, 0, SOCKET_ID_ANY);
15349                                 if (!act_cnt->rss)
15350                                         return -rte_mtr_error_set(error,
15351                                           ENOTSUP,
15352                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15353                                           NULL,
15354                                           "Fail to malloc rss action memory");
15355                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15356                                         act_cnt->rss, rss_size,
15357                                         act, &flow_err);
15358                                 if (ret < 0)
15359                                         return -rte_mtr_error_set(error,
15360                                           ENOTSUP,
15361                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15362                                           NULL, "Fail to save "
15363                                           "rss action into policy struct");
15364                                 act_cnt->fate_action =
15365                                         MLX5_FLOW_FATE_SHARED_RSS;
15366                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15367                                 break;
15368                         }
15369                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15370                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15371                         {
15372                                 struct mlx5_flow_dv_port_id_action_resource
15373                                         port_id_resource;
15374                                 uint32_t port_id = 0;
15375
15376                                 if (i >= MLX5_MTR_RTE_COLORS)
15377                                         return -rte_mtr_error_set(error,
15378                                         ENOTSUP,
15379                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15380                                         NULL, "cannot create policy "
15381                                         "port action for this color");
15382                                 memset(&port_id_resource, 0,
15383                                         sizeof(port_id_resource));
15384                                 if (flow_dv_translate_action_port_id(dev, act,
15385                                                 &port_id, &flow_err))
15386                                         return -rte_mtr_error_set(error,
15387                                         ENOTSUP,
15388                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15389                                         NULL, "cannot translate "
15390                                         "policy port action");
15391                                 port_id_resource.port_id = port_id;
15392                                 if (flow_dv_port_id_action_resource_register
15393                                         (dev, &port_id_resource,
15394                                         &dev_flow, &flow_err))
15395                                         return -rte_mtr_error_set(error,
15396                                         ENOTSUP,
15397                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15398                                         NULL, "cannot setup "
15399                                         "policy port action");
15400                                 act_cnt->rix_port_id_action =
15401                                         dev_flow.handle->rix_port_id_action;
15402                                 act_cnt->fate_action =
15403                                         MLX5_FLOW_FATE_PORT_ID;
15404                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15405                                 break;
15406                         }
15407                         case RTE_FLOW_ACTION_TYPE_JUMP:
15408                         {
15409                                 uint32_t jump_group = 0;
15410                                 uint32_t table = 0;
15411                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15412                                 struct flow_grp_info grp_info = {
15413                                         .external = !!dev_flow.external,
15414                                         .transfer = !!transfer,
15415                                         .fdb_def_rule = !!priv->fdb_def_rule,
15416                                         .std_tbl_fix = 0,
15417                                         .skip_scale = dev_flow.skip_scale &
15418                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15419                                 };
15420                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15421                                         mtr_policy->sub_policys[domain][0];
15422
15423                                 if (i >= MLX5_MTR_RTE_COLORS)
15424                                         return -rte_mtr_error_set(error,
15425                                           ENOTSUP,
15426                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15427                                           NULL,
15428                                           "cannot create policy "
15429                                           "jump action for this color");
15430                                 jump_group =
15431                                 ((const struct rte_flow_action_jump *)
15432                                                         act->conf)->group;
15433                                 if (mlx5_flow_group_to_table(dev, NULL,
15434                                                        jump_group,
15435                                                        &table,
15436                                                        &grp_info, &flow_err))
15437                                         return -rte_mtr_error_set(error,
15438                                         ENOTSUP,
15439                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15440                                         NULL, "cannot setup "
15441                                         "policy jump action");
15442                                 sub_policy->jump_tbl[i] =
15443                                 flow_dv_tbl_resource_get(dev,
15444                                         table, egress,
15445                                         transfer,
15446                                         !!dev_flow.external,
15447                                         NULL, jump_group, 0,
15448                                         0, &flow_err);
15449                                 if (!sub_policy->jump_tbl[i])
15451                                         return -rte_mtr_error_set(error,
15452                                         ENOTSUP,
15453                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15454                                         NULL, "cannot create jump action.");
15455                                 tbl_data = container_of
15456                                 (sub_policy->jump_tbl[i],
15457                                 struct mlx5_flow_tbl_data_entry, tbl);
15458                                 act_cnt->dr_jump_action[domain] =
15459                                         tbl_data->jump.action;
15460                                 act_cnt->fate_action =
15461                                         MLX5_FLOW_FATE_JUMP;
15462                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15463                                 break;
15464                         }
15465                         /*
15466                          * No need to check meter hierarchy for Y or R colors
15467                          * here since it is done in the validation stage.
15468                          */
15469                         case RTE_FLOW_ACTION_TYPE_METER:
15470                         {
15471                                 const struct rte_flow_action_meter *mtr;
15472                                 struct mlx5_flow_meter_info *next_fm;
15473                                 struct mlx5_flow_meter_policy *next_policy;
15474                                 struct rte_flow_action tag_action;
15475                                 struct mlx5_rte_flow_action_set_tag set_tag;
15476                                 uint32_t next_mtr_idx = 0;
15477
15478                                 mtr = act->conf;
15479                                 next_fm = mlx5_flow_meter_find(priv,
15480                                                         mtr->mtr_id,
15481                                                         &next_mtr_idx);
15482                                 if (!next_fm)
15483                                         return -rte_mtr_error_set(error, EINVAL,
15484                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15485                                                 "Fail to find next meter.");
15486                                 if (next_fm->def_policy)
15487                                         return -rte_mtr_error_set(error, EINVAL,
15488                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15489                                 "Hierarchy only supports termination meter.");
15490                                 next_policy = mlx5_flow_meter_policy_find(dev,
15491                                                 next_fm->policy_id, NULL);
15492                                 MLX5_ASSERT(next_policy);
15493                                 if (next_fm->drop_cnt) {
15494                                         set_tag.id =
15495                                                 (enum modify_reg)
15496                                                 mlx5_flow_get_reg_id(dev,
15497                                                 MLX5_MTR_ID,
15498                                                 0,
15499                                                 (struct rte_flow_error *)error);
15500                                         set_tag.offset = (priv->mtr_reg_share ?
15501                                                 MLX5_MTR_COLOR_BITS : 0);
15502                                         set_tag.length = (priv->mtr_reg_share ?
15503                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15504                                                MLX5_REG_BITS);
15505                                         set_tag.data = next_mtr_idx;
15506                                         tag_action.type =
15507                                                 (enum rte_flow_action_type)
15508                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15509                                         tag_action.conf = &set_tag;
15510                                         if (flow_dv_convert_action_set_reg
15511                                                 (mhdr_res, &tag_action,
15512                                                 (struct rte_flow_error *)error))
15513                                                 return -rte_errno;
15514                                         action_flags |=
15515                                                 MLX5_FLOW_ACTION_SET_TAG;
15516                                 }
15517                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15518                                 act_cnt->next_mtr_id = next_fm->meter_id;
15519                                 act_cnt->next_sub_policy = NULL;
15520                                 mtr_policy->is_hierarchy = 1;
15521                                 mtr_policy->dev = next_policy->dev;
15522                                 action_flags |=
15523                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15524                                 break;
15525                         }
15526                         default:
15527                                 return -rte_mtr_error_set(error, ENOTSUP,
15528                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15529                                           NULL, "action type not supported");
15530                         }
15531                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15532                                 /* create modify action if needed. */
15533                                 dev_flow.dv.group = 1;
15534                                 if (flow_dv_modify_hdr_resource_register
15535                                         (dev, mhdr_res, &dev_flow, &flow_err))
15536                                         return -rte_mtr_error_set(error,
15537                                                 ENOTSUP,
15538                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15539                                                 NULL, "cannot register policy "
15540                                                 "set tag action");
15541                                 act_cnt->modify_hdr =
15542                                         dev_flow.handle->dvh.modify_hdr;
15543                         }
15544                 }
15545         }
15546         return 0;
15547 }
15548
15549 /**
15550  * Create the meter policy actions for all configured domains, lock free
15551  * (mutex should be acquired by the caller).
15552  * Dispatcher for the per-domain creation calls.
15553  *
15554  * @param[in] dev
15555  *   Pointer to the Ethernet device structure.
15556  * @param[in] mtr_policy
15557  *   Meter policy struct.
15558  * @param[in] actions
15559  *   Per-color action specifications used to create the meter actions.
15560  * @param[out] error
15561  *   Perform verbose error reporting if not NULL. Initialized in case of
15562  *   error only.
15563  *
15564  * @return
15565  *   0 on success, otherwise negative errno value.
15566  */
15567 static int
15568 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15569                       struct mlx5_flow_meter_policy *mtr_policy,
15570                       const struct rte_flow_action *actions[RTE_COLORS],
15571                       struct rte_mtr_error *error)
15572 {
15573         int ret, i;
15574         uint16_t sub_policy_num;
15575
15576         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15577                 sub_policy_num = (mtr_policy->sub_policy_num >>
15578                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15579                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15580                 if (sub_policy_num) {
15581                         ret = __flow_dv_create_domain_policy_acts(dev,
15582                                 mtr_policy, actions,
15583                                 (enum mlx5_meter_domain)i, error);
15584                         /* Resource cleanup is done at the caller level. */
15585                         if (ret)
15586                                 return ret;
15587                 }
15588         }
15589         return 0;
15590 }
15591
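/**
 * Illustrative application-side sketch (not driver code): the actions[]
 * array consumed above originates from rte_mtr_meter_policy_add(). A
 * minimal policy queueing green packets and dropping red ones, with
 * hypothetical port and policy IDs:
 *
 * @code
 * #include <rte_flow.h>
 * #include <rte_mtr.h>
 *
 * static int
 * example_policy_add(uint16_t port_id, uint32_t policy_id)
 * {
 *         const struct rte_flow_action green[] = {
 *                 { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *                   .conf = &(struct rte_flow_action_queue){ .index = 0 } },
 *                 { .type = RTE_FLOW_ACTION_TYPE_END },
 *         };
 *         const struct rte_flow_action red[] = {
 *                 { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *                 { .type = RTE_FLOW_ACTION_TYPE_END },
 *         };
 *         struct rte_mtr_meter_policy_params params = {
 *                 .actions = {
 *                         [RTE_COLOR_GREEN] = green,
 *                         [RTE_COLOR_RED] = red,
 *                 },
 *         };
 *         struct rte_mtr_error mtr_err;
 *
 *         return rte_mtr_meter_policy_add(port_id, policy_id, &params,
 *                                         &mtr_err);
 * }
 * @endcode
 */
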
15592 /**
15593  * Query a DV flow rule for its statistics via DevX.
15594  *
15595  * @param[in] dev
15596  *   Pointer to Ethernet device.
15597  * @param[in] cnt_idx
15598  *   Index to the flow counter.
15599  * @param[out] data
15600  *   Data retrieved by the query.
15601  * @param[out] error
15602  *   Perform verbose error reporting if not NULL.
15603  *
15604  * @return
15605  *   0 on success, a negative errno value otherwise and rte_errno is set.
15606  */
15607 int
15608 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15609                     struct rte_flow_error *error)
15610 {
15611         struct mlx5_priv *priv = dev->data->dev_private;
15612         struct rte_flow_query_count *qc = data;
15613
15614         if (!priv->sh->devx)
15615                 return rte_flow_error_set(error, ENOTSUP,
15616                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15617                                           NULL,
15618                                           "counters are not supported");
15619         if (cnt_idx) {
15620                 uint64_t pkts, bytes;
15621                 struct mlx5_flow_counter *cnt;
15622                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15623
15624                 if (err)
15625                         return rte_flow_error_set(error, -err,
15626                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15627                                         NULL, "cannot read counters");
15628                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15629                 qc->hits_set = 1;
15630                 qc->bytes_set = 1;
15631                 qc->hits = pkts - cnt->hits;
15632                 qc->bytes = bytes - cnt->bytes;
15633                 if (qc->reset) {
15634                         cnt->hits = pkts;
15635                         cnt->bytes = bytes;
15636                 }
15637                 return 0;
15638         }
15639         return rte_flow_error_set(error, EINVAL,
15640                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15641                                   NULL,
15642                                   "counters are not available");
15643 }
15644
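/**
 * Application-side sketch (illustrative, hypothetical helper name): this
 * entry point is reached through rte_flow_query() on a flow carrying a
 * COUNT action.
 *
 * @code
 * #include <inttypes.h>
 * #include <stdio.h>
 * #include <rte_flow.h>
 *
 * static int
 * example_query_count(uint16_t port_id, struct rte_flow *flow)
 * {
 *         struct rte_flow_query_count qc = { .reset = 1 };
 *         const struct rte_flow_action action = {
 *                 .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *         };
 *         struct rte_flow_error flow_err;
 *         int ret = rte_flow_query(port_id, flow, &action, &qc, &flow_err);
 *
 *         if (ret == 0 && qc.hits_set)
 *                 printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                        qc.hits, qc.bytes);
 *         return ret;
 * }
 * @endcode
 */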
15645
15646 /**
15647  * Query counter's action pointer for a DV flow rule via DevX.
15648  *
15649  * @param[in] dev
15650  *   Pointer to Ethernet device.
15651  * @param[in] cnt_idx
15652  *   Index to the flow counter.
15653  * @param[out] action_ptr
15654  *   Action pointer for counter.
15655  * @param[out] error
15656  *   Perform verbose error reporting if not NULL.
15657  *
15658  * @return
15659  *   0 on success, a negative errno value otherwise and rte_errno is set.
15660  */
15661 int
15662 flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
15663         void **action_ptr, struct rte_flow_error *error)
15664 {
15665         struct mlx5_priv *priv = dev->data->dev_private;
15666
15667         if (!priv->sh->devx || !action_ptr)
15668                 return rte_flow_error_set(error, ENOTSUP,
15669                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15670                                           NULL,
15671                                           "counters are not supported");
15672
15673         if (cnt_idx) {
15674                 struct mlx5_flow_counter *cnt = NULL;
15675                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15676                 if (cnt) {
15677                         *action_ptr = cnt->action;
15678                         return 0;
15679                 }
15680         }
15681         return rte_flow_error_set(error, EINVAL,
15682                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15683                                   NULL,
15684                                   "counters are not available");
15685 }
15686
15687 static int
15688 flow_dv_action_query(struct rte_eth_dev *dev,
15689                      const struct rte_flow_action_handle *handle, void *data,
15690                      struct rte_flow_error *error)
15691 {
15692         struct mlx5_age_param *age_param;
15693         struct rte_flow_query_age *resp;
15694         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15695         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15696         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15697         struct mlx5_priv *priv = dev->data->dev_private;
15698         struct mlx5_aso_ct_action *ct;
15699         uint16_t owner;
15700         uint32_t dev_idx;
15701
15702         switch (type) {
15703         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15704                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15705                 resp = data;
15706                 resp->aged = __atomic_load_n(&age_param->state,
15707                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15708                                                                           1 : 0;
15709                 resp->sec_since_last_hit_valid = !resp->aged;
15710                 if (resp->sec_since_last_hit_valid)
15711                         resp->sec_since_last_hit = __atomic_load_n
15712                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15713                 return 0;
15714         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15715                 return flow_dv_query_count(dev, idx, data, error);
15716         case MLX5_INDIRECT_ACTION_TYPE_CT:
15717                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15718                 if (owner != PORT_ID(priv))
15719                         return rte_flow_error_set(error, EACCES,
15720                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15721                                         NULL,
15722                                         "CT object owned by another port");
15723                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15724                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15725                 MLX5_ASSERT(ct);
15726                 if (!ct->refcnt)
15727                         return rte_flow_error_set(error, EFAULT,
15728                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15729                                         NULL,
15730                                         "CT object is inactive");
15731                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15732                                                         ct->peer;
15733                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15734                                                         ct->is_original;
15735                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15736                         return rte_flow_error_set(error, EIO,
15737                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15738                                         NULL,
15739                                         "Failed to query CT context");
15740                 return 0;
15741         default:
15742                 return rte_flow_error_set(error, ENOTSUP,
15743                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15744                                           "action type query not supported");
15745         }
15746 }
15747
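/**
 * Application-side sketch (illustrative): indirect actions are queried
 * through rte_flow_action_handle_query(); the output layout depends on the
 * action type, e.g. struct rte_flow_query_age for an AGE handle.
 *
 * @code
 * #include <stdio.h>
 * #include <rte_flow.h>
 *
 * static int
 * example_query_age_handle(uint16_t port_id,
 *                          const struct rte_flow_action_handle *handle)
 * {
 *         struct rte_flow_query_age age = { 0 };
 *         struct rte_flow_error flow_err;
 *         int ret = rte_flow_action_handle_query(port_id, handle, &age,
 *                                                &flow_err);
 *
 *         if (ret == 0 && age.sec_since_last_hit_valid)
 *                 printf("idle for %u seconds\n",
 *                        (unsigned int)age.sec_since_last_hit);
 *         return ret;
 * }
 * @endcode
 */
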
15748 /**
15749  * Query a flow rule AGE action for aging information.
15750  *
15751  * @param[in] dev
15752  *   Pointer to Ethernet device.
15753  * @param[in] flow
15754  *   Pointer to the sub flow.
15755  * @param[out] data
15756  *   Data retrieved by the query.
15757  * @param[out] error
15758  *   Perform verbose error reporting if not NULL.
15759  *
15760  * @return
15761  *   0 on success, a negative errno value otherwise and rte_errno is set.
15762  */
15763 static int
15764 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15765                   void *data, struct rte_flow_error *error)
15766 {
15767         struct rte_flow_query_age *resp = data;
15768         struct mlx5_age_param *age_param;
15769
15770         if (flow->age) {
15771                 struct mlx5_aso_age_action *act =
15772                                      flow_aso_age_get_by_idx(dev, flow->age);
15773
15774                 age_param = &act->age_params;
15775         } else if (flow->counter) {
15776                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15777
15778                 if (!age_param || !age_param->timeout)
15779                         return rte_flow_error_set
15780                                         (error, EINVAL,
15781                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15782                                          NULL, "cannot read age data");
15783         } else {
15784                 return rte_flow_error_set(error, EINVAL,
15785                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15786                                           NULL, "age data not available");
15787         }
15788         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15789                                      AGE_TMOUT ? 1 : 0;
15790         resp->sec_since_last_hit_valid = !resp->aged;
15791         if (resp->sec_since_last_hit_valid)
15792                 resp->sec_since_last_hit = __atomic_load_n
15793                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15794         return 0;
15795 }
15796
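/**
 * Illustrative sketch: a flow-attached AGE action is queried with
 * rte_flow_query(), mirroring the handle-based path shown above.
 *
 * @code
 * #include <rte_flow.h>
 *
 * static int
 * example_query_flow_age(uint16_t port_id, struct rte_flow *flow,
 *                        struct rte_flow_query_age *age)
 * {
 *         const struct rte_flow_action action = {
 *                 .type = RTE_FLOW_ACTION_TYPE_AGE,
 *         };
 *         struct rte_flow_error flow_err;
 *
 *         return rte_flow_query(port_id, flow, &action, age, &flow_err);
 * }
 * @endcode
 */
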
15797 /**
15798  * Query a flow.
15799  *
15800  * @see rte_flow_query()
15801  * @see rte_flow_ops
15802  */
15803 static int
15804 flow_dv_query(struct rte_eth_dev *dev,
15805               struct rte_flow *flow,
15806               const struct rte_flow_action *actions,
15807               void *data,
15808               struct rte_flow_error *error)
15809 {
15810         int ret = -EINVAL;
15811
15812         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15813                 switch (actions->type) {
15814                 case RTE_FLOW_ACTION_TYPE_VOID:
15815                         break;
15816                 case RTE_FLOW_ACTION_TYPE_COUNT:
15817                         ret = flow_dv_query_count(dev, flow->counter, data,
15818                                                   error);
15819                         break;
15820                 case RTE_FLOW_ACTION_TYPE_AGE:
15821                         ret = flow_dv_query_age(dev, flow, data, error);
15822                         break;
15823                 default:
15824                         return rte_flow_error_set(error, ENOTSUP,
15825                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15826                                                   actions,
15827                                                   "action not supported");
15828                 }
15829         }
15830         return ret;
15831 }
15832
15833 /**
15834  * Destroy the meter table set.
15835  * Lock free (mutex should be acquired by the caller).
15836  *
15837  * @param[in] dev
15838  *   Pointer to Ethernet device.
15839  * @param[in] fm
15840  *   Meter information table.
15841  */
15842 static void
15843 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15844                         struct mlx5_flow_meter_info *fm)
15845 {
15846         struct mlx5_priv *priv = dev->data->dev_private;
15847         int i;
15848
15849         if (!fm || !priv->config.dv_flow_en)
15850                 return;
15851         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15852                 if (fm->drop_rule[i]) {
15853                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15854                         fm->drop_rule[i] = NULL;
15855                 }
15856         }
15857 }
15858
15859 static void
15860 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15861 {
15862         struct mlx5_priv *priv = dev->data->dev_private;
15863         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15864         struct mlx5_flow_tbl_data_entry *tbl;
15865         int i, j;
15866
15867         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15868                 if (mtrmng->def_rule[i]) {
15869                         claim_zero(mlx5_flow_os_destroy_flow
15870                                         (mtrmng->def_rule[i]));
15871                         mtrmng->def_rule[i] = NULL;
15872                 }
15873                 if (mtrmng->def_matcher[i]) {
15874                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15875                                 struct mlx5_flow_tbl_data_entry, tbl);
15876                         mlx5_list_unregister(tbl->matchers,
15877                                              &mtrmng->def_matcher[i]->entry);
15878                         mtrmng->def_matcher[i] = NULL;
15879                 }
15880                 for (j = 0; j < MLX5_REG_BITS; j++) {
15881                         if (mtrmng->drop_matcher[i][j]) {
15882                                 tbl =
15883                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15884                                              struct mlx5_flow_tbl_data_entry,
15885                                              tbl);
15886                                 mlx5_list_unregister(tbl->matchers,
15887                                             &mtrmng->drop_matcher[i][j]->entry);
15888                                 mtrmng->drop_matcher[i][j] = NULL;
15889                         }
15890                 }
15891                 if (mtrmng->drop_tbl[i]) {
15892                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15893                                 mtrmng->drop_tbl[i]);
15894                         mtrmng->drop_tbl[i] = NULL;
15895                 }
15896         }
15897 }
15898
15899 /* Number of meter flow actions, count and jump or count and drop. */
15900 #define METER_ACTIONS 2
15901
15902 static void
15903 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15904                                     enum mlx5_meter_domain domain)
15905 {
15906         struct mlx5_priv *priv = dev->data->dev_private;
15907         struct mlx5_flow_meter_def_policy *def_policy =
15908                         priv->sh->mtrmng->def_policy[domain];
15909
15910         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15911         mlx5_free(def_policy);
15912         priv->sh->mtrmng->def_policy[domain] = NULL;
15913 }
15914
15915 /**
15916  * Destroy the default policy table set.
15917  *
15918  * @param[in] dev
15919  *   Pointer to Ethernet device.
15920  */
15921 static void
15922 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15923 {
15924         struct mlx5_priv *priv = dev->data->dev_private;
15925         int i;
15926
15927         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15928                 if (priv->sh->mtrmng->def_policy[i])
15929                         __flow_dv_destroy_domain_def_policy(dev,
15930                                         (enum mlx5_meter_domain)i);
15931         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15932 }
15933
15934 static int
15935 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15936                         uint32_t color_reg_c_idx,
15937                         enum rte_color color, void *matcher_object,
15938                         int actions_n, void *actions,
15939                         bool match_src_port, const struct rte_flow_item *item,
15940                         void **rule, const struct rte_flow_attr *attr)
15941 {
15942         int ret;
15943         struct mlx5_flow_dv_match_params value = {
15944                 .size = sizeof(value.buf),
15945         };
15946         struct mlx5_flow_dv_match_params matcher = {
15947                 .size = sizeof(matcher.buf),
15948         };
15949         struct mlx5_priv *priv = dev->data->dev_private;
15950         uint8_t misc_mask;
15951
15952         if (match_src_port && (priv->representor || priv->master)) {
15953                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15954                                                    value.buf, item, attr)) {
15955                         DRV_LOG(ERR, "Failed to create meter policy%d flow's"
15956                                 " value with port.", color);
15957                         return -1;
15958                 }
15959         }
15960         flow_dv_match_meta_reg(matcher.buf, value.buf,
15961                                (enum modify_reg)color_reg_c_idx,
15962                                rte_col_2_mlx5_col(color), UINT32_MAX);
15963         misc_mask = flow_dv_matcher_enable(value.buf);
15964         __flow_dv_adjust_buf_size(&value.size, misc_mask);
15965         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
15966                                        actions_n, actions, rule);
15967         if (ret) {
15968                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
15969                 return -1;
15970         }
15971         return 0;
15972 }
15973
15974 static int
15975 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
15976                         uint32_t color_reg_c_idx,
15977                         uint16_t priority,
15978                         struct mlx5_flow_meter_sub_policy *sub_policy,
15979                         const struct rte_flow_attr *attr,
15980                         bool match_src_port,
15981                         const struct rte_flow_item *item,
15982                         struct mlx5_flow_dv_matcher **policy_matcher,
15983                         struct rte_flow_error *error)
15984 {
15985         struct mlx5_list_entry *entry;
15986         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
15987         struct mlx5_flow_dv_matcher matcher = {
15988                 .mask = {
15989                         .size = sizeof(matcher.mask.buf),
15990                 },
15991                 .tbl = tbl_rsc,
15992         };
15993         struct mlx5_flow_dv_match_params value = {
15994                 .size = sizeof(value.buf),
15995         };
15996         struct mlx5_flow_cb_ctx ctx = {
15997                 .error = error,
15998                 .data = &matcher,
15999         };
16000         struct mlx5_flow_tbl_data_entry *tbl_data;
16001         struct mlx5_priv *priv = dev->data->dev_private;
16002         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16003
16004         if (match_src_port && (priv->representor || priv->master)) {
16005                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16006                                                    value.buf, item, attr)) {
16007                         DRV_LOG(ERR, "Failed to register meter policy%d matcher"
16008                                 " with port.", priority);
16009                         return -1;
16010                 }
16011         }
16012         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16013         if (priority < RTE_COLOR_RED)
16014                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16015                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16016         matcher.priority = priority;
16017         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16018                                     matcher.mask.size);
16019         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16020         if (!entry) {
16021                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
16022                 return -1;
16023         }
16024         *policy_matcher =
16025                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16026         return 0;
16027 }
16028
16029 /**
16030  * Create the policy rules per domain.
16031  *
16032  * @param[in] dev
16033  *   Pointer to Ethernet device.
16034  * @param[in] sub_policy
16035  *   Pointer to the sub-policy table.
16036  * @param[in] egress
16037  *   Direction of the table.
16038  * @param[in] transfer
16039  *   E-Switch or NIC flow.
16040  * @param[in] acts
16041  *   Pointer to policy action list per color.
16042  *
16043  * @return
16044  *   0 on success, -1 otherwise.
16045  */
16046 static int
16047 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16048                 struct mlx5_flow_meter_sub_policy *sub_policy,
16049                 uint8_t egress, uint8_t transfer, bool match_src_port,
16050                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16051 {
16052         struct mlx5_priv *priv = dev->data->dev_private;
16053         struct rte_flow_error flow_err;
16054         uint32_t color_reg_c_idx;
16055         struct rte_flow_attr attr = {
16056                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16057                 .priority = 0,
16058                 .ingress = 0,
16059                 .egress = !!egress,
16060                 .transfer = !!transfer,
16061                 .reserved = 0,
16062         };
16063         int i;
16064         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16065         struct mlx5_sub_policy_color_rule *color_rule;
16066         bool svport_match;
16067         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16068
16069         if (ret < 0)
16070                 return -1;
16071         /* Create policy table with POLICY level. */
16072         if (!sub_policy->tbl_rsc)
16073                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16074                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16075                                 egress, transfer, false, NULL, 0, 0,
16076                                 sub_policy->idx, &flow_err);
16077         if (!sub_policy->tbl_rsc) {
16078                 DRV_LOG(ERR,
16079                         "Failed to create meter sub policy table.");
16080                 return -1;
16081         }
16082         /* Prepare matchers. */
16083         color_reg_c_idx = ret;
16084         for (i = 0; i < RTE_COLORS; i++) {
16085                 TAILQ_INIT(&sub_policy->color_rules[i]);
16086                 if (!acts[i].actions_n)
16087                         continue;
16088                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16089                                 sizeof(struct mlx5_sub_policy_color_rule),
16090                                 0, SOCKET_ID_ANY);
16091                 if (!color_rule) {
16092                         DRV_LOG(ERR, "No memory to create color rule.");
16093                         goto err_exit;
16094                 }
16095                 tmp_rules[i] = color_rule;
16096                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16097                                   color_rule, next_port);
16098                 color_rule->src_port = priv->representor_id;
16099                 /* The flow attribute priority is not used here. */
16100                 attr.priority = i;
16101                 /* Create matchers for colors. */
16102                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16103                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16104                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16105                                 &attr, svport_match, NULL,
16106                                 &color_rule->matcher, &flow_err)) {
16107                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16108                         goto err_exit;
16109                 }
16110                 /* Create flow, matching color. */
16111                 if (__flow_dv_create_policy_flow(dev,
16112                                 color_reg_c_idx, (enum rte_color)i,
16113                                 color_rule->matcher->matcher_object,
16114                                 acts[i].actions_n, acts[i].dv_actions,
16115                                 svport_match, NULL, &color_rule->rule,
16116                                 &attr)) {
16117                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16118                         goto err_exit;
16119                 }
16120         }
16121         return 0;
16122 err_exit:
16123         /* All the policy rules will be cleared. */
16124         do {
16125                 color_rule = tmp_rules[i];
16126                 if (color_rule) {
16127                         if (color_rule->rule)
16128                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16129                         if (color_rule->matcher) {
16130                                 struct mlx5_flow_tbl_data_entry *tbl =
16131                                         container_of(color_rule->matcher->tbl,
16132                                                      typeof(*tbl), tbl);
16133                                 mlx5_list_unregister(tbl->matchers,
16134                                                 &color_rule->matcher->entry);
16135                         }
16136                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16137                                      color_rule, next_port);
16138                         mlx5_free(color_rule);
16139                 }
16140         } while (i--);
16141         return -1;
16142 }
16143
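/**
 * Application-side sketch (illustrative, hypothetical IDs): the per-color
 * rules built above fire for packets steered through a meter, i.e. a flow
 * carrying a METER action that references a meter created from a profile
 * and a policy.
 *
 * @code
 * #include <rte_flow.h>
 * #include <rte_mtr.h>
 *
 * static struct rte_flow *
 * example_metered_flow(uint16_t port_id, uint32_t profile_id,
 *                      uint32_t policy_id, uint32_t mtr_id)
 * {
 *         struct rte_mtr_params mp = {
 *                 .meter_profile_id = profile_id,
 *                 .meter_policy_id = policy_id,
 *                 .meter_enable = 1,
 *         };
 *         struct rte_mtr_error mtr_err;
 *         const struct rte_flow_attr attr = { .ingress = 1 };
 *         const struct rte_flow_item pattern[] = {
 *                 { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                 { .type = RTE_FLOW_ITEM_TYPE_END },
 *         };
 *         const struct rte_flow_action actions[] = {
 *                 { .type = RTE_FLOW_ACTION_TYPE_METER,
 *                   .conf = &(struct rte_flow_action_meter){
 *                                 .mtr_id = mtr_id } },
 *                 { .type = RTE_FLOW_ACTION_TYPE_END },
 *         };
 *         struct rte_flow_error flow_err;
 *
 *         if (rte_mtr_create(port_id, mtr_id, &mp, 0, &mtr_err))
 *                 return NULL;
 *         return rte_flow_create(port_id, &attr, pattern, actions,
 *                                &flow_err);
 * }
 * @endcode
 */
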
16144 static int
16145 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16146                         struct mlx5_flow_meter_policy *mtr_policy,
16147                         struct mlx5_flow_meter_sub_policy *sub_policy,
16148                         uint32_t domain)
16149 {
16150         struct mlx5_priv *priv = dev->data->dev_private;
16151         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16152         struct mlx5_flow_dv_tag_resource *tag;
16153         struct mlx5_flow_dv_port_id_action_resource *port_action;
16154         struct mlx5_hrxq *hrxq;
16155         struct mlx5_flow_meter_info *next_fm = NULL;
16156         struct mlx5_flow_meter_policy *next_policy;
16157         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16158         struct mlx5_flow_tbl_data_entry *tbl_data;
16159         struct rte_flow_error error;
16160         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16161         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16162         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16163         bool match_src_port = false;
16164         int i;
16165
16166         /* If RSS or Queue, no previous actions / rules are created. */
16167         for (i = 0; i < RTE_COLORS; i++) {
16168                 acts[i].actions_n = 0;
16169                 if (i == RTE_COLOR_RED) {
16170                         /* Only support drop on red. */
16171                         acts[i].dv_actions[0] =
16172                                 mtr_policy->dr_drop_action[domain];
16173                         acts[i].actions_n = 1;
16174                         continue;
16175                 }
16176                 if (i == RTE_COLOR_GREEN &&
16177                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16178                         struct rte_flow_attr attr = {
16179                                 .transfer = transfer
16180                         };
16181
16182                         next_fm = mlx5_flow_meter_find(priv,
16183                                         mtr_policy->act_cnt[i].next_mtr_id,
16184                                         NULL);
16185                         if (!next_fm) {
16186                                 DRV_LOG(ERR,
16187                                         "Failed to get next hierarchy meter.");
16188                                 goto err_exit;
16189                         }
16190                         if (mlx5_flow_meter_attach(priv, next_fm,
16191                                                    &attr, &error)) {
16192                                 DRV_LOG(ERR, "%s", error.message);
16193                                 next_fm = NULL;
16194                                 goto err_exit;
16195                         }
16196                         /* Meter action must be the first for TX. */
16197                         if (mtr_first) {
16198                                 acts[i].dv_actions[acts[i].actions_n] =
16199                                         next_fm->meter_action;
16200                                 acts[i].actions_n++;
16201                         }
16202                 }
16203                 if (mtr_policy->act_cnt[i].rix_mark) {
16204                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16205                                         mtr_policy->act_cnt[i].rix_mark);
16206                         if (!tag) {
16207                                 DRV_LOG(ERR, "Failed to find "
16208                                 "mark action for policy.");
16209                                 goto err_exit;
16210                         }
16211                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16212                         acts[i].actions_n++;
16213                 }
16214                 if (mtr_policy->act_cnt[i].modify_hdr) {
16215                         acts[i].dv_actions[acts[i].actions_n] =
16216                                 mtr_policy->act_cnt[i].modify_hdr->action;
16217                         acts[i].actions_n++;
16218                 }
16219                 if (mtr_policy->act_cnt[i].fate_action) {
16220                         switch (mtr_policy->act_cnt[i].fate_action) {
16221                         case MLX5_FLOW_FATE_PORT_ID:
16222                                 port_action = mlx5_ipool_get
16223                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16224                                 mtr_policy->act_cnt[i].rix_port_id_action);
16225                                 if (!port_action) {
16226                                         DRV_LOG(ERR, "Failed to find "
16227                                                 "port action for policy.");
16228                                         goto err_exit;
16229                                 }
16230                                 acts[i].dv_actions[acts[i].actions_n] =
16231                                         port_action->action;
16232                                 acts[i].actions_n++;
16233                                 mtr_policy->dev = dev;
16234                                 match_src_port = true;
16235                                 break;
16236                         case MLX5_FLOW_FATE_DROP:
16237                         case MLX5_FLOW_FATE_JUMP:
16238                                 acts[i].dv_actions[acts[i].actions_n] =
16239                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16240                                 acts[i].actions_n++;
16241                                 break;
16242                         case MLX5_FLOW_FATE_SHARED_RSS:
16243                         case MLX5_FLOW_FATE_QUEUE:
16244                                 hrxq = mlx5_ipool_get
16245                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16246                                          sub_policy->rix_hrxq[i]);
16247                                 if (!hrxq) {
16248                                         DRV_LOG(ERR, "Failed to find "
16249                                                 "queue action for policy.");
16250                                         goto err_exit;
16251                                 }
16252                                 acts[i].dv_actions[acts[i].actions_n] =
16253                                         hrxq->action;
16254                                 acts[i].actions_n++;
16255                                 break;
16256                         case MLX5_FLOW_FATE_MTR:
16257                                 if (!next_fm) {
16258                                         DRV_LOG(ERR,
16259                                                 "No next hierarchy meter.");
16260                                         goto err_exit;
16261                                 }
16262                                 if (!mtr_first) {
16263                                         acts[i].dv_actions[acts[i].actions_n] =
16264                                                         next_fm->meter_action;
16265                                         acts[i].actions_n++;
16266                                 }
16267                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16268                                         next_sub_policy =
16269                                         mtr_policy->act_cnt[i].next_sub_policy;
16270                                 } else {
16271                                         next_policy =
16272                                                 mlx5_flow_meter_policy_find(dev,
16273                                                 next_fm->policy_id, NULL);
16274                                         MLX5_ASSERT(next_policy);
16275                                         next_sub_policy =
16276                                         next_policy->sub_policys[domain][0];
16277                                 }
16278                                 tbl_data =
16279                                         container_of(next_sub_policy->tbl_rsc,
16280                                         struct mlx5_flow_tbl_data_entry, tbl);
16281                                 acts[i].dv_actions[acts[i].actions_n++] =
16282                                                         tbl_data->jump.action;
16283                                 if (mtr_policy->act_cnt[i].modify_hdr)
16284                                         match_src_port = !!transfer;
16285                                 break;
16286                         default:
16287                                 /* Queue action: do nothing. */
16288                                 break;
16289                         }
16290                 }
16291         }
16292         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16293                                 egress, transfer, match_src_port, acts)) {
16294                 DRV_LOG(ERR,
16295                         "Failed to create policy rules per domain.");
16296                 goto err_exit;
16297         }
16298         return 0;
16299 err_exit:
16300         if (next_fm)
16301                 mlx5_flow_meter_detach(priv, next_fm);
16302         return -1;
16303 }
16304
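/*
 * The per-color action lists above are assembled by appending into the
 * fixed dv_actions[] array of struct mlx5_meter_policy_acts and bumping
 * actions_n. A minimal sketch of that append step (the helper name is
 * illustrative, not a driver API):
 */
static inline void
mtr_policy_acts_append_sketch(struct mlx5_meter_policy_acts *acts,
			      void *action)
{
	/* dv_actions[] is fixed-size; actions_n tracks the fill level. */
	acts->dv_actions[acts->actions_n++] = action;
}
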
16305 /**
16306  * Create the policy rules.
16307  *
16308  * @param[in] dev
16309  *   Pointer to Ethernet device.
16310  * @param[in,out] mtr_policy
16311  *   Pointer to meter policy table.
16312  *
16313  * @return
16314  *   0 on success, -1 otherwise.
16315  */
16316 static int
16317 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16318                              struct mlx5_flow_meter_policy *mtr_policy)
16319 {
16320         int i;
16321         uint16_t sub_policy_num;
16322
16323         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16324                 sub_policy_num = (mtr_policy->sub_policy_num >>
16325                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16326                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16327                 if (!sub_policy_num)
16328                         continue;
16329                 /* Prepare actions list and create policy rules. */
16330                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16331                         mtr_policy->sub_policys[i][0], i)) {
16332                         DRV_LOG(ERR, "Failed to create policy action "
16333                                 "list per domain.");
16334                         return -1;
16335                 }
16336         }
16337         return 0;
16338 }
16339
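/*
 * mtr_policy->sub_policy_num packs one small counter per meter domain,
 * each field MLX5_MTR_SUB_POLICY_NUM_SHIFT bits wide and masked by
 * MLX5_MTR_SUB_POLICY_NUM_MASK, as decoded above. A minimal sketch of the
 * accessors this file open-codes (names are illustrative, not a driver
 * API):
 */
static inline uint16_t
mtr_sub_policy_num_get_sketch(uint32_t packed, int domain)
{
	return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
	       MLX5_MTR_SUB_POLICY_NUM_MASK;
}

static inline uint32_t
mtr_sub_policy_num_set_sketch(uint32_t packed, int domain, uint16_t num)
{
	packed &= ~((uint32_t)MLX5_MTR_SUB_POLICY_NUM_MASK <<
		    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
	return packed | (((uint32_t)num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
			 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
}
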
16340 static int
16341 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16342 {
16343         struct mlx5_priv *priv = dev->data->dev_private;
16344         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16345         struct mlx5_flow_meter_def_policy *def_policy;
16346         struct mlx5_flow_tbl_resource *jump_tbl;
16347         struct mlx5_flow_tbl_data_entry *tbl_data;
16348         uint8_t egress, transfer;
16349         struct rte_flow_error error;
16350         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16351         int ret;
16352
16353         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16354         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16355         def_policy = mtrmng->def_policy[domain];
16356         if (!def_policy) {
16357                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16358                         sizeof(struct mlx5_flow_meter_def_policy),
16359                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16360                 if (!def_policy) {
16361                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16362                         goto def_policy_error;
16363                 }
16364                 mtrmng->def_policy[domain] = def_policy;
16365                 /* Create the meter suffix table with SUFFIX level. */
16366                 jump_tbl = flow_dv_tbl_resource_get(dev,
16367                                 MLX5_FLOW_TABLE_LEVEL_METER,
16368                                 egress, transfer, false, NULL, 0,
16369                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16370                 if (!jump_tbl) {
16371                         DRV_LOG(ERR,
16372                                 "Failed to create meter suffix table.");
16373                         goto def_policy_error;
16374                 }
16375                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16376                 tbl_data = container_of(jump_tbl,
16377                                         struct mlx5_flow_tbl_data_entry, tbl);
16378                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16379                                                 tbl_data->jump.action;
16380                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16381                 acts[RTE_COLOR_GREEN].actions_n = 1;
16382                 /*
16383                  * YELLOW has the same default policy as GREEN.
16384                  * G & Y share the same table and action. The second table
16385                  * resource get only bumps the reference count, which is
16386                  * consumed at the releasing stage.
16387                  */
16388                 jump_tbl = flow_dv_tbl_resource_get(dev,
16389                                 MLX5_FLOW_TABLE_LEVEL_METER,
16390                                 egress, transfer, false, NULL, 0,
16391                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16392                 if (!jump_tbl) {
16393                         DRV_LOG(ERR,
16394                                 "Failed to get meter suffix table.");
16395                         goto def_policy_error;
16396                 }
16397                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16398                 tbl_data = container_of(jump_tbl,
16399                                         struct mlx5_flow_tbl_data_entry, tbl);
16400                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16401                                                 tbl_data->jump.action;
16402                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16403                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16404                 /* Create jump action to the drop table. */
16405                 if (!mtrmng->drop_tbl[domain]) {
16406                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16407                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16408                                  egress, transfer, false, NULL, 0,
16409                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16410                         if (!mtrmng->drop_tbl[domain]) {
16411                                 DRV_LOG(ERR, "Failed to create meter "
16412                                         "drop table for default policy.");
16413                                 goto def_policy_error;
16414                         }
16415                 }
16416                 /* All RED: a unique drop table for the jump action. */
16417                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16418                                         struct mlx5_flow_tbl_data_entry, tbl);
16419                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16420                                                 tbl_data->jump.action;
16421                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16422                 acts[RTE_COLOR_RED].actions_n = 1;
16423                 /* Create default policy rules. */
16424                 ret = __flow_dv_create_domain_policy_rules(dev,
16425                                         &def_policy->sub_policy,
16426                                         egress, transfer, false, acts);
16427                 if (ret) {
16428                         DRV_LOG(ERR, "Failed to create default policy rules.");
16429                         goto def_policy_error;
16430                 }
16431         }
16432         return 0;
16433 def_policy_error:
16434         __flow_dv_destroy_domain_def_policy(dev,
16435                                             (enum mlx5_meter_domain)domain);
16436         return -1;
16437 }
16438
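/*
 * GREEN and YELLOW above deliberately resolve the same suffix table twice,
 * so the table holds two references and the destroy path is expected to
 * release it once per color. A minimal sketch of that counterpart, under
 * the assumption that both jump_tbl[] entries were populated (the helper
 * name is illustrative):
 */
static inline void
def_policy_jump_tbls_release_sketch(struct mlx5_dev_ctx_shared *sh,
				    struct mlx5_flow_meter_def_policy *dp)
{
	int c;

	/* One release per color balances the two resource gets above. */
	for (c = RTE_COLOR_GREEN; c <= RTE_COLOR_YELLOW; c++)
		if (dp->sub_policy.jump_tbl[c])
			flow_dv_tbl_resource_release(sh,
						     dp->sub_policy.jump_tbl[c]);
}
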
16439 /**
16440  * Create the default policy table set.
16441  *
16442  * @param[in] dev
16443  *   Pointer to Ethernet device.
16444  * @return
16445  *   0 on success, -1 otherwise.
16446  */
16447 static int
16448 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16449 {
16450         struct mlx5_priv *priv = dev->data->dev_private;
16451         int i;
16452
16453         /* Non-termination policy table. */
16454         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16455                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16456                         continue;
16457                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16458                         DRV_LOG(ERR, "Failed to create default policy");
16459                         /* Roll back default policies created for other domains. */
16460                         flow_dv_destroy_def_policy(dev);
16461                         return -1;
16462                 }
16463         }
16464         return 0;
16465 }
16466
16467 /**
16468  * Create the needed meter tables.
16469  * Lock-free (the mutex should be acquired by the caller).
16470  *
16471  * @param[in] dev
16472  *   Pointer to Ethernet device.
16473  * @param[in] fm
16474  *   Meter information table.
16475  * @param[in] mtr_idx
16476  *   Meter index.
16477  * @param[in] domain_bitmap
16478  *   Domain bitmap.
16479  * @return
16480  *   0 on success, -1 otherwise.
16481  */
16482 static int
16483 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16484                         struct mlx5_flow_meter_info *fm,
16485                         uint32_t mtr_idx,
16486                         uint8_t domain_bitmap)
16487 {
16488         struct mlx5_priv *priv = dev->data->dev_private;
16489         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16490         struct rte_flow_error error;
16491         struct mlx5_flow_tbl_data_entry *tbl_data;
16492         uint8_t egress, transfer;
16493         void *actions[METER_ACTIONS];
16494         int domain, ret, i;
16495         struct mlx5_flow_counter *cnt;
16496         struct mlx5_flow_dv_match_params value = {
16497                 .size = sizeof(value.buf),
16498         };
16499         struct mlx5_flow_dv_match_params matcher_para = {
16500                 .size = sizeof(matcher_para.buf),
16501         };
16502         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16503                                                      0, &error);
16504         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16505         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16506         struct mlx5_list_entry *entry;
16507         struct mlx5_flow_dv_matcher matcher = {
16508                 .mask = {
16509                         .size = sizeof(matcher.mask.buf),
16510                 },
16511         };
16512         struct mlx5_flow_dv_matcher *drop_matcher;
16513         struct mlx5_flow_cb_ctx ctx = {
16514                 .error = &error,
16515                 .data = &matcher,
16516         };
16517         uint8_t misc_mask;
16518
16519         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16520                 rte_errno = ENOTSUP;
16521                 return -1;
16522         }
16523         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16524                 if (!(domain_bitmap & (1 << domain)) ||
16525                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16526                         continue;
16527                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16528                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16529                 /* Create the drop table with METER DROP level. */
16530                 if (!mtrmng->drop_tbl[domain]) {
16531                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16532                                         MLX5_FLOW_TABLE_LEVEL_METER,
16533                                         egress, transfer, false, NULL, 0,
16534                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16535                         if (!mtrmng->drop_tbl[domain]) {
16536                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16537                                 goto policy_error;
16538                         }
16539                 }
16540                 /* Create default matcher in drop table. */
16541                 matcher.tbl = mtrmng->drop_tbl[domain];
16542                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16543                                 struct mlx5_flow_tbl_data_entry, tbl);
16544                 if (!mtrmng->def_matcher[domain]) {
16545                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16546                                        (enum modify_reg)mtr_id_reg_c,
16547                                        0, 0);
16548                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16549                         matcher.crc = rte_raw_cksum
16550                                         ((const void *)matcher.mask.buf,
16551                                         matcher.mask.size);
16552                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16553                         if (!entry) {
16554                                 DRV_LOG(ERR, "Failed to register meter "
16555                                 "drop default matcher.");
16556                                 goto policy_error;
16557                         }
16558                         mtrmng->def_matcher[domain] = container_of(entry,
16559                         struct mlx5_flow_dv_matcher, entry);
16560                 }
16561                 /* Create default rule in drop table. */
16562                 if (!mtrmng->def_rule[domain]) {
16563                         i = 0;
16564                         actions[i++] = priv->sh->dr_drop_action;
16565                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16566                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16567                         misc_mask = flow_dv_matcher_enable(value.buf);
16568                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16569                         ret = mlx5_flow_os_create_flow
16570                                 (mtrmng->def_matcher[domain]->matcher_object,
16571                                 (void *)&value, i, actions,
16572                                 &mtrmng->def_rule[domain]);
16573                         if (ret) {
16574                                 DRV_LOG(ERR, "Failed to create meter "
16575                                 "default drop rule for drop table.");
16576                                 goto policy_error;
16577                         }
16578                 }
16579                 if (!fm->drop_cnt)
16580                         continue;
16581                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16582                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16583                         /* Create matchers for Drop. */
16584                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16585                                         (enum modify_reg)mtr_id_reg_c, 0,
16586                                         (mtr_id_mask << mtr_id_offset));
16587                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16588                         matcher.crc = rte_raw_cksum
16589                                         ((const void *)matcher.mask.buf,
16590                                         matcher.mask.size);
16591                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16592                         if (!entry) {
16593                                 DRV_LOG(ERR,
16594                                 "Failed to register meter drop matcher.");
16595                                 goto policy_error;
16596                         }
16597                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16598                                 container_of(entry, struct mlx5_flow_dv_matcher,
16599                                              entry);
16600                 }
16601                 drop_matcher =
16602                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16603                 /* Create drop rule, matching meter_id only. */
16604                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16605                                 (enum modify_reg)mtr_id_reg_c,
16606                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16607                 i = 0;
16608                 cnt = flow_dv_counter_get_by_idx(dev,
16609                                         fm->drop_cnt, NULL);
16610                 actions[i++] = cnt->action;
16611                 actions[i++] = priv->sh->dr_drop_action;
16612                 misc_mask = flow_dv_matcher_enable(value.buf);
16613                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16614                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16615                                                (void *)&value, i, actions,
16616                                                &fm->drop_rule[domain]);
16617                 if (ret) {
16618                         DRV_LOG(ERR, "Failed to create meter "
16619                                 "drop rule for drop table.");
16620                         goto policy_error;
16621                 }
16622         }
16623         return 0;
16624 policy_error:
16625         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16626                 if (fm->drop_rule[i]) {
16627                         claim_zero(mlx5_flow_os_destroy_flow
16628                                 (fm->drop_rule[i]));
16629                         fm->drop_rule[i] = NULL;
16630                 }
16631         }
16632         return -1;
16633 }
16634
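/*
 * The drop rules above match the meter index written into the meter-id
 * register; when the register is shared with the color bits, the index
 * sits above them. A minimal sketch of the value/mask derivation (the
 * helper name is illustrative, not a driver API):
 */
static inline void
mtr_id_match_params_sketch(uint32_t mtr_idx, uint8_t mtr_id_offset,
			   uint32_t mtr_id_mask,
			   uint32_t *value, uint32_t *mask)
{
	/* The matcher mask covers only the meter-id bits of the register. */
	*mask = mtr_id_mask << mtr_id_offset;
	/* The rule value carries the meter index shifted past the colors. */
	*value = mtr_idx << mtr_id_offset;
}
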
16635 static struct mlx5_flow_meter_sub_policy *
16636 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16637                 struct mlx5_flow_meter_policy *mtr_policy,
16638                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16639                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16640                 bool *is_reuse)
16641 {
16642         struct mlx5_priv *priv = dev->data->dev_private;
16643         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16644         uint32_t sub_policy_idx = 0;
16645         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16646         uint32_t i, j;
16647         struct mlx5_hrxq *hrxq;
16648         struct mlx5_flow_handle dh;
16649         struct mlx5_meter_policy_action_container *act_cnt;
16650         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16651         uint16_t sub_policy_num;
16652
16653         rte_spinlock_lock(&mtr_policy->sl);
16654         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16655                 if (!rss_desc[i])
16656                         continue;
16657                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16658                 if (!hrxq_idx[i]) {
16659                         rte_spinlock_unlock(&mtr_policy->sl);
16660                         return NULL;
16661                 }
16662         }
16663         sub_policy_num = (mtr_policy->sub_policy_num >>
16664                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16665                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16666         for (j = 0; j < sub_policy_num; j++) {
16667                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16668                         if (rss_desc[i] &&
16669                             hrxq_idx[i] !=
16670                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
16671                                 break;
16672                 }
16673                 if (i >= MLX5_MTR_RTE_COLORS) {
16674                         /*
16675                          * Found the sub policy table with
16676                          * the same queue per color.
16677                          */
16678                         rte_spinlock_unlock(&mtr_policy->sl);
16679                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16680                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16681                         *is_reuse = true;
16682                         return mtr_policy->sub_policys[domain][j];
16683                 }
16684         }
16685         /* Create sub policy. */
16686         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16687                 /* Reuse the first pre-allocated sub_policy. */
16688                 sub_policy = mtr_policy->sub_policys[domain][0];
16689                 sub_policy_idx = sub_policy->idx;
16690         } else {
16691                 sub_policy = mlx5_ipool_zmalloc
16692                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16693                                  &sub_policy_idx);
16694                 if (!sub_policy ||
16695                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16696                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16697                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16698                         goto rss_sub_policy_error;
16699                 }
16700                 sub_policy->idx = sub_policy_idx;
16701                 sub_policy->main_policy = mtr_policy;
16702         }
16703         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16704                 if (!rss_desc[i])
16705                         continue;
16706                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16707                 if (mtr_policy->is_hierarchy) {
16708                         act_cnt = &mtr_policy->act_cnt[i];
16709                         act_cnt->next_sub_policy = next_sub_policy;
16710                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16711                 } else {
16712                         /*
16713                          * Overwrite the last action, replacing the
16714                          * RSS action with a Queue action.
16715                          */
16716                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16717                                               hrxq_idx[i]);
16718                         if (!hrxq) {
16719                                 DRV_LOG(ERR, "Failed to get policy hrxq");
16720                                 goto rss_sub_policy_error;
16721                         }
16722                         act_cnt = &mtr_policy->act_cnt[i];
16723                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16724                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16725                                 if (act_cnt->rix_mark)
16726                                         dh.mark = 1;
16727                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16728                                 dh.rix_hrxq = hrxq_idx[i];
16729                                 flow_drv_rxq_flags_set(dev, &dh);
16730                         }
16731                 }
16732         }
16733         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16734                                                sub_policy, domain)) {
16735                 DRV_LOG(ERR, "Failed to create policy "
16736                         "rules for ingress domain.");
16737                 goto rss_sub_policy_error;
16738         }
16739         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16740                 i = (mtr_policy->sub_policy_num >>
16741                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16742                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16743                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
16744                         DRV_LOG(ERR, "No free sub-policy slot.");
16745                         goto rss_sub_policy_error;
16746                 }
16747                 mtr_policy->sub_policys[domain][i] = sub_policy;
16748                 i++;
16749                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16750                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16751                 mtr_policy->sub_policy_num |=
16752                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16753                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16754         }
16755         rte_spinlock_unlock(&mtr_policy->sl);
16756         *is_reuse = false;
16757         return sub_policy;
16758 rss_sub_policy_error:
16759         if (sub_policy) {
16760                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16761                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16762                         i = (mtr_policy->sub_policy_num >>
16763                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16764                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16765                         mtr_policy->sub_policys[domain][i] = NULL;
16766                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16767                                         sub_policy->idx);
16768                 }
16769         }
16770         rte_spinlock_unlock(&mtr_policy->sl);
16771         return NULL;
16772 }
16773
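/*
 * A sub-policy is reused above when every color that requests RSS resolves
 * to the same hash Rx queue as an existing sub-policy. A condensed sketch
 * of that comparison (the helper name is illustrative, not a driver API):
 */
static inline bool
mtr_sub_policy_hrxq_match_sketch(struct mlx5_flow_meter_sub_policy *sp,
				 struct mlx5_flow_rss_desc *const rss_desc[],
				 const uint32_t hrxq_idx[])
{
	uint32_t i;

	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
		if (rss_desc[i] && hrxq_idx[i] != sp->rix_hrxq[i])
			return false;
	return true;
}
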
16774 /**
16775  * Find the policy table for prefix table with RSS.
16776  *
16777  * @param[in] dev
16778  *   Pointer to Ethernet device.
16779  * @param[in] mtr_policy
16780  *   Pointer to meter policy table.
16781  * @param[in] rss_desc
16782  *   Pointer to the RSS descriptors, one per color.
16783  * @return
16784  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
16785  */
16786 static struct mlx5_flow_meter_sub_policy *
16787 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16788                 struct mlx5_flow_meter_policy *mtr_policy,
16789                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16790 {
16791         struct mlx5_priv *priv = dev->data->dev_private;
16792         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16793         struct mlx5_flow_meter_info *next_fm;
16794         struct mlx5_flow_meter_policy *next_policy;
16795         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16796         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16797         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16798         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16799         bool reuse_sub_policy;
16800         uint32_t i = 0;
16801         uint32_t j = 0;
16802
16803         while (true) {
16804                 /* Walk down to collect every policy in this hierarchy. */
16805                 policies[i++] = mtr_policy;
16806                 if (!mtr_policy->is_hierarchy)
16807                         break;
16808                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16809                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
16810                         return NULL;
16811                 }
16812                 next_fm = mlx5_flow_meter_find(priv,
16813                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16814                 if (!next_fm) {
16815                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16816                         return NULL;
16817                 }
16818                 next_policy =
16819                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16820                                                     NULL);
16821                 MLX5_ASSERT(next_policy);
16822                 mtr_policy = next_policy;
16823         }
16824         while (i) {
16825                 /*
16826                  * From the last policy to the first one in the hierarchy,
16827                  * create or get the sub-policy for each of them.
16828                  */
16829                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16830                                                         policies[--i],
16831                                                         rss_desc,
16832                                                         next_sub_policy,
16833                                                         &reuse_sub_policy);
16834                 if (!sub_policy) {
16835                         DRV_LOG(ERR, "Failed to get the sub policy.");
16836                         goto err_exit;
16837                 }
16838                 if (!reuse_sub_policy)
16839                         sub_policies[j++] = sub_policy;
16840                 next_sub_policy = sub_policy;
16841         }
16842         return sub_policy;
16843 err_exit:
16844         while (j) {
16845                 uint16_t sub_policy_num;
16846
16847                 sub_policy = sub_policies[--j];
16848                 mtr_policy = sub_policy->main_policy;
16849                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16850                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16851                         sub_policy_num = (mtr_policy->sub_policy_num >>
16852                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16853                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
16854                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
16855                                                                         NULL;
16856                         sub_policy_num--;
16857                         mtr_policy->sub_policy_num &=
16858                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16859                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16860                         mtr_policy->sub_policy_num |=
16861                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16862                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16863                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16864                                         sub_policy->idx);
16865                 }
16866         }
16867         return NULL;
16868 }
16869
16870 /**
16871  * Create the sub policy tag rule for all meters in hierarchy.
16872  *
16873  * @param[in] dev
16874  *   Pointer to Ethernet device.
16875  * @param[in] fm
16876  *   Meter information table.
16877  * @param[in] src_port
16878  *   The src port this extra rule should use.
16879  * @param[in] item
16880  *   The src port match item.
16881  * @param[out] error
16882  *   Perform verbose error reporting if not NULL.
16883  * @return
16884  *   0 on success, a negative errno value otherwise and rte_errno is set.
16885  */
16886 static int
16887 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
16888                                 struct mlx5_flow_meter_info *fm,
16889                                 int32_t src_port,
16890                                 const struct rte_flow_item *item,
16891                                 struct rte_flow_error *error)
16892 {
16893         struct mlx5_priv *priv = dev->data->dev_private;
16894         struct mlx5_flow_meter_policy *mtr_policy;
16895         struct mlx5_flow_meter_sub_policy *sub_policy;
16896         struct mlx5_flow_meter_info *next_fm = NULL;
16897         struct mlx5_flow_meter_policy *next_policy;
16898         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16899         struct mlx5_flow_tbl_data_entry *tbl_data;
16900         struct mlx5_sub_policy_color_rule *color_rule;
16901         struct mlx5_meter_policy_acts acts;
16902         uint32_t color_reg_c_idx;
16903         bool mtr_first = src_port != UINT16_MAX;
16904         struct rte_flow_attr attr = {
16905                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16906                 .priority = 0,
16907                 .ingress = 0,
16908                 .egress = 0,
16909                 .transfer = 1,
16910                 .reserved = 0,
16911         };
16912         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
16913         int i;
16914
16915         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
16916         MLX5_ASSERT(mtr_policy);
16917         if (!mtr_policy->is_hierarchy)
16918                 return 0;
16919         next_fm = mlx5_flow_meter_find(priv,
16920                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16921         if (!next_fm) {
16922                 return rte_flow_error_set(error, EINVAL,
16923                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
16924                                 "Failed to find next meter in hierarchy.");
16925         }
16926         if (!next_fm->drop_cnt)
16927                 goto exit;
16928         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
16929         sub_policy = mtr_policy->sub_policys[domain][0];
16930         for (i = 0; i < RTE_COLORS; i++) {
16931                 bool rule_exist = false;
16932                 struct mlx5_meter_policy_action_container *act_cnt;
16933
16934                 if (i >= RTE_COLOR_YELLOW)
16935                         break;
16936                 TAILQ_FOREACH(color_rule,
16937                               &sub_policy->color_rules[i], next_port)
16938                         if (color_rule->src_port == src_port) {
16939                                 rule_exist = true;
16940                                 break;
16941                         }
16942                 if (rule_exist)
16943                         continue;
16944                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16945                                 sizeof(struct mlx5_sub_policy_color_rule),
16946                                 0, SOCKET_ID_ANY);
16947                 if (!color_rule)
16948                         return rte_flow_error_set(error, ENOMEM,
16949                                 RTE_FLOW_ERROR_TYPE_ACTION,
16950                                 NULL, "No memory to create tag color rule.");
16951                 color_rule->src_port = src_port;
16952                 attr.priority = i;
16953                 next_policy = mlx5_flow_meter_policy_find(dev,
16954                                                 next_fm->policy_id, NULL);
16955                 MLX5_ASSERT(next_policy);
16956                 next_sub_policy = next_policy->sub_policys[domain][0];
16957                 tbl_data = container_of(next_sub_policy->tbl_rsc,
16958                                         struct mlx5_flow_tbl_data_entry, tbl);
16959                 act_cnt = &mtr_policy->act_cnt[i];
16960                 if (mtr_first) {
16961                         acts.dv_actions[0] = next_fm->meter_action;
16962                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
16963                 } else {
16964                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
16965                         acts.dv_actions[1] = next_fm->meter_action;
16966                 }
16967                 acts.dv_actions[2] = tbl_data->jump.action;
16968                 acts.actions_n = 3;
16969                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
16970                         next_fm = NULL;
16971                         goto err_exit;
16972                 }
16973                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16974                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16975                                 &attr, true, item,
16976                                 &color_rule->matcher, error)) {
16977                         rte_flow_error_set(error, errno,
16978                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16979                                 "Failed to create hierarchy meter matcher.");
16980                         goto err_exit;
16981                 }
16982                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
16983                                         (enum rte_color)i,
16984                                         color_rule->matcher->matcher_object,
16985                                         acts.actions_n, acts.dv_actions,
16986                                         true, item,
16987                                         &color_rule->rule, &attr)) {
16988                         rte_flow_error_set(error, errno,
16989                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16990                                 "Failed to create hierarchy meter rule.");
16991                         goto err_exit;
16992                 }
16993                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16994                                   color_rule, next_port);
16995         }
16996 exit:
16997         /*
16998          * Recursive call to iterate over all meters in the hierarchy
16999          * and create the needed rules.
17000          */
17001         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
17002                                                 src_port, item, error);
17003 err_exit:
17004         if (color_rule) {
17005                 if (color_rule->rule)
17006                         mlx5_flow_os_destroy_flow(color_rule->rule);
17007                 if (color_rule->matcher) {
17008                         struct mlx5_flow_tbl_data_entry *tbl =
17009                                 container_of(color_rule->matcher->tbl,
17010                                                 typeof(*tbl), tbl);
17011                         mlx5_list_unregister(tbl->matchers,
17012                                                 &color_rule->matcher->entry);
17013                 }
17014                 mlx5_free(color_rule);
17015         }
17016         if (next_fm)
17017                 mlx5_flow_meter_detach(priv, next_fm);
17018         return -rte_errno;
17019 }
17020
17021 /**
17022  * Destroy the sub policy table with RX queue.
17023  *
17024  * @param[in] dev
17025  *   Pointer to Ethernet device.
17026  * @param[in] mtr_policy
17027  *   Pointer to meter policy table.
17028  */
17029 static void
17030 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17031                                     struct mlx5_flow_meter_policy *mtr_policy)
17032 {
17033         struct mlx5_priv *priv = dev->data->dev_private;
17034         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17035         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17036         uint32_t i, j;
17037         uint16_t sub_policy_num, new_policy_num;
17038
17039         rte_spinlock_lock(&mtr_policy->sl);
17040         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17041                 switch (mtr_policy->act_cnt[i].fate_action) {
17042                 case MLX5_FLOW_FATE_SHARED_RSS:
17043                         sub_policy_num = (mtr_policy->sub_policy_num >>
17044                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17045                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17046                         new_policy_num = sub_policy_num;
17047                         for (j = 0; j < sub_policy_num; j++) {
17048                                 sub_policy =
17049                                         mtr_policy->sub_policys[domain][j];
17050                                 if (sub_policy) {
17051                                         __flow_dv_destroy_sub_policy_rules(dev,
17052                                                 sub_policy);
17053                                         if (sub_policy !=
17054                                             mtr_policy->sub_policys[domain][0]) {
17055                                                 mtr_policy->sub_policys[domain][j] =
17056                                                                         NULL;
17057                                                 mlx5_ipool_free(priv->sh->ipool
17058                                                         [MLX5_IPOOL_MTR_POLICY],
17059                                                         sub_policy->idx);
17060                                                 new_policy_num--;
17061                                         }
17062                                 }
17063                         }
17064                         if (new_policy_num != sub_policy_num) {
17065                                 mtr_policy->sub_policy_num &=
17066                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17067                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17068                                 mtr_policy->sub_policy_num |=
17069                                 (new_policy_num &
17070                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17071                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17072                         }
17073                         break;
17074                 case MLX5_FLOW_FATE_QUEUE:
17075                         sub_policy = mtr_policy->sub_policys[domain][0];
17076                         __flow_dv_destroy_sub_policy_rules(dev,
17077                                                            sub_policy);
17078                         break;
17079                 default:
17080                         /* Other actions have no queue: do nothing. */
17081                         break;
17082                 }
17083         }
17084         rte_spinlock_unlock(&mtr_policy->sl);
17085 }

17086 /**
17087  * Check whether the DR drop action is supported on the root table or not.
17088  *
17089  * Create a simple flow with DR drop action on root table to validate
17090  * if DR drop action on root table is supported or not.
17091  *
17092  * @param[in] dev
17093  *   Pointer to rte_eth_dev structure.
17094  *
17095  * @return
17096  *   0 on success, a negative errno value otherwise and rte_errno is set.
17097  */
17098 int
17099 mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17100 {
17101         struct mlx5_priv *priv = dev->data->dev_private;
17102         struct mlx5_dev_ctx_shared *sh = priv->sh;
17103         struct mlx5_flow_dv_match_params mask = {
17104                 .size = sizeof(mask.buf),
17105         };
17106         struct mlx5_flow_dv_match_params value = {
17107                 .size = sizeof(value.buf),
17108         };
17109         struct mlx5dv_flow_matcher_attr dv_attr = {
17110                 .type = IBV_FLOW_ATTR_NORMAL,
17111                 .priority = 0,
17112                 .match_criteria_enable = 0,
17113                 .match_mask = (void *)&mask,
17114         };
17115         struct mlx5_flow_tbl_resource *tbl = NULL;
17116         void *matcher = NULL;
17117         void *flow = NULL;
17118         int ret = -1;
17119
17120         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17121                                         0, 0, 0, NULL);
17122         if (!tbl)
17123                 goto err;
17124         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17125         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17126         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17127                                                tbl->obj, &matcher);
17128         if (ret)
17129                 goto err;
17130         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17131         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17132                                        &sh->dr_drop_action, &flow);
17133 err:
17134         /*
17135          * If the DR drop action is not supported on the root table, flow
17136          * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
17137          */
17138         if (!flow) {
17139                 if (matcher &&
17140                     (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17141                         DRV_LOG(INFO, "DR drop action is not supported in root table.");
17142                 else
17143                         DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
17144                 ret = -1;
17145         } else {
17146                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17147         }
17148         if (matcher)
17149                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17150         if (tbl)
17151                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17152         return ret;
17153 }
17154
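/*
 * Callers are expected to run this probe once at device start and select
 * the drop action accordingly. A hedged usage sketch (the fallback choice
 * named here is an assumption, not dictated by this function):
 */
static inline void
mlx5_choose_drop_action_sketch(struct rte_eth_dev *dev)
{
	if (mlx5_flow_discover_dr_action_support(dev) == 0)
		DRV_LOG(INFO, "DR drop action is usable on the root table.");
	else
		DRV_LOG(INFO, "Using a fallback drop action on the root table.");
}
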
17155 /**
17156  * Validate the batch counter support in root table.
17157  *
17158  * Create a simple flow with invalid counter and drop action on root table to
17159  * validate if batch counter with offset on root table is supported or not.
17160  *
17161  * @param[in] dev
17162  *   Pointer to rte_eth_dev structure.
17163  *
17164  * @return
17165  *   0 on success, a negative errno value otherwise and rte_errno is set.
17166  */
17167 int
17168 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17169 {
17170         struct mlx5_priv *priv = dev->data->dev_private;
17171         struct mlx5_dev_ctx_shared *sh = priv->sh;
17172         struct mlx5_flow_dv_match_params mask = {
17173                 .size = sizeof(mask.buf),
17174         };
17175         struct mlx5_flow_dv_match_params value = {
17176                 .size = sizeof(value.buf),
17177         };
17178         struct mlx5dv_flow_matcher_attr dv_attr = {
17179                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17180                 .priority = 0,
17181                 .match_criteria_enable = 0,
17182                 .match_mask = (void *)&mask,
17183         };
17184         void *actions[2] = { 0 };
17185         struct mlx5_flow_tbl_resource *tbl = NULL;
17186         struct mlx5_devx_obj *dcs = NULL;
17187         void *matcher = NULL;
17188         void *flow = NULL;
17189         int ret = -1;
17190
17191         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17192                                         0, 0, 0, NULL);
17193         if (!tbl)
17194                 goto err;
17195         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17196         if (!dcs)
17197                 goto err;
17198         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17199                                                     &actions[0]);
17200         if (ret)
17201                 goto err;
17202         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17203         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17204         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17205                                                tbl->obj, &matcher);
17206         if (ret)
17207                 goto err;
17208         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17209         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17210                                        actions, &flow);
17211 err:
17212         /*
17213          * If a batch counter with offset is not supported, the driver does
17214          * not validate the invalid offset value and flow creation succeeds.
17215          * In this case, batch counters are not supported in the root table.
17216          *
17217          * Otherwise, if flow creation fails, the counter offset is supported.
17218          */
17219         if (flow) {
17220                 DRV_LOG(INFO, "Batch counter is not supported in root "
17221                               "table. Switch to fallback mode.");
17222                 rte_errno = ENOTSUP;
17223                 ret = -rte_errno;
17224                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17225         } else {
17226                 /* Check matcher to make sure validate fail at flow create. */
17227                 if (!matcher || errno != EINVAL)
17228                         DRV_LOG(ERR, "Unexpected error in counter offset "
17229                                      "support detection");
17230                 ret = 0;
17231         }
17232         if (actions[0])
17233                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17234         if (matcher)
17235                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17236         if (tbl)
17237                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17238         if (dcs)
17239                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17240         return ret;
17241 }
17242
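/*
 * Note the inverted probe above: the counter action is created with a
 * deliberately out-of-range offset (UINT16_MAX), so a successful flow
 * creation means the offset was silently ignored, i.e. batch counters are
 * NOT usable on the root table, while an EINVAL failure means the offset
 * was checked and counter offsets are supported.
 */
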
17243 /**
17244  * Query a devx counter.
17245  *
17246  * @param[in] dev
17247  *   Pointer to the Ethernet device structure.
17248  * @param[in] cnt
17249  *   Index to the flow counter.
17250  * @param[in] clear
17251  *   Set to clear the counter statistics.
17252  * @param[out] pkts
17253  *   The statistics value of packets.
17254  * @param[out] bytes
17255  *   The statistics value of bytes.
17256  *
17257  * @return
17258  *   0 on success, otherwise return -1.
17259  */
17260 static int
17261 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17262                       uint64_t *pkts, uint64_t *bytes)
17263 {
17264         struct mlx5_priv *priv = dev->data->dev_private;
17265         struct mlx5_flow_counter *cnt;
17266         uint64_t inn_pkts, inn_bytes;
17267         int ret;
17268
17269         if (!priv->sh->devx)
17270                 return -1;
17271
17272         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17273         if (ret)
17274                 return -1;
17275         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17276         *pkts = inn_pkts - cnt->hits;
17277         *bytes = inn_bytes - cnt->bytes;
17278         if (clear) {
17279                 cnt->hits = inn_pkts;
17280                 cnt->bytes = inn_bytes;
17281         }
17282         return 0;
17283 }
17284
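/*
 * With clear == true the query above advances the software baseline
 * (cnt->hits / cnt->bytes), so consecutive queries report only the traffic
 * seen since the previous call. A hedged usage sketch (the helper name is
 * illustrative):
 */
static inline int
mtr_counter_delta_sketch(struct rte_eth_dev *dev, uint32_t counter,
			 uint64_t *pkts, uint64_t *bytes)
{
	/* Packets/bytes accumulated since the last cleared query. */
	return flow_dv_counter_query(dev, counter, true, pkts, bytes);
}
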
17285 /**
17286  * Get aged-out flows.
17287  *
17288  * @param[in] dev
17289  *   Pointer to the Ethernet device structure.
17290  * @param[in] context
17291  *   The address of an array of pointers to the aged-out flows contexts.
17292  * @param[in] nb_contexts
17293  *   The length of context array pointers.
17294  * @param[out] error
17295  *   Perform verbose error reporting if not NULL. Initialized in case of
17296  *   error only.
17297  *
17298  * @return
17299  *   The number of aged contexts returned on success, otherwise a
17300  *   negative errno value. If nb_contexts is 0, return the total
17301  *   number of aged contexts. If nb_contexts is not 0, return the
17302  *   number of aged flows reported in the context array.
17304  */
17305 static int
17306 flow_dv_get_aged_flows(struct rte_eth_dev *dev,
17307                     void **context,
17308                     uint32_t nb_contexts,
17309                     struct rte_flow_error *error)
17310 {
17311         struct mlx5_priv *priv = dev->data->dev_private;
17312         struct mlx5_age_info *age_info;
17313         struct mlx5_age_param *age_param;
17314         struct mlx5_flow_counter *counter;
17315         struct mlx5_aso_age_action *act;
17316         int nb_flows = 0;
17317
17318         if (nb_contexts && !context)
17319                 return rte_flow_error_set(error, EINVAL,
17320                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17321                                           NULL, "empty context");
17322         age_info = GET_PORT_AGE_INFO(priv);
17323         rte_spinlock_lock(&age_info->aged_sl);
17324         LIST_FOREACH(act, &age_info->aged_aso, next) {
17325                 nb_flows++;
17326                 if (nb_contexts) {
17327                         context[nb_flows - 1] =
17328                                                 act->age_params.context;
17329                         if (!(--nb_contexts))
17330                                 break;
17331                 }
17332         }
17333         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17334                 nb_flows++;
17335                 if (nb_contexts) {
17336                         age_param = MLX5_CNT_TO_AGE(counter);
17337                         context[nb_flows - 1] = age_param->context;
17338                         if (!(--nb_contexts))
17339                                 break;
17340                 }
17341         }
17342         rte_spinlock_unlock(&age_info->aged_sl);
17343         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17344         return nb_flows;
17345 }
17346
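/*
 * Applications typically consume this through rte_flow_get_aged_flows(),
 * calling it twice: once with nb_contexts == 0 to size the result, then
 * again to fetch the contexts. A hedged usage sketch (the helper name is
 * illustrative, error handling trimmed):
 */
static inline int
fetch_aged_contexts_sketch(uint16_t port_id, struct rte_flow_error *error)
{
	void **contexts;
	int nb;

	/* The first call only counts the aged-out flows. */
	nb = rte_flow_get_aged_flows(port_id, NULL, 0, error);
	if (nb <= 0)
		return nb;
	contexts = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * nb, 0,
			       SOCKET_ID_ANY);
	if (!contexts)
		return -ENOMEM;
	/* The second call fills the array with user-provided contexts. */
	nb = rte_flow_get_aged_flows(port_id, contexts, nb, error);
	/* ... hand the contexts back to the application here ... */
	mlx5_free(contexts);
	return nb;
}
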
17347 /*
17348  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17349  */
17350 static uint32_t
17351 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17352 {
17353         return flow_dv_counter_alloc(dev, 0);
17354 }
17355
17356 /**
17357  * Validate indirect action.
17358  * Dispatcher for action type specific validation.
17359  *
17360  * @param[in] dev
17361  *   Pointer to the Ethernet device structure.
17362  * @param[in] conf
17363  *   Indirect action configuration.
17364  * @param[in] action
17365  *   The indirect action object to validate.
17366  * @param[out] error
17367  *   Perform verbose error reporting if not NULL. Initialized in case of
17368  *   error only.
17369  *
17370  * @return
17371  *   0 on success, otherwise negative errno value.
17372  */
17373 static int
17374 flow_dv_action_validate(struct rte_eth_dev *dev,
17375                         const struct rte_flow_indir_action_conf *conf,
17376                         const struct rte_flow_action *action,
17377                         struct rte_flow_error *err)
17378 {
17379         struct mlx5_priv *priv = dev->data->dev_private;
17380
17381         RTE_SET_USED(conf);
17382         switch (action->type) {
17383         case RTE_FLOW_ACTION_TYPE_RSS:
17384                 /*
17385                  * priv->obj_ops is set according to driver capabilities.
17386                  * When the DevX capabilities are sufficient, it is set
17387                  * to devx_obj_ops.
17388                  * Otherwise, it is set to ibv_obj_ops.
17389                  * ibv_obj_ops does not support the ind_table_modify
17390                  * operation, so the indirect RSS action cannot be used.
17391                  */
17392                 if (priv->obj_ops.ind_table_modify == NULL)
17393                         return rte_flow_error_set
17394                                         (err, ENOTSUP,
17395                                          RTE_FLOW_ERROR_TYPE_ACTION,
17396                                          NULL,
17397                                          "Indirect RSS action not supported");
17398                 return mlx5_validate_action_rss(dev, action, err);
17399         case RTE_FLOW_ACTION_TYPE_AGE:
17400                 if (!priv->sh->aso_age_mng)
17401                         return rte_flow_error_set(err, ENOTSUP,
17402                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17403                                                 NULL,
17404                                                 "Indirect age action not supported");
17405                 return flow_dv_validate_action_age(0, action, dev, err);
17406         case RTE_FLOW_ACTION_TYPE_COUNT:
17407                 return flow_dv_validate_action_count(dev, true, 0, err);
17408         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17409                 if (!priv->sh->ct_aso_en)
17410                         return rte_flow_error_set(err, ENOTSUP,
17411                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17412                                         "ASO CT is not supported");
17413                 return mlx5_validate_action_ct(dev, action->conf, err);
17414         default:
17415                 return rte_flow_error_set(err, ENOTSUP,
17416                                           RTE_FLOW_ERROR_TYPE_ACTION,
17417                                           NULL,
17418                                           "action type not supported");
17419         }
17420 }
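
/*
 * Editor's sketch: this dispatcher runs when an application creates an
 * indirect action handle. A minimal indirect RSS example, assuming DevX
 * object operations are available (ind_table_modify != NULL). Hypothetical
 * application-side code; `port_id` and the queue list are assumptions.
 *
 *   #include <rte_flow.h>
 *
 *   static struct rte_flow_action_handle *
 *   create_indirect_rss(uint16_t port_id)
 *   {
 *           static const uint16_t queues[] = { 0, 1, 2, 3 };
 *           const struct rte_flow_action_rss rss = {
 *                   .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *                   .types = RTE_ETH_RSS_IP,
 *                   .queue_num = RTE_DIM(queues),
 *                   .queue = queues,
 *           };
 *           const struct rte_flow_action action = {
 *                   .type = RTE_FLOW_ACTION_TYPE_RSS,
 *                   .conf = &rss,
 *           };
 *           const struct rte_flow_indir_action_conf conf = {
 *                   .ingress = 1,
 *           };
 *           struct rte_flow_error error;
 *
 *           // Fails with ENOTSUP when ind_table_modify is unavailable.
 *           return rte_flow_action_handle_create(port_id, &conf,
 *                                                &action, &error);
 *   }
 */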
17421
17422 /**
17423  * Check if the RSS configurations for colors of a meter policy match
17424  * each other, except the queues.
17425  *
17426  * @param[in] r1
17427  *   Pointer to the first RSS flow action.
17428  * @param[in] r2
17429  *   Pointer to the second RSS flow action.
17430  *
17431  * @return
17432  *   0 on match, 1 on conflict.
17433  */
17434 static inline int
17435 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
17436                                const struct rte_flow_action_rss *r2)
17437 {
17438         if (!r1 || !r2)
17439                 return 0;
17440         if (r1->func != r2->func || r1->level != r2->level ||
17441             r1->types != r2->types || r1->key_len != r2->key_len ||
17442             memcmp(r1->key, r2->key, r1->key_len))
17443                 return 1;
17444         return 0;
17445 }
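
/*
 * Editor's illustration: two per-color RSS configurations that the
 * predicate above treats as matching -- identical func/level/types/key,
 * only the queue sets differ. Hypothetical values for illustration only.
 *
 *   static const uint16_t q_green[] = { 0, 1 };
 *   static const uint16_t q_yellow[] = { 2, 3 };
 *   const struct rte_flow_action_rss green = {
 *           .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *           .level = 0,
 *           .types = RTE_ETH_RSS_IP,
 *           .key_len = 0,
 *           .queue_num = RTE_DIM(q_green),
 *           .queue = q_green,
 *   };
 *   const struct rte_flow_action_rss yellow = {
 *           .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *           .level = 0,
 *           .types = RTE_ETH_RSS_IP,
 *           .key_len = 0,
 *           .queue_num = RTE_DIM(q_yellow),
 *           .queue = q_yellow,
 *   };
 *
 *   // flow_dv_mtr_policy_rss_compare(&green, &yellow) returns 0 (match):
 *   // differing queue sets are explicitly allowed across policy colors.
 */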
17446
17447 /**
17448  * Validate the meter hierarchy chain for meter policy.
17449  *
17450  * @param[in] dev
17451  *   Pointer to the Ethernet device structure.
17452  * @param[in] meter_id
17453  *   Meter id.
17454  * @param[in] action_flags
17455  *   Holds the actions detected until now.
17456  * @param[out] is_rss
17457  *   Set to the RSS flag of the final (non-hierarchy) policy in the chain.
17458  * @param[out] hierarchy_domain
17459  *   The domain bitmap for hierarchy policy.
17460  * @param[out] error
17461  *   Perform verbose error reporting if not NULL. Initialized in case of
17462  *   error only.
17463  *
17464  * @return
17465  *   0 on success, otherwise negative errno value with error set.
17466  */
17467 static int
17468 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17469                                   uint32_t meter_id,
17470                                   uint64_t action_flags,
17471                                   bool *is_rss,
17472                                   uint8_t *hierarchy_domain,
17473                                   struct rte_mtr_error *error)
17474 {
17475         struct mlx5_priv *priv = dev->data->dev_private;
17476         struct mlx5_flow_meter_info *fm;
17477         struct mlx5_flow_meter_policy *policy;
17478         uint8_t cnt = 1;
17479
17480         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17481                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17482                 return -rte_mtr_error_set(error, EINVAL,
17483                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17484                                         NULL,
17485                                         "Multiple fate actions not supported.");
17486         *hierarchy_domain = 0;
17487         while (true) {
17488                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17489                 if (!fm)
17490                         return -rte_mtr_error_set(error, EINVAL,
17491                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17492                                         "Meter not found in meter hierarchy.");
17493                 if (fm->def_policy)
17494                         return -rte_mtr_error_set(error, EINVAL,
17495                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17496                         "Non-termination meter not supported in hierarchy.");
17497                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17498                 MLX5_ASSERT(policy);
17499                 /*
17500                  * Only inherit the supported domains of the first meter
17501                  * in the hierarchy.
17502                  * Each meter supports at least one domain.
17503                  */
17504                 if (!*hierarchy_domain) {
17505                         if (policy->transfer)
17506                                 *hierarchy_domain |=
17507                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17508                         if (policy->ingress)
17509                                 *hierarchy_domain |=
17510                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17511                         if (policy->egress)
17512                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17513                 }
17514                 if (!policy->is_hierarchy) {
17515                         *is_rss = policy->is_rss;
17516                         break;
17517                 }
17518                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17519                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17520                         return -rte_mtr_error_set(error, EINVAL,
17521                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17522                                         "Exceeded the max hierarchy meter number.");
17523         }
17524         return 0;
17525 }
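
/*
 * Editor's sketch: the shape of a meter hierarchy accepted by the walk
 * above -- a policy whose GREEN action chains to another meter, with no
 * YELLOW policy, terminated by a meter holding a non-hierarchy policy.
 * Hypothetical application-side code; `port_id`, NEXT_MTR_ID and
 * POLICY_ID are assumptions.
 *
 *   #include <rte_mtr.h>
 *
 *   const struct rte_flow_action_meter next = { .mtr_id = NEXT_MTR_ID };
 *   const struct rte_flow_action green_acts[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &next },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   const struct rte_flow_action red_acts[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_mtr_meter_policy_params params = {
 *           .actions = {
 *                   [RTE_COLOR_GREEN] = green_acts,
 *                   [RTE_COLOR_YELLOW] = NULL,
 *                   [RTE_COLOR_RED] = red_acts,
 *           },
 *   };
 *   struct rte_mtr_error mtr_err;
 *
 *   // The chain may be at most MLX5_MTR_CHAIN_MAX_NUM meters deep.
 *   rte_mtr_meter_policy_add(port_id, POLICY_ID, &params, &mtr_err);
 */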
17526
17527 /**
17528  * Validate meter policy actions.
17529  * Dispatcher for action type specific validation.
17530  *
17531  * @param[in] dev
17532  *   Pointer to the Ethernet device structure.
17533  * @param[in] actions
17534  *   Array of per-color meter policy actions to validate.
17535  * @param[in] attr
17536  *   Attributes of flow to determine steering domain.
17537  * @param[out] error
17538  *   Perform verbose error reporting if not NULL. Initialized in case of
17539  *   error only.
17540  *
17541  * @return
17542  *   0 on success, otherwise negative errno value.
17543  */
17544 static int
17545 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17546                         const struct rte_flow_action *actions[RTE_COLORS],
17547                         struct rte_flow_attr *attr,
17548                         bool *is_rss,
17549                         uint8_t *domain_bitmap,
17550                         uint8_t *policy_mode,
17551                         struct rte_mtr_error *error)
17552 {
17553         struct mlx5_priv *priv = dev->data->dev_private;
17554         struct mlx5_dev_config *dev_conf = &priv->config;
17555         const struct rte_flow_action *act;
17556         uint64_t action_flags[RTE_COLORS] = {0};
17557         int actions_n;
17558         int i, ret;
17559         struct rte_flow_error flow_err;
17560         uint8_t domain_color[RTE_COLORS] = {0};
17561         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17562         uint8_t hierarchy_domain = 0;
17563         const struct rte_flow_action_meter *mtr;
17564         bool def_green = false;
17565         bool def_yellow = false;
17566         const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
17567
17568         if (!priv->config.dv_esw_en)
17569                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17570         *domain_bitmap = def_domain;
17571         /* The red color supports only the DROP action. */
17572         if (!actions[RTE_COLOR_RED] ||
17573             actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17574                 return -rte_mtr_error_set(error, ENOTSUP,
17575                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17576                                 NULL, "Red color only supports drop action.");
17577         /*
17578          * Check default policy actions:
17579          * Green / Yellow: no action, Red: drop action.
17580          * An empty G or Y triggers the creation of default policy actions.
17581          */
17582         if (!actions[RTE_COLOR_GREEN] ||
17583             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
17584                 def_green = true;
17585         if (!actions[RTE_COLOR_YELLOW] ||
17586             actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
17587                 def_yellow = true;
17588         if (def_green && def_yellow) {
17589                 *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
17590                 return 0;
17591         } else if (!def_green && def_yellow) {
17592                 *policy_mode = MLX5_MTR_POLICY_MODE_OG;
17593         } else if (def_green && !def_yellow) {
17594                 *policy_mode = MLX5_MTR_POLICY_MODE_OY;
17595         }
17596         /* Initialize to an empty string to guard against NULL pointer access. */
17597         flow_err.message = "";
17598         for (i = 0; i < RTE_COLORS; i++) {
17599                 act = actions[i];
17600                 for (action_flags[i] = 0, actions_n = 0;
17601                      act && act->type != RTE_FLOW_ACTION_TYPE_END;
17602                      act++) {
17603                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17604                                 return -rte_mtr_error_set(error, ENOTSUP,
17605                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17606                                           NULL, "too many actions");
17607                         switch (act->type) {
17608                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17609                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17610                                 if (!priv->config.dv_esw_en)
17611                                         return -rte_mtr_error_set(error,
17612                                         ENOTSUP,
17613                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17614                                         NULL, "PORT action validation"
17615                                         " failed: E-Switch is disabled");
17616                                 ret = flow_dv_validate_action_port_id(dev,
17617                                                 action_flags[i],
17618                                                 act, attr, &flow_err);
17619                                 if (ret)
17620                                         return -rte_mtr_error_set(error,
17621                                         ENOTSUP,
17622                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17623                                         NULL, flow_err.message ?
17624                                         flow_err.message :
17625                                         "PORT action validation failed");
17626                                 ++actions_n;
17627                                 action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
17628                                 break;
17629                         case RTE_FLOW_ACTION_TYPE_MARK:
17630                                 ret = flow_dv_validate_action_mark(dev, act,
17631                                                            action_flags[i],
17632                                                            attr, &flow_err);
17633                                 if (ret < 0)
17634                                         return -rte_mtr_error_set(error,
17635                                         ENOTSUP,
17636                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17637                                         NULL, flow_err.message ?
17638                                         flow_err.message :
17639                                         "Mark action validation failed");
17640                                 if (dev_conf->dv_xmeta_en !=
17641                                         MLX5_XMETA_MODE_LEGACY)
17642                                         return -rte_mtr_error_set(error,
17643                                         ENOTSUP,
17644                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17645                                         NULL, "Extended MARK action is "
17646                                         "not supported. Please try using "
17647                                         "the default policy for the meter.");
17648                                 action_flags[i] |= MLX5_FLOW_ACTION_MARK;
17649                                 ++actions_n;
17650                                 break;
17651                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17652                                 ret = flow_dv_validate_action_set_tag(dev,
17653                                                         act, action_flags[i],
17654                                                         attr, &flow_err);
17655                                 if (ret)
17656                                         return -rte_mtr_error_set(error,
17657                                         ENOTSUP,
17658                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17659                                         NULL, flow_err.message ?
17660                                         flow_err.message :
17661                                         "Set tag action validation failed");
17662                                 action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
17663                                 ++actions_n;
17664                                 break;
17665                         case RTE_FLOW_ACTION_TYPE_DROP:
17666                                 ret = mlx5_flow_validate_action_drop
17667                                         (action_flags[i], attr, &flow_err);
17668                                 if (ret < 0)
17669                                         return -rte_mtr_error_set(error,
17670                                         ENOTSUP,
17671                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17672                                         NULL, flow_err.message ?
17673                                         flow_err.message :
17674                                         "Drop action validation failed");
17675                                 action_flags[i] |= MLX5_FLOW_ACTION_DROP;
17676                                 ++actions_n;
17677                                 break;
17678                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17679                                 /*
17680                                  * Check whether extensive
17681                                  * metadata feature is engaged.
17682                                  */
17683                                 if (dev_conf->dv_flow_en &&
17684                                     (dev_conf->dv_xmeta_en !=
17685                                      MLX5_XMETA_MODE_LEGACY) &&
17686                                     mlx5_flow_ext_mreg_supported(dev))
17687                                         return -rte_mtr_error_set(error,
17688                                           ENOTSUP,
17689                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17690                                           NULL, "Queue action with meta "
17691                                           "is not supported. Please try using "
17692                                           "the default policy for the meter.");
17693                                 ret = mlx5_flow_validate_action_queue(act,
17694                                                         action_flags[i], dev,
17695                                                         attr, &flow_err);
17696                                 if (ret < 0)
17697                                         return -rte_mtr_error_set(error,
17698                                           ENOTSUP,
17699                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17700                                           NULL, flow_err.message ?
17701                                           flow_err.message :
17702                                           "Queue action validation failed");
17703                                 action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
17704                                 ++actions_n;
17705                                 break;
17706                         case RTE_FLOW_ACTION_TYPE_RSS:
17707                                 if (dev_conf->dv_flow_en &&
17708                                     (dev_conf->dv_xmeta_en !=
17709                                      MLX5_XMETA_MODE_LEGACY) &&
17710                                     mlx5_flow_ext_mreg_supported(dev))
17711                                         return -rte_mtr_error_set(error,
17712                                           ENOTSUP,
17713                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17714                                           NULL, "RSS action with meta "
17715                                           "is not supported. Please try using "
17716                                           "the default policy for the meter.");
17717                                 ret = mlx5_validate_action_rss(dev, act,
17718                                                                &flow_err);
17719                                 if (ret < 0)
17720                                         return -rte_mtr_error_set(error,
17721                                           ENOTSUP,
17722                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17723                                           NULL, flow_err.message ?
17724                                           flow_err.message :
17725                                           "RSS action validation failed");
17726                                 action_flags[i] |= MLX5_FLOW_ACTION_RSS;
17727                                 ++actions_n;
17728                                 /* Either G or Y will set the RSS. */
17729                                 rss_color[i] = act->conf;
17730                                 break;
17731                         case RTE_FLOW_ACTION_TYPE_JUMP:
17732                                 ret = flow_dv_validate_action_jump(dev,
17733                                         NULL, act, action_flags[i],
17734                                         attr, true, &flow_err);
17735                                 if (ret)
17736                                         return -rte_mtr_error_set(error,
17737                                           ENOTSUP,
17738                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17739                                           NULL, flow_err.message ?
17740                                           flow_err.message :
17741                                           "Jump action validation failed");
17742                                 ++actions_n;
17743                                 action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
17744                                 break;
17745                         /*
17746                          * Only the last meter in the hierarchy supports
17747                          * YELLOW color steering, so no other METER action
17748                          * may appear in the policy actions list.
17749                          */
17750                         case RTE_FLOW_ACTION_TYPE_METER:
17751                                 if (i != RTE_COLOR_GREEN)
17752                                         return -rte_mtr_error_set(error,
17753                                                 ENOTSUP,
17754                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17755                                                 NULL,
17756                                                 "Meter hierarchy only supports GREEN color.");
17757                                 if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
17758                                         return -rte_mtr_error_set(error,
17759                                                 ENOTSUP,
17760                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17761                                                 NULL,
17762                                                 "No yellow policy should be provided in meter hierarchy.");
17763                                 mtr = act->conf;
17764                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
17765                                                         mtr->mtr_id,
17766                                                         action_flags[i],
17767                                                         is_rss,
17768                                                         &hierarchy_domain,
17769                                                         error);
17770                                 if (ret)
17771                                         return ret;
17772                                 ++actions_n;
17773                                 action_flags[i] |=
17774                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
17775                                 break;
17776                         default:
17777                                 return -rte_mtr_error_set(error, ENOTSUP,
17778                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17779                                         NULL,
17780                                         "Action not supported in meter policy");
17781                         }
17782                 }
17783                 if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
17784                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
17785                 } else if ((action_flags[i] &
17786                           (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
17787                           (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
17788                         /*
17789                          * Only MLX5_XMETA_MODE_LEGACY is supported, so
17790                          * the MARK action is limited to the ingress domain.
17791                          */
17792                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
17793                 } else {
17794                         domain_color[i] = def_domain;
17795                         if (action_flags[i] &&
17796                             !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17797                                 domain_color[i] &=
17798                                 ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17799                 }
17800                 if (action_flags[i] &
17801                     MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
17802                         domain_color[i] &= hierarchy_domain;
17803                 /*
17804                  * Non-termination actions only support the NIC Tx domain.
17805                  * The adjustment should be skipped when there is no
17806                  * action or only END is provided. The default domains
17807                  * bit-mask is set to find the minimum intersection.
17808                  * The action flags checking should also be skipped.
17809                  */
17810                 if ((def_green && i == RTE_COLOR_GREEN) ||
17811                     (def_yellow && i == RTE_COLOR_YELLOW))
17812                         continue;
17813                 /*
17814                  * Validate the drop action mutual exclusion
17815                  * with other actions. Drop action is mutually-exclusive
17816                  * with any other action, except for Count action.
17817                  */
17818                 if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
17819                     (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
17820                         return -rte_mtr_error_set(error, ENOTSUP,
17821                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17822                                 NULL, "Drop action is mutually-exclusive "
17823                                 "with any other action");
17824                 }
17825                 /* E-Switch has a few restrictions on using items and actions. */
17826                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
17827                         if (!mlx5_flow_ext_mreg_supported(dev) &&
17828                             action_flags[i] & MLX5_FLOW_ACTION_MARK)
17829                                 return -rte_mtr_error_set(error, ENOTSUP,
17830                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17831                                         NULL, "unsupported action MARK");
17832                         if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
17833                                 return -rte_mtr_error_set(error, ENOTSUP,
17834                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17835                                         NULL, "unsupported action QUEUE");
17836                         if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
17837                                 return -rte_mtr_error_set(error, ENOTSUP,
17838                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17839                                         NULL, "unsupported action RSS");
17840                         if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17841                                 return -rte_mtr_error_set(error, ENOTSUP,
17842                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17843                                         NULL, "no fate action is found");
17844                 } else {
17845                         if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
17846                             (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
17847                                 if ((domain_color[i] &
17848                                      MLX5_MTR_DOMAIN_EGRESS_BIT))
17849                                         domain_color[i] =
17850                                                 MLX5_MTR_DOMAIN_EGRESS_BIT;
17851                                 else
17852                                         return -rte_mtr_error_set(error,
17853                                                 ENOTSUP,
17854                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17855                                                 NULL,
17856                                                 "no fate action is found");
17857                         }
17858                 }
17859         }
17860         /* If both colors have RSS, the attributes should be the same. */
17861         if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
17862                                            rss_color[RTE_COLOR_YELLOW]))
17863                 return -rte_mtr_error_set(error, EINVAL,
17864                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17865                                           NULL, "policy RSS attr conflict");
17866         if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
17867                 *is_rss = true;
17868         /* "domain_color[C]" is non-zero for each color, default is ALL. */
17869         if (!def_green && !def_yellow &&
17870             domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
17871             !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
17872             !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
17873                 return -rte_mtr_error_set(error, EINVAL,
17874                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17875                                           NULL, "policy domains conflict");
17876         /*
17877          * At least one color policy is listed in the actions; the domains
17878          * to be supported are their intersection.
17879          */
17880         *domain_bitmap = domain_color[RTE_COLOR_GREEN] &
17881                          domain_color[RTE_COLOR_YELLOW];
17882         return 0;
17883 }
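
/*
 * Editor's sketch: a per-color action set that passes the validation
 * above -- RED is DROP, GREEN and YELLOW both use RSS with identical
 * attributes except the queues, so the policy resolves to the NIC
 * ingress domain with *is_rss set. `green` and `yellow` refer to the
 * rte_flow_action_rss configurations sketched after
 * flow_dv_mtr_policy_rss_compare() above; hypothetical code.
 *
 *   const struct rte_flow_action green_acts[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &green },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   const struct rte_flow_action yellow_acts[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &yellow },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   const struct rte_flow_action red_acts[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   const struct rte_flow_action *actions[RTE_COLORS] = {
 *           [RTE_COLOR_GREEN] = green_acts,
 *           [RTE_COLOR_YELLOW] = yellow_acts,
 *           [RTE_COLOR_RED] = red_acts,
 *   };
 */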
17884
17885 static int
17886 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
17887 {
17888         struct mlx5_priv *priv = dev->data->dev_private;
17889         int ret = 0;
17890
17891         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
17892                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
17893                                                 flags);
17894                 if (ret != 0)
17895                         return ret;
17896         }
17897         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
17898                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
17899                 if (ret != 0)
17900                         return ret;
17901         }
17902         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
17903                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
17904                 if (ret != 0)
17905                         return ret;
17906         }
17907         return 0;
17908 }
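
/*
 * Editor's sketch: this handler backs the mlx5-specific
 * rte_pmd_mlx5_sync_flow() call, which lets an application ensure that
 * previously created rules are observable in the selected steering
 * domains. Hypothetical application-side code; `port_id` is an
 * assumption.
 *
 *   #include <rte_pmd_mlx5.h>
 *
 *   uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_NIC_TX |
 *                      MLX5_DOMAIN_BIT_FDB;
 *
 *   if (rte_pmd_mlx5_sync_flow(port_id, domains) != 0)
 *           handle_sync_error(); // hypothetical application error path
 */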
17909
17910 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
17911         .validate = flow_dv_validate,
17912         .prepare = flow_dv_prepare,
17913         .translate = flow_dv_translate,
17914         .apply = flow_dv_apply,
17915         .remove = flow_dv_remove,
17916         .destroy = flow_dv_destroy,
17917         .query = flow_dv_query,
17918         .create_mtr_tbls = flow_dv_create_mtr_tbls,
17919         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
17920         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
17921         .create_meter = flow_dv_mtr_alloc,
17922         .free_meter = flow_dv_aso_mtr_release_to_pool,
17923         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
17924         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
17925         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
17926         .create_policy_rules = flow_dv_create_policy_rules,
17927         .destroy_policy_rules = flow_dv_destroy_policy_rules,
17928         .create_def_policy = flow_dv_create_def_policy,
17929         .destroy_def_policy = flow_dv_destroy_def_policy,
17930         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
17931         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
17932         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
17933         .counter_alloc = flow_dv_counter_allocate,
17934         .counter_free = flow_dv_counter_free,
17935         .counter_query = flow_dv_counter_query,
17936         .get_aged_flows = flow_dv_get_aged_flows,
17937         .action_validate = flow_dv_action_validate,
17938         .action_create = flow_dv_action_create,
17939         .action_destroy = flow_dv_action_destroy,
17940         .action_update = flow_dv_action_update,
17941         .action_query = flow_dv_action_query,
17942         .sync_domain = flow_dv_sync_domain,
17943 };
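
/*
 * Editor's note: this table registers the DV backend with the generic
 * mlx5 flow layer, which resolves a backend per flow and calls through
 * the function pointers. A simplified sketch of the dispatch (the actual
 * registration lives in mlx5_flow.c; the shape below is illustrative,
 * not a verbatim copy):
 *
 *   const struct mlx5_flow_driver_ops *fops;
 *
 *   fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *   fops->validate(dev, attr, items, actions, external, hairpin, error);
 */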
17944
17945 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
17946