23e5849783dd9a0333942a60909c539ca76aa5cf
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24
25 #include <mlx5_glue.h>
26 #include <mlx5_devx_cmds.h>
27 #include <mlx5_prm.h>
28 #include <mlx5_malloc.h>
29
30 #include "mlx5_defs.h"
31 #include "mlx5.h"
32 #include "mlx5_common_os.h"
33 #include "mlx5_flow.h"
34 #include "mlx5_flow_os.h"
35 #include "mlx5_rxtx.h"
36 #include "rte_pmd_mlx5.h"
37
38 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
39
40 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
41 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #endif
43
44 #ifndef HAVE_MLX5DV_DR_ESWITCH
45 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
46 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
47 #endif
48 #endif
49
50 #ifndef HAVE_MLX5DV_DR
51 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
52 #endif
53
54 /* VLAN header definitions */
55 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
56 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
57 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
58 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
59 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
60
61 union flow_dv_attr {
62         struct {
63                 uint32_t valid:1;
64                 uint32_t ipv4:1;
65                 uint32_t ipv6:1;
66                 uint32_t tcp:1;
67                 uint32_t udp:1;
68                 uint32_t reserved:27;
69         };
70         uint32_t attr;
71 };
72
73 static int
74 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
75                              struct mlx5_flow_tbl_resource *tbl);
76
77 static int
78 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
79                                      uint32_t encap_decap_idx);
80
81 static int
82 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
83                                         uint32_t port_id);
84 static void
85 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
86
87 static int
88 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
89                                   uint32_t rix_jump);
90
91 /**
92  * Initialize flow attributes structure according to flow items' types.
93  *
94  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
95  * mode. For tunnel mode, the items to be modified are the outermost ones.
96  *
97  * @param[in] item
98  *   Pointer to item specification.
99  * @param[out] attr
100  *   Pointer to flow attributes structure.
101  * @param[in] dev_flow
102  *   Pointer to the sub flow.
103  * @param[in] tunnel_decap
104  *   Whether action is after tunnel decapsulation.
105  */
106 static void
107 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
108                   struct mlx5_flow *dev_flow, bool tunnel_decap)
109 {
110         uint64_t layers = dev_flow->handle->layers;
111
112         /*
113          * If layers is already initialized, it means this dev_flow is the
114          * suffix flow, the layers flags is set by the prefix flow. Need to
115          * use the layer flags from prefix flow as the suffix flow may not
116          * have the user defined items as the flow is split.
117          */
118         if (layers) {
119                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
120                         attr->ipv4 = 1;
121                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
122                         attr->ipv6 = 1;
123                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
124                         attr->tcp = 1;
125                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
126                         attr->udp = 1;
127                 attr->valid = 1;
128                 return;
129         }
130         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
131                 uint8_t next_protocol = 0xff;
132                 switch (item->type) {
133                 case RTE_FLOW_ITEM_TYPE_GRE:
134                 case RTE_FLOW_ITEM_TYPE_NVGRE:
135                 case RTE_FLOW_ITEM_TYPE_VXLAN:
136                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
137                 case RTE_FLOW_ITEM_TYPE_GENEVE:
138                 case RTE_FLOW_ITEM_TYPE_MPLS:
139                         if (tunnel_decap)
140                                 attr->attr = 0;
141                         break;
142                 case RTE_FLOW_ITEM_TYPE_IPV4:
143                         if (!attr->ipv6)
144                                 attr->ipv4 = 1;
145                         if (item->mask != NULL &&
146                             ((const struct rte_flow_item_ipv4 *)
147                             item->mask)->hdr.next_proto_id)
148                                 next_protocol =
149                                     ((const struct rte_flow_item_ipv4 *)
150                                       (item->spec))->hdr.next_proto_id &
151                                     ((const struct rte_flow_item_ipv4 *)
152                                       (item->mask))->hdr.next_proto_id;
153                         if ((next_protocol == IPPROTO_IPIP ||
154                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
155                                 attr->attr = 0;
156                         break;
157                 case RTE_FLOW_ITEM_TYPE_IPV6:
158                         if (!attr->ipv4)
159                                 attr->ipv6 = 1;
160                         if (item->mask != NULL &&
161                             ((const struct rte_flow_item_ipv6 *)
162                             item->mask)->hdr.proto)
163                                 next_protocol =
164                                     ((const struct rte_flow_item_ipv6 *)
165                                       (item->spec))->hdr.proto &
166                                     ((const struct rte_flow_item_ipv6 *)
167                                       (item->mask))->hdr.proto;
168                         if ((next_protocol == IPPROTO_IPIP ||
169                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
170                                 attr->attr = 0;
171                         break;
172                 case RTE_FLOW_ITEM_TYPE_UDP:
173                         if (!attr->tcp)
174                                 attr->udp = 1;
175                         break;
176                 case RTE_FLOW_ITEM_TYPE_TCP:
177                         if (!attr->udp)
178                                 attr->tcp = 1;
179                         break;
180                 default:
181                         break;
182                 }
183         }
184         attr->valid = 1;
185 }
186
187 /**
188  * Convert rte_mtr_color to mlx5 color.
189  *
190  * @param[in] rcol
191  *   rte_mtr_color.
192  *
193  * @return
194  *   mlx5 color.
195  */
196 static int
197 rte_col_2_mlx5_col(enum rte_color rcol)
198 {
199         switch (rcol) {
200         case RTE_COLOR_GREEN:
201                 return MLX5_FLOW_COLOR_GREEN;
202         case RTE_COLOR_YELLOW:
203                 return MLX5_FLOW_COLOR_YELLOW;
204         case RTE_COLOR_RED:
205                 return MLX5_FLOW_COLOR_RED;
206         default:
207                 break;
208         }
209         return MLX5_FLOW_COLOR_UNDEFINED;
210 }
211
/* Describes a single protocol-header field for modify-header actions. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* HW modification field id. */
};
217
/* Ethernet header fields: each MAC address is split into a 32-bit high
 * part (bits 47..16) and a 16-bit low part (bits 15..0) to match the HW
 * modification fields. Terminated by an all-zero entry.
 */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};
225
/* Outermost VLAN VID field. Unlike the other tables here, the size is
 * expressed in bits !!! (VID is a 12-bit field).
 */
struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};
231
/* IPv4 header fields; offsets follow the IPv4 header layout (DSCP in
 * byte 1, TTL in byte 8, source/destination addresses at 12/16).
 * Terminated by an all-zero entry.
 */
struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};
239
/* IPv6 header fields; the 128-bit source and destination addresses are
 * split into four 32-bit chunks each to match the HW modification
 * fields. Terminated by an all-zero entry.
 */
struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};
253
/* UDP header fields (source/destination ports). Zero entry terminates. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};
259
/* TCP header fields (ports, sequence and ack numbers). Zero entry
 * terminates.
 */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
267
268 static void
269 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
270                           uint8_t next_protocol, uint64_t *item_flags,
271                           int *tunnel)
272 {
273         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
274                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
275         if (next_protocol == IPPROTO_IPIP) {
276                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
277                 *tunnel = 1;
278         }
279         if (next_protocol == IPPROTO_IPV6) {
280                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
281                 *tunnel = 1;
282         }
283 }
284
285 /* Update VLAN's VID/PCP based on input rte_flow_action.
286  *
287  * @param[in] action
288  *   Pointer to struct rte_flow_action.
289  * @param[out] vlan
290  *   Pointer to struct rte_vlan_hdr.
291  */
292 static void
293 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
294                          struct rte_vlan_hdr *vlan)
295 {
296         uint16_t vlan_tci;
297         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
298                 vlan_tci =
299                     ((const struct rte_flow_action_of_set_vlan_pcp *)
300                                                action->conf)->vlan_pcp;
301                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
302                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
303                 vlan->vlan_tci |= vlan_tci;
304         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
305                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
306                 vlan->vlan_tci |= rte_be_to_cpu_16
307                     (((const struct rte_flow_action_of_set_vlan_vid *)
308                                              action->conf)->vlan_vid);
309         }
310 }
311
312 /**
313  * Fetch 1, 2, 3 or 4 byte field from the byte array
314  * and return as unsigned integer in host-endian format.
315  *
316  * @param[in] data
317  *   Pointer to data array.
318  * @param[in] size
319  *   Size of field to extract.
320  *
321  * @return
322  *   converted field in host endian format.
323  */
324 static inline uint32_t
325 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
326 {
327         uint32_t ret;
328
329         switch (size) {
330         case 1:
331                 ret = *data;
332                 break;
333         case 2:
334                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
335                 break;
336         case 3:
337                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
338                 ret = (ret << 8) | *(data + sizeof(uint16_t));
339                 break;
340         case 4:
341                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
342                 break;
343         default:
344                 MLX5_ASSERT(false);
345                 ret = 0;
346                 break;
347         }
348         return ret;
349 }
350
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	/* Append new commands after the ones already accumulated. */
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented as in big-endian format either.
	 * Mask must be always present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	/* Emit one modification command per non-zero-masked field. */
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		/* A zero mask means this field is not to be modified. */
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		/* A full 32-bit width is encoded as zero length. */
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			/*
			 * Negative dcopy offset requests the same bit
			 * offset as deduced from the source mask.
			 */
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
			++dcopy;
		} else {
			/* SET/ADD carry immediate data taken from the spec. */
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	/* At least one command must have been produced. */
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
453
454 /**
455  * Convert modify-header set IPv4 address action to DV specification.
456  *
457  * @param[in,out] resource
458  *   Pointer to the modify-header resource.
459  * @param[in] action
460  *   Pointer to action specification.
461  * @param[out] error
462  *   Pointer to the error structure.
463  *
464  * @return
465  *   0 on success, a negative errno value otherwise and rte_errno is set.
466  */
467 static int
468 flow_dv_convert_action_modify_ipv4
469                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
470                          const struct rte_flow_action *action,
471                          struct rte_flow_error *error)
472 {
473         const struct rte_flow_action_set_ipv4 *conf =
474                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
475         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
476         struct rte_flow_item_ipv4 ipv4;
477         struct rte_flow_item_ipv4 ipv4_mask;
478
479         memset(&ipv4, 0, sizeof(ipv4));
480         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
481         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
482                 ipv4.hdr.src_addr = conf->ipv4_addr;
483                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
484         } else {
485                 ipv4.hdr.dst_addr = conf->ipv4_addr;
486                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
487         }
488         item.spec = &ipv4;
489         item.mask = &ipv4_mask;
490         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
491                                              MLX5_MODIFICATION_TYPE_SET, error);
492 }
493
494 /**
495  * Convert modify-header set IPv6 address action to DV specification.
496  *
497  * @param[in,out] resource
498  *   Pointer to the modify-header resource.
499  * @param[in] action
500  *   Pointer to action specification.
501  * @param[out] error
502  *   Pointer to the error structure.
503  *
504  * @return
505  *   0 on success, a negative errno value otherwise and rte_errno is set.
506  */
507 static int
508 flow_dv_convert_action_modify_ipv6
509                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
510                          const struct rte_flow_action *action,
511                          struct rte_flow_error *error)
512 {
513         const struct rte_flow_action_set_ipv6 *conf =
514                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
515         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
516         struct rte_flow_item_ipv6 ipv6;
517         struct rte_flow_item_ipv6 ipv6_mask;
518
519         memset(&ipv6, 0, sizeof(ipv6));
520         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
521         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
522                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
523                        sizeof(ipv6.hdr.src_addr));
524                 memcpy(&ipv6_mask.hdr.src_addr,
525                        &rte_flow_item_ipv6_mask.hdr.src_addr,
526                        sizeof(ipv6.hdr.src_addr));
527         } else {
528                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
529                        sizeof(ipv6.hdr.dst_addr));
530                 memcpy(&ipv6_mask.hdr.dst_addr,
531                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
532                        sizeof(ipv6.hdr.dst_addr));
533         }
534         item.spec = &ipv6;
535         item.mask = &ipv6_mask;
536         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
537                                              MLX5_MODIFICATION_TYPE_SET, error);
538 }
539
540 /**
541  * Convert modify-header set MAC address action to DV specification.
542  *
543  * @param[in,out] resource
544  *   Pointer to the modify-header resource.
545  * @param[in] action
546  *   Pointer to action specification.
547  * @param[out] error
548  *   Pointer to the error structure.
549  *
550  * @return
551  *   0 on success, a negative errno value otherwise and rte_errno is set.
552  */
553 static int
554 flow_dv_convert_action_modify_mac
555                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
556                          const struct rte_flow_action *action,
557                          struct rte_flow_error *error)
558 {
559         const struct rte_flow_action_set_mac *conf =
560                 (const struct rte_flow_action_set_mac *)(action->conf);
561         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
562         struct rte_flow_item_eth eth;
563         struct rte_flow_item_eth eth_mask;
564
565         memset(&eth, 0, sizeof(eth));
566         memset(&eth_mask, 0, sizeof(eth_mask));
567         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
568                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
569                        sizeof(eth.src.addr_bytes));
570                 memcpy(&eth_mask.src.addr_bytes,
571                        &rte_flow_item_eth_mask.src.addr_bytes,
572                        sizeof(eth_mask.src.addr_bytes));
573         } else {
574                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
575                        sizeof(eth.dst.addr_bytes));
576                 memcpy(&eth_mask.dst.addr_bytes,
577                        &rte_flow_item_eth_mask.dst.addr_bytes,
578                        sizeof(eth_mask.dst.addr_bytes));
579         }
580         item.spec = &eth;
581         item.mask = &eth_mask;
582         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
583                                              MLX5_MODIFICATION_TYPE_SET, error);
584 }
585
586 /**
587  * Convert modify-header set VLAN VID action to DV specification.
588  *
589  * @param[in,out] resource
590  *   Pointer to the modify-header resource.
591  * @param[in] action
592  *   Pointer to action specification.
593  * @param[out] error
594  *   Pointer to the error structure.
595  *
596  * @return
597  *   0 on success, a negative errno value otherwise and rte_errno is set.
598  */
599 static int
600 flow_dv_convert_action_modify_vlan_vid
601                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
602                          const struct rte_flow_action *action,
603                          struct rte_flow_error *error)
604 {
605         const struct rte_flow_action_of_set_vlan_vid *conf =
606                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
607         int i = resource->actions_num;
608         struct mlx5_modification_cmd *actions = resource->actions;
609         struct field_modify_info *field = modify_vlan_out_first_vid;
610
611         if (i >= MLX5_MAX_MODIFY_NUM)
612                 return rte_flow_error_set(error, EINVAL,
613                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
614                          "too many items to modify");
615         actions[i] = (struct mlx5_modification_cmd) {
616                 .action_type = MLX5_MODIFICATION_TYPE_SET,
617                 .field = field->id,
618                 .length = field->size,
619                 .offset = field->offset,
620         };
621         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
622         actions[i].data1 = conf->vlan_vid;
623         actions[i].data1 = actions[i].data1 << 16;
624         resource->actions_num = ++i;
625         return 0;
626 }
627
628 /**
629  * Convert modify-header set TP action to DV specification.
630  *
631  * @param[in,out] resource
632  *   Pointer to the modify-header resource.
633  * @param[in] action
634  *   Pointer to action specification.
635  * @param[in] items
636  *   Pointer to rte_flow_item objects list.
637  * @param[in] attr
638  *   Pointer to flow attributes structure.
639  * @param[in] dev_flow
640  *   Pointer to the sub flow.
641  * @param[in] tunnel_decap
642  *   Whether action is after tunnel decapsulation.
643  * @param[out] error
644  *   Pointer to the error structure.
645  *
646  * @return
647  *   0 on success, a negative errno value otherwise and rte_errno is set.
648  */
649 static int
650 flow_dv_convert_action_modify_tp
651                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
652                          const struct rte_flow_action *action,
653                          const struct rte_flow_item *items,
654                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
655                          bool tunnel_decap, struct rte_flow_error *error)
656 {
657         const struct rte_flow_action_set_tp *conf =
658                 (const struct rte_flow_action_set_tp *)(action->conf);
659         struct rte_flow_item item;
660         struct rte_flow_item_udp udp;
661         struct rte_flow_item_udp udp_mask;
662         struct rte_flow_item_tcp tcp;
663         struct rte_flow_item_tcp tcp_mask;
664         struct field_modify_info *field;
665
666         if (!attr->valid)
667                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
668         if (attr->udp) {
669                 memset(&udp, 0, sizeof(udp));
670                 memset(&udp_mask, 0, sizeof(udp_mask));
671                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
672                         udp.hdr.src_port = conf->port;
673                         udp_mask.hdr.src_port =
674                                         rte_flow_item_udp_mask.hdr.src_port;
675                 } else {
676                         udp.hdr.dst_port = conf->port;
677                         udp_mask.hdr.dst_port =
678                                         rte_flow_item_udp_mask.hdr.dst_port;
679                 }
680                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
681                 item.spec = &udp;
682                 item.mask = &udp_mask;
683                 field = modify_udp;
684         } else {
685                 MLX5_ASSERT(attr->tcp);
686                 memset(&tcp, 0, sizeof(tcp));
687                 memset(&tcp_mask, 0, sizeof(tcp_mask));
688                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
689                         tcp.hdr.src_port = conf->port;
690                         tcp_mask.hdr.src_port =
691                                         rte_flow_item_tcp_mask.hdr.src_port;
692                 } else {
693                         tcp.hdr.dst_port = conf->port;
694                         tcp_mask.hdr.dst_port =
695                                         rte_flow_item_tcp_mask.hdr.dst_port;
696                 }
697                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
698                 item.spec = &tcp;
699                 item.mask = &tcp_mask;
700                 field = modify_tcp;
701         }
702         return flow_dv_convert_modify_action(&item, field, NULL, resource,
703                                              MLX5_MODIFICATION_TYPE_SET, error);
704 }
705
706 /**
707  * Convert modify-header set TTL action to DV specification.
708  *
709  * @param[in,out] resource
710  *   Pointer to the modify-header resource.
711  * @param[in] action
712  *   Pointer to action specification.
713  * @param[in] items
714  *   Pointer to rte_flow_item objects list.
715  * @param[in] attr
716  *   Pointer to flow attributes structure.
717  * @param[in] dev_flow
718  *   Pointer to the sub flow.
719  * @param[in] tunnel_decap
720  *   Whether action is after tunnel decapsulation.
721  * @param[out] error
722  *   Pointer to the error structure.
723  *
724  * @return
725  *   0 on success, a negative errno value otherwise and rte_errno is set.
726  */
727 static int
728 flow_dv_convert_action_modify_ttl
729                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
730                          const struct rte_flow_action *action,
731                          const struct rte_flow_item *items,
732                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
733                          bool tunnel_decap, struct rte_flow_error *error)
734 {
735         const struct rte_flow_action_set_ttl *conf =
736                 (const struct rte_flow_action_set_ttl *)(action->conf);
737         struct rte_flow_item item;
738         struct rte_flow_item_ipv4 ipv4;
739         struct rte_flow_item_ipv4 ipv4_mask;
740         struct rte_flow_item_ipv6 ipv6;
741         struct rte_flow_item_ipv6 ipv6_mask;
742         struct field_modify_info *field;
743
744         if (!attr->valid)
745                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
746         if (attr->ipv4) {
747                 memset(&ipv4, 0, sizeof(ipv4));
748                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
749                 ipv4.hdr.time_to_live = conf->ttl_value;
750                 ipv4_mask.hdr.time_to_live = 0xFF;
751                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
752                 item.spec = &ipv4;
753                 item.mask = &ipv4_mask;
754                 field = modify_ipv4;
755         } else {
756                 MLX5_ASSERT(attr->ipv6);
757                 memset(&ipv6, 0, sizeof(ipv6));
758                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
759                 ipv6.hdr.hop_limits = conf->ttl_value;
760                 ipv6_mask.hdr.hop_limits = 0xFF;
761                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
762                 item.spec = &ipv6;
763                 item.mask = &ipv6_mask;
764                 field = modify_ipv6;
765         }
766         return flow_dv_convert_modify_action(&item, field, NULL, resource,
767                                              MLX5_MODIFICATION_TYPE_SET, error);
768 }
769
770 /**
771  * Convert modify-header decrement TTL action to DV specification.
772  *
773  * @param[in,out] resource
774  *   Pointer to the modify-header resource.
775  * @param[in] action
776  *   Pointer to action specification.
777  * @param[in] items
778  *   Pointer to rte_flow_item objects list.
779  * @param[in] attr
780  *   Pointer to flow attributes structure.
781  * @param[in] dev_flow
782  *   Pointer to the sub flow.
783  * @param[in] tunnel_decap
784  *   Whether action is after tunnel decapsulation.
785  * @param[out] error
786  *   Pointer to the error structure.
787  *
788  * @return
789  *   0 on success, a negative errno value otherwise and rte_errno is set.
790  */
791 static int
792 flow_dv_convert_action_modify_dec_ttl
793                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
794                          const struct rte_flow_item *items,
795                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
796                          bool tunnel_decap, struct rte_flow_error *error)
797 {
798         struct rte_flow_item item;
799         struct rte_flow_item_ipv4 ipv4;
800         struct rte_flow_item_ipv4 ipv4_mask;
801         struct rte_flow_item_ipv6 ipv6;
802         struct rte_flow_item_ipv6 ipv6_mask;
803         struct field_modify_info *field;
804
805         if (!attr->valid)
806                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
807         if (attr->ipv4) {
808                 memset(&ipv4, 0, sizeof(ipv4));
809                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
810                 ipv4.hdr.time_to_live = 0xFF;
811                 ipv4_mask.hdr.time_to_live = 0xFF;
812                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
813                 item.spec = &ipv4;
814                 item.mask = &ipv4_mask;
815                 field = modify_ipv4;
816         } else {
817                 MLX5_ASSERT(attr->ipv6);
818                 memset(&ipv6, 0, sizeof(ipv6));
819                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
820                 ipv6.hdr.hop_limits = 0xFF;
821                 ipv6_mask.hdr.hop_limits = 0xFF;
822                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
823                 item.spec = &ipv6;
824                 item.mask = &ipv6_mask;
825                 field = modify_ipv6;
826         }
827         return flow_dv_convert_modify_action(&item, field, NULL, resource,
828                                              MLX5_MODIFICATION_TYPE_ADD, error);
829 }
830
831 /**
832  * Convert modify-header increment/decrement TCP Sequence number
833  * to DV specification.
834  *
835  * @param[in,out] resource
836  *   Pointer to the modify-header resource.
837  * @param[in] action
838  *   Pointer to action specification.
839  * @param[out] error
840  *   Pointer to the error structure.
841  *
842  * @return
843  *   0 on success, a negative errno value otherwise and rte_errno is set.
844  */
845 static int
846 flow_dv_convert_action_modify_tcp_seq
847                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
848                          const struct rte_flow_action *action,
849                          struct rte_flow_error *error)
850 {
851         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
852         uint64_t value = rte_be_to_cpu_32(*conf);
853         struct rte_flow_item item;
854         struct rte_flow_item_tcp tcp;
855         struct rte_flow_item_tcp tcp_mask;
856
857         memset(&tcp, 0, sizeof(tcp));
858         memset(&tcp_mask, 0, sizeof(tcp_mask));
859         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
860                 /*
861                  * The HW has no decrement operation, only increment operation.
862                  * To simulate decrement X from Y using increment operation
863                  * we need to add UINT32_MAX X times to Y.
864                  * Each adding of UINT32_MAX decrements Y by 1.
865                  */
866                 value *= UINT32_MAX;
867         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
868         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
869         item.type = RTE_FLOW_ITEM_TYPE_TCP;
870         item.spec = &tcp;
871         item.mask = &tcp_mask;
872         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
873                                              MLX5_MODIFICATION_TYPE_ADD, error);
874 }
875
876 /**
877  * Convert modify-header increment/decrement TCP Acknowledgment number
878  * to DV specification.
879  *
880  * @param[in,out] resource
881  *   Pointer to the modify-header resource.
882  * @param[in] action
883  *   Pointer to action specification.
884  * @param[out] error
885  *   Pointer to the error structure.
886  *
887  * @return
888  *   0 on success, a negative errno value otherwise and rte_errno is set.
889  */
890 static int
891 flow_dv_convert_action_modify_tcp_ack
892                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
893                          const struct rte_flow_action *action,
894                          struct rte_flow_error *error)
895 {
896         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
897         uint64_t value = rte_be_to_cpu_32(*conf);
898         struct rte_flow_item item;
899         struct rte_flow_item_tcp tcp;
900         struct rte_flow_item_tcp tcp_mask;
901
902         memset(&tcp, 0, sizeof(tcp));
903         memset(&tcp_mask, 0, sizeof(tcp_mask));
904         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
905                 /*
906                  * The HW has no decrement operation, only increment operation.
907                  * To simulate decrement X from Y using increment operation
908                  * we need to add UINT32_MAX X times to Y.
909                  * Each adding of UINT32_MAX decrements Y by 1.
910                  */
911                 value *= UINT32_MAX;
912         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
913         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
914         item.type = RTE_FLOW_ITEM_TYPE_TCP;
915         item.spec = &tcp;
916         item.mask = &tcp_mask;
917         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
918                                              MLX5_MODIFICATION_TYPE_ADD, error);
919 }
920
/*
 * Map metadata register ids (enum modify_reg) to the matching
 * modify-header field identifiers (enum mlx5_modification_field)
 * used when building modification commands for the device.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
934
935 /**
936  * Convert register set to DV specification.
937  *
938  * @param[in,out] resource
939  *   Pointer to the modify-header resource.
940  * @param[in] action
941  *   Pointer to action specification.
942  * @param[out] error
943  *   Pointer to the error structure.
944  *
945  * @return
946  *   0 on success, a negative errno value otherwise and rte_errno is set.
947  */
948 static int
949 flow_dv_convert_action_set_reg
950                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
951                          const struct rte_flow_action *action,
952                          struct rte_flow_error *error)
953 {
954         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
955         struct mlx5_modification_cmd *actions = resource->actions;
956         uint32_t i = resource->actions_num;
957
958         if (i >= MLX5_MAX_MODIFY_NUM)
959                 return rte_flow_error_set(error, EINVAL,
960                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
961                                           "too many items to modify");
962         MLX5_ASSERT(conf->id != REG_NON);
963         MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
964         actions[i] = (struct mlx5_modification_cmd) {
965                 .action_type = MLX5_MODIFICATION_TYPE_SET,
966                 .field = reg_to_field[conf->id],
967         };
968         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
969         actions[i].data1 = rte_cpu_to_be_32(conf->data);
970         ++i;
971         resource->actions_num = i;
972         return 0;
973 }
974
975 /**
976  * Convert SET_TAG action to DV specification.
977  *
978  * @param[in] dev
979  *   Pointer to the rte_eth_dev structure.
980  * @param[in,out] resource
981  *   Pointer to the modify-header resource.
982  * @param[in] conf
983  *   Pointer to action specification.
984  * @param[out] error
985  *   Pointer to the error structure.
986  *
987  * @return
988  *   0 on success, a negative errno value otherwise and rte_errno is set.
989  */
990 static int
991 flow_dv_convert_action_set_tag
992                         (struct rte_eth_dev *dev,
993                          struct mlx5_flow_dv_modify_hdr_resource *resource,
994                          const struct rte_flow_action_set_tag *conf,
995                          struct rte_flow_error *error)
996 {
997         rte_be32_t data = rte_cpu_to_be_32(conf->data);
998         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
999         struct rte_flow_item item = {
1000                 .spec = &data,
1001                 .mask = &mask,
1002         };
1003         struct field_modify_info reg_c_x[] = {
1004                 [1] = {0, 0, 0},
1005         };
1006         enum mlx5_modification_field reg_type;
1007         int ret;
1008
1009         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1010         if (ret < 0)
1011                 return ret;
1012         MLX5_ASSERT(ret != REG_NON);
1013         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1014         reg_type = reg_to_field[ret];
1015         MLX5_ASSERT(reg_type > 0);
1016         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1017         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1018                                              MLX5_MODIFICATION_TYPE_SET, error);
1019 }
1020
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * Builds a COPY modification command that moves the contents of one
 * metadata register (conf->src) into another (conf->dst). When reg_c[0]
 * is involved, only the bits reported as available by the kernel
 * (priv->sh->dv_regc0_mask) are copied, and the copy offset is adjusted
 * to the position of that bit-field.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	/* Default: copy the full 32-bit register. */
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			/* Copy out of reg_c[0]: mask in big-endian layout. */
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1096
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Limit the mark to the bits supported by the device. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		/*
		 * Only part of reg_c[0] is available for the mark; shift
		 * the value into the reported bit-field. The double
		 * rte_cpu_to_be_32() converts to CPU order for the shift
		 * and back, since the datapath does no byte swapping.
		 */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1153
1154 /**
1155  * Get metadata register index for specified steering domain.
1156  *
1157  * @param[in] dev
1158  *   Pointer to the rte_eth_dev structure.
1159  * @param[in] attr
1160  *   Attributes of flow to determine steering domain.
1161  * @param[out] error
1162  *   Pointer to the error structure.
1163  *
1164  * @return
1165  *   positive index on success, a negative errno value otherwise
1166  *   and rte_errno is set.
1167  */
1168 static enum modify_reg
1169 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1170                          const struct rte_flow_attr *attr,
1171                          struct rte_flow_error *error)
1172 {
1173         int reg =
1174                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1175                                           MLX5_METADATA_FDB :
1176                                             attr->egress ?
1177                                             MLX5_METADATA_TX :
1178                                             MLX5_METADATA_RX, 0, error);
1179         if (reg < 0)
1180                 return rte_flow_error_set(error,
1181                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1182                                           NULL, "unavailable "
1183                                           "metadata register");
1184         return reg;
1185 }
1186
/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	/* Pick the register matching this flow's steering domain. */
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	/*
	 * In datapath code there is no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		/*
		 * Only part of reg_c[0] is usable for metadata; shift the
		 * value/mask into the reported bit-field. The shift amount
		 * depends on host byte order since no swap happens later.
		 */
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1251
1252 /**
1253  * Convert modify-header set IPv4 DSCP action to DV specification.
1254  *
1255  * @param[in,out] resource
1256  *   Pointer to the modify-header resource.
1257  * @param[in] action
1258  *   Pointer to action specification.
1259  * @param[out] error
1260  *   Pointer to the error structure.
1261  *
1262  * @return
1263  *   0 on success, a negative errno value otherwise and rte_errno is set.
1264  */
1265 static int
1266 flow_dv_convert_action_modify_ipv4_dscp
1267                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1268                          const struct rte_flow_action *action,
1269                          struct rte_flow_error *error)
1270 {
1271         const struct rte_flow_action_set_dscp *conf =
1272                 (const struct rte_flow_action_set_dscp *)(action->conf);
1273         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1274         struct rte_flow_item_ipv4 ipv4;
1275         struct rte_flow_item_ipv4 ipv4_mask;
1276
1277         memset(&ipv4, 0, sizeof(ipv4));
1278         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1279         ipv4.hdr.type_of_service = conf->dscp;
1280         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1281         item.spec = &ipv4;
1282         item.mask = &ipv4_mask;
1283         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1284                                              MLX5_MODIFICATION_TYPE_SET, error);
1285 }
1286
1287 /**
1288  * Convert modify-header set IPv6 DSCP action to DV specification.
1289  *
1290  * @param[in,out] resource
1291  *   Pointer to the modify-header resource.
1292  * @param[in] action
1293  *   Pointer to action specification.
1294  * @param[out] error
1295  *   Pointer to the error structure.
1296  *
1297  * @return
1298  *   0 on success, a negative errno value otherwise and rte_errno is set.
1299  */
1300 static int
1301 flow_dv_convert_action_modify_ipv6_dscp
1302                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1303                          const struct rte_flow_action *action,
1304                          struct rte_flow_error *error)
1305 {
1306         const struct rte_flow_action_set_dscp *conf =
1307                 (const struct rte_flow_action_set_dscp *)(action->conf);
1308         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1309         struct rte_flow_item_ipv6 ipv6;
1310         struct rte_flow_item_ipv6 ipv6_mask;
1311
1312         memset(&ipv6, 0, sizeof(ipv6));
1313         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1314         /*
1315          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1316          * rdma-core only accept the DSCP bits byte aligned start from
1317          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1318          * bits in IPv6 case as rdma-core requires byte aligned value.
1319          */
1320         ipv6.hdr.vtc_flow = conf->dscp;
1321         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1322         item.spec = &ipv6;
1323         item.mask = &ipv6_mask;
1324         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1325                                              MLX5_MODIFICATION_TYPE_SET, error);
1326 }
1327
1328 static void
1329 mlx5_flow_field_id_to_modify_info
1330                 (const struct rte_flow_action_modify_data *data,
1331                  struct field_modify_info *info,
1332                  uint32_t *mask, uint32_t *value, uint32_t width,
1333                  struct rte_eth_dev *dev,
1334                  const struct rte_flow_attr *attr,
1335                  struct rte_flow_error *error)
1336 {
1337         uint32_t idx = 0;
1338         switch (data->field) {
1339         case RTE_FLOW_FIELD_START:
1340                 /* not supported yet */
1341                 MLX5_ASSERT(false);
1342                 break;
1343         case RTE_FLOW_FIELD_MAC_DST:
1344                 if (mask) {
1345                         if (data->offset < 32) {
1346                                 info[idx] = (struct field_modify_info){4, 0,
1347                                                 MLX5_MODI_OUT_DMAC_47_16};
1348                                 mask[idx] = 0xffffffff;
1349                                 if (width < 32) {
1350                                         mask[idx] = mask[idx] << (32 - width);
1351                                         width = 0;
1352                                 } else {
1353                                         width -= 32;
1354                                 }
1355                                 if (!width)
1356                                         break;
1357                                 ++idx;
1358                         }
1359                         info[idx] = (struct field_modify_info){2, 4 * idx,
1360                                                 MLX5_MODI_OUT_DMAC_15_0};
1361                         mask[idx] = (width) ? 0x0000ffff : 0x0;
1362                         if (width < 16)
1363                                 mask[idx] = (mask[idx] << (16 - width)) &
1364                                                 0x0000ffff;
1365                 } else {
1366                         if (data->offset < 32)
1367                                 info[idx++] = (struct field_modify_info){4, 0,
1368                                                 MLX5_MODI_OUT_DMAC_47_16};
1369                         info[idx] = (struct field_modify_info){2, 0,
1370                                                 MLX5_MODI_OUT_DMAC_15_0};
1371                 }
1372                 break;
1373         case RTE_FLOW_FIELD_MAC_SRC:
1374                 if (mask) {
1375                         if (data->offset < 32) {
1376                                 info[idx] = (struct field_modify_info){4, 0,
1377                                                 MLX5_MODI_OUT_SMAC_47_16};
1378                                 mask[idx] = 0xffffffff;
1379                                 if (width < 32) {
1380                                         mask[idx] = mask[idx] << (32 - width);
1381                                         width = 0;
1382                                 } else {
1383                                         width -= 32;
1384                                 }
1385                                 if (!width)
1386                                         break;
1387                                 ++idx;
1388                         }
1389                         info[idx] = (struct field_modify_info){2, 4 * idx,
1390                                                 MLX5_MODI_OUT_SMAC_15_0};
1391                         mask[idx] = (width) ? 0x0000ffff : 0x0;
1392                         if (width < 16)
1393                                 mask[idx] = (mask[idx] << (16 - width)) &
1394                                                 0x0000ffff;
1395                 } else {
1396                         if (data->offset < 32)
1397                                 info[idx++] = (struct field_modify_info){4, 0,
1398                                                 MLX5_MODI_OUT_SMAC_47_16};
1399                         info[idx] = (struct field_modify_info){2, 0,
1400                                                 MLX5_MODI_OUT_SMAC_15_0};
1401                 }
1402                 break;
1403         case RTE_FLOW_FIELD_VLAN_TYPE:
1404                 /* not supported yet */
1405                 break;
1406         case RTE_FLOW_FIELD_VLAN_ID:
1407                 info[idx] = (struct field_modify_info){2, 0,
1408                                         MLX5_MODI_OUT_FIRST_VID};
1409                 if (mask) {
1410                         mask[idx] = 0x00000fff;
1411                         if (width < 12)
1412                                 mask[idx] = (mask[idx] << (12 - width)) &
1413                                                 0x00000fff;
1414                 }
1415                 break;
1416         case RTE_FLOW_FIELD_MAC_TYPE:
1417                 info[idx] = (struct field_modify_info){2, 0,
1418                                         MLX5_MODI_OUT_ETHERTYPE};
1419                 if (mask) {
1420                         mask[idx] = 0x0000ffff;
1421                         if (width < 16)
1422                                 mask[idx] = (mask[idx] << (16 - width)) &
1423                                                 0x0000ffff;
1424                 }
1425                 break;
1426         case RTE_FLOW_FIELD_IPV4_DSCP:
1427                 info[idx] = (struct field_modify_info){1, 0,
1428                                         MLX5_MODI_OUT_IP_DSCP};
1429                 if (mask) {
1430                         mask[idx] = 0x0000003f;
1431                         if (width < 6)
1432                                 mask[idx] = (mask[idx] << (6 - width)) &
1433                                                 0x0000003f;
1434                 }
1435                 break;
1436         case RTE_FLOW_FIELD_IPV4_TTL:
1437                 info[idx] = (struct field_modify_info){1, 0,
1438                                         MLX5_MODI_OUT_IPV4_TTL};
1439                 if (mask) {
1440                         mask[idx] = 0x000000ff;
1441                         if (width < 8)
1442                                 mask[idx] = (mask[idx] << (8 - width)) &
1443                                                 0x000000ff;
1444                 }
1445                 break;
1446         case RTE_FLOW_FIELD_IPV4_SRC:
1447                 info[idx] = (struct field_modify_info){4, 0,
1448                                         MLX5_MODI_OUT_SIPV4};
1449                 if (mask) {
1450                         mask[idx] = 0xffffffff;
1451                         if (width < 32)
1452                                 mask[idx] = mask[idx] << (32 - width);
1453                 }
1454                 break;
1455         case RTE_FLOW_FIELD_IPV4_DST:
1456                 info[idx] = (struct field_modify_info){4, 0,
1457                                         MLX5_MODI_OUT_DIPV4};
1458                 if (mask) {
1459                         mask[idx] = 0xffffffff;
1460                         if (width < 32)
1461                                 mask[idx] = mask[idx] << (32 - width);
1462                 }
1463                 break;
1464         case RTE_FLOW_FIELD_IPV6_DSCP:
1465                 info[idx] = (struct field_modify_info){1, 0,
1466                                         MLX5_MODI_OUT_IP_DSCP};
1467                 if (mask) {
1468                         mask[idx] = 0x0000003f;
1469                         if (width < 6)
1470                                 mask[idx] = (mask[idx] << (6 - width)) &
1471                                                 0x0000003f;
1472                 }
1473                 break;
1474         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1475                 info[idx] = (struct field_modify_info){1, 0,
1476                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1477                 if (mask) {
1478                         mask[idx] = 0x000000ff;
1479                         if (width < 8)
1480                                 mask[idx] = (mask[idx] << (8 - width)) &
1481                                                 0x000000ff;
1482                 }
1483                 break;
1484         case RTE_FLOW_FIELD_IPV6_SRC:
1485                 if (mask) {
1486                         if (data->offset < 32) {
1487                                 info[idx] = (struct field_modify_info){4, 0,
1488                                                 MLX5_MODI_OUT_SIPV6_127_96};
1489                                 mask[idx] = 0xffffffff;
1490                                 if (width < 32) {
1491                                         mask[idx] = mask[idx] << (32 - width);
1492                                         width = 0;
1493                                 } else {
1494                                         width -= 32;
1495                                 }
1496                                 if (!width)
1497                                         break;
1498                                 ++idx;
1499                         }
1500                         if (data->offset < 64) {
1501                                 info[idx] = (struct field_modify_info){4,
1502                                                 4 * idx,
1503                                                 MLX5_MODI_OUT_SIPV6_95_64};
1504                                 mask[idx] = 0xffffffff;
1505                                 if (width < 32) {
1506                                         mask[idx] = mask[idx] << (32 - width);
1507                                         width = 0;
1508                                 } else {
1509                                         width -= 32;
1510                                 }
1511                                 if (!width)
1512                                         break;
1513                                 ++idx;
1514                         }
1515                         if (data->offset < 96) {
1516                                 info[idx] = (struct field_modify_info){4,
1517                                                 8 * idx,
1518                                                 MLX5_MODI_OUT_SIPV6_63_32};
1519                                 mask[idx] = 0xffffffff;
1520                                 if (width < 32) {
1521                                         mask[idx] = mask[idx] << (32 - width);
1522                                         width = 0;
1523                                 } else {
1524                                         width -= 32;
1525                                 }
1526                                 if (!width)
1527                                         break;
1528                                 ++idx;
1529                         }
1530                         info[idx] = (struct field_modify_info){4, 12 * idx,
1531                                                 MLX5_MODI_OUT_SIPV6_31_0};
1532                         mask[idx] = 0xffffffff;
1533                         if (width < 32)
1534                                 mask[idx] = mask[idx] << (32 - width);
1535                 } else {
1536                         if (data->offset < 32)
1537                                 info[idx++] = (struct field_modify_info){4, 0,
1538                                                 MLX5_MODI_OUT_SIPV6_127_96};
1539                         if (data->offset < 64)
1540                                 info[idx++] = (struct field_modify_info){4, 0,
1541                                                 MLX5_MODI_OUT_SIPV6_95_64};
1542                         if (data->offset < 96)
1543                                 info[idx++] = (struct field_modify_info){4, 0,
1544                                                 MLX5_MODI_OUT_SIPV6_63_32};
1545                         if (data->offset < 128)
1546                                 info[idx++] = (struct field_modify_info){4, 0,
1547                                                 MLX5_MODI_OUT_SIPV6_31_0};
1548                 }
1549                 break;
1550         case RTE_FLOW_FIELD_IPV6_DST:
1551                 if (mask) {
1552                         if (data->offset < 32) {
1553                                 info[idx] = (struct field_modify_info){4, 0,
1554                                                 MLX5_MODI_OUT_DIPV6_127_96};
1555                                 mask[idx] = 0xffffffff;
1556                                 if (width < 32) {
1557                                         mask[idx] = mask[idx] << (32 - width);
1558                                         width = 0;
1559                                 } else {
1560                                         width -= 32;
1561                                 }
1562                                 if (!width)
1563                                         break;
1564                                 ++idx;
1565                         }
1566                         if (data->offset < 64) {
1567                                 info[idx] = (struct field_modify_info){4,
1568                                                 4 * idx,
1569                                                 MLX5_MODI_OUT_DIPV6_95_64};
1570                                 mask[idx] = 0xffffffff;
1571                                 if (width < 32) {
1572                                         mask[idx] = mask[idx] << (32 - width);
1573                                         width = 0;
1574                                 } else {
1575                                         width -= 32;
1576                                 }
1577                                 if (!width)
1578                                         break;
1579                                 ++idx;
1580                         }
1581                         if (data->offset < 96) {
1582                                 info[idx] = (struct field_modify_info){4,
1583                                                 8 * idx,
1584                                                 MLX5_MODI_OUT_DIPV6_63_32};
1585                                 mask[idx] = 0xffffffff;
1586                                 if (width < 32) {
1587                                         mask[idx] = mask[idx] << (32 - width);
1588                                         width = 0;
1589                                 } else {
1590                                         width -= 32;
1591                                 }
1592                                 if (!width)
1593                                         break;
1594                                 ++idx;
1595                         }
1596                         info[idx] = (struct field_modify_info){4, 12 * idx,
1597                                                 MLX5_MODI_OUT_DIPV6_31_0};
1598                         mask[idx] = 0xffffffff;
1599                         if (width < 32)
1600                                 mask[idx] = mask[idx] << (32 - width);
1601                 } else {
1602                         if (data->offset < 32)
1603                                 info[idx++] = (struct field_modify_info){4, 0,
1604                                                 MLX5_MODI_OUT_DIPV6_127_96};
1605                         if (data->offset < 64)
1606                                 info[idx++] = (struct field_modify_info){4, 0,
1607                                                 MLX5_MODI_OUT_DIPV6_95_64};
1608                         if (data->offset < 96)
1609                                 info[idx++] = (struct field_modify_info){4, 0,
1610                                                 MLX5_MODI_OUT_DIPV6_63_32};
1611                         if (data->offset < 128)
1612                                 info[idx++] = (struct field_modify_info){4, 0,
1613                                                 MLX5_MODI_OUT_DIPV6_31_0};
1614                 }
1615                 break;
1616         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1617                 info[idx] = (struct field_modify_info){2, 0,
1618                                         MLX5_MODI_OUT_TCP_SPORT};
1619                 if (mask) {
1620                         mask[idx] = 0x0000ffff;
1621                         if (width < 16)
1622                                 mask[idx] = (mask[idx] << (16 - width)) &
1623                                                 0x0000ffff;
1624                 }
1625                 break;
1626         case RTE_FLOW_FIELD_TCP_PORT_DST:
1627                 info[idx] = (struct field_modify_info){2, 0,
1628                                         MLX5_MODI_OUT_TCP_DPORT};
1629                 if (mask) {
1630                         mask[idx] = 0x0000ffff;
1631                         if (width < 16)
1632                                 mask[idx] = (mask[idx] << (16 - width)) &
1633                                                 0x0000ffff;
1634                 }
1635                 break;
1636         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1637                 info[idx] = (struct field_modify_info){4, 0,
1638                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1639                 if (mask) {
1640                         mask[idx] = 0xffffffff;
1641                         if (width < 32)
1642                                 mask[idx] = (mask[idx] << (32 - width));
1643                 }
1644                 break;
1645         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1646                 info[idx] = (struct field_modify_info){4, 0,
1647                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1648                 if (mask) {
1649                         mask[idx] = 0xffffffff;
1650                         if (width < 32)
1651                                 mask[idx] = (mask[idx] << (32 - width));
1652                 }
1653                 break;
1654         case RTE_FLOW_FIELD_TCP_FLAGS:
1655                 info[idx] = (struct field_modify_info){1, 0,
1656                                         MLX5_MODI_OUT_TCP_FLAGS};
1657                 if (mask) {
1658                         mask[idx] = 0x0000003f;
1659                         if (width < 6)
1660                                 mask[idx] = (mask[idx] << (6 - width)) &
1661                                                 0x0000003f;
1662                 }
1663                 break;
1664         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1665                 info[idx] = (struct field_modify_info){2, 0,
1666                                         MLX5_MODI_OUT_UDP_SPORT};
1667                 if (mask) {
1668                         mask[idx] = 0x0000ffff;
1669                         if (width < 16)
1670                                 mask[idx] = (mask[idx] << (16 - width)) &
1671                                                 0x0000ffff;
1672                 }
1673                 break;
1674         case RTE_FLOW_FIELD_UDP_PORT_DST:
1675                 info[idx] = (struct field_modify_info){2, 0,
1676                                         MLX5_MODI_OUT_UDP_DPORT};
1677                 if (mask) {
1678                         mask[idx] = 0x0000ffff;
1679                         if (width < 16)
1680                                 mask[idx] = (mask[idx] << (16 - width)) &
1681                                                 0x0000ffff;
1682                 }
1683                 break;
1684         case RTE_FLOW_FIELD_VXLAN_VNI:
1685                 /* not supported yet */
1686                 break;
1687         case RTE_FLOW_FIELD_GENEVE_VNI:
1688                 /* not supported yet*/
1689                 break;
1690         case RTE_FLOW_FIELD_GTP_TEID:
1691                 info[idx] = (struct field_modify_info){4, 0,
1692                                         MLX5_MODI_GTP_TEID};
1693                 if (mask) {
1694                         mask[idx] = 0xffffffff;
1695                         if (width < 32)
1696                                 mask[idx] = mask[idx] << (32 - width);
1697                 }
1698                 break;
1699         case RTE_FLOW_FIELD_TAG:
1700                 {
1701                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1702                                                    data->level, error);
1703                         if (reg < 0)
1704                                 return;
1705                         MLX5_ASSERT(reg != REG_NON);
1706                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1707                         info[idx] = (struct field_modify_info){4, 0,
1708                                                 reg_to_field[reg]};
1709                         if (mask) {
1710                                 mask[idx] = 0xffffffff;
1711                                 if (width < 32)
1712                                         mask[idx] = mask[idx] << (32 - width);
1713                         }
1714                 }
1715                 break;
1716         case RTE_FLOW_FIELD_MARK:
1717                 {
1718                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1719                                                        0, error);
1720                         if (reg < 0)
1721                                 return;
1722                         MLX5_ASSERT(reg != REG_NON);
1723                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1724                         info[idx] = (struct field_modify_info){4, 0,
1725                                                 reg_to_field[reg]};
1726                         if (mask) {
1727                                 mask[idx] = 0xffffffff;
1728                                 if (width < 32)
1729                                         mask[idx] = mask[idx] << (32 - width);
1730                         }
1731                 }
1732                 break;
1733         case RTE_FLOW_FIELD_META:
1734                 {
1735                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1736                         if (reg < 0)
1737                                 return;
1738                         MLX5_ASSERT(reg != REG_NON);
1739                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1740                         info[idx] = (struct field_modify_info){4, 0,
1741                                                 reg_to_field[reg]};
1742                         if (mask) {
1743                                 mask[idx] = 0xffffffff;
1744                                 if (width < 32)
1745                                         mask[idx] = mask[idx] << (32 - width);
1746                         }
1747                 }
1748                 break;
1749         case RTE_FLOW_FIELD_POINTER:
1750                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1751                         if (mask[idx]) {
1752                                 memcpy(&value[idx],
1753                                         (void *)(uintptr_t)data->value, 32);
1754                                 value[idx] = RTE_BE32(value[idx]);
1755                                 break;
1756                         }
1757                 }
1758                 break;
1759         case RTE_FLOW_FIELD_VALUE:
1760                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1761                         if (mask[idx]) {
1762                                 value[idx] = RTE_BE32((uint32_t)data->value);
1763                                 break;
1764                         }
1765                 }
1766                 break;
1767         default:
1768                 MLX5_ASSERT(false);
1769                 break;
1770         }
1771 }
1772
/**
 * Convert modify_field action to DV specification.
 *
 * Builds the field descriptors and mask/value arrays for a MODIFY_FIELD
 * action and hands them to flow_dv_convert_modify_action() to fill the
 * modify-header resource.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_field
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_modify_field *conf =
		(const struct rte_flow_action_modify_field *)(action->conf);
	struct rte_flow_item item;
	/* Destination descriptors for SET, source descriptors for COPY. */
	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
								{0, 0, 0} };
	/* Destination descriptors for the COPY operation. */
	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
								{0, 0, 0} };
	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
	uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
	uint32_t type;

	/*
	 * An immediate source (VALUE/POINTER) means a SET operation;
	 * any other source field means a field-to-field COPY.
	 */
	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
		conf->src.field == RTE_FLOW_FIELD_VALUE) {
		type = MLX5_MODIFICATION_TYPE_SET;
		/** For SET fill the destination field (field) first. */
		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
					  value, conf->width, dev, attr, error);
		/** Then copy immediate value from source as per mask. */
		mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
					  value, conf->width, dev, attr, error);
		item.spec = &value;
	} else {
		type = MLX5_MODIFICATION_TYPE_COPY;
		/** For COPY fill the destination field (dcopy) without mask. */
		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
					  value, conf->width, dev, attr, error);
		/** Then construct the source field (field) with mask. */
		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
					  value, conf->width, dev, attr, error);
		/*
		 * NOTE(review): item.spec is left uninitialized on this path;
		 * presumably flow_dv_convert_modify_action() does not read
		 * spec for COPY type -- confirm.
		 */
	}
	item.mask = &mask;
	return flow_dv_convert_modify_action(&item,
			field, dcopy, resource, type, error);
}
1832
1833 /**
1834  * Validate MARK item.
1835  *
1836  * @param[in] dev
1837  *   Pointer to the rte_eth_dev structure.
1838  * @param[in] item
1839  *   Item specification.
1840  * @param[in] attr
1841  *   Attributes of flow that includes this item.
1842  * @param[out] error
1843  *   Pointer to error structure.
1844  *
1845  * @return
1846  *   0 on success, a negative errno value otherwise and rte_errno is set.
1847  */
1848 static int
1849 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1850                            const struct rte_flow_item *item,
1851                            const struct rte_flow_attr *attr __rte_unused,
1852                            struct rte_flow_error *error)
1853 {
1854         struct mlx5_priv *priv = dev->data->dev_private;
1855         struct mlx5_dev_config *config = &priv->config;
1856         const struct rte_flow_item_mark *spec = item->spec;
1857         const struct rte_flow_item_mark *mask = item->mask;
1858         const struct rte_flow_item_mark nic_mask = {
1859                 .id = priv->sh->dv_mark_mask,
1860         };
1861         int ret;
1862
1863         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1864                 return rte_flow_error_set(error, ENOTSUP,
1865                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1866                                           "extended metadata feature"
1867                                           " isn't enabled");
1868         if (!mlx5_flow_ext_mreg_supported(dev))
1869                 return rte_flow_error_set(error, ENOTSUP,
1870                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1871                                           "extended metadata register"
1872                                           " isn't supported");
1873         if (!nic_mask.id)
1874                 return rte_flow_error_set(error, ENOTSUP,
1875                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1876                                           "extended metadata register"
1877                                           " isn't available");
1878         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1879         if (ret < 0)
1880                 return ret;
1881         if (!spec)
1882                 return rte_flow_error_set(error, EINVAL,
1883                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1884                                           item->spec,
1885                                           "data cannot be empty");
1886         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1887                 return rte_flow_error_set(error, EINVAL,
1888                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1889                                           &spec->id,
1890                                           "mark id exceeds the limit");
1891         if (!mask)
1892                 mask = &nic_mask;
1893         if (!mask->id)
1894                 return rte_flow_error_set(error, EINVAL,
1895                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1896                                         "mask cannot be zero");
1897
1898         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1899                                         (const uint8_t *)&nic_mask,
1900                                         sizeof(struct rte_flow_item_mark),
1901                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1902         if (ret < 0)
1903                 return ret;
1904         return 0;
1905 }
1906
1907 /**
1908  * Validate META item.
1909  *
1910  * @param[in] dev
1911  *   Pointer to the rte_eth_dev structure.
1912  * @param[in] item
1913  *   Item specification.
1914  * @param[in] attr
1915  *   Attributes of flow that includes this item.
1916  * @param[out] error
1917  *   Pointer to error structure.
1918  *
1919  * @return
1920  *   0 on success, a negative errno value otherwise and rte_errno is set.
1921  */
1922 static int
1923 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1924                            const struct rte_flow_item *item,
1925                            const struct rte_flow_attr *attr,
1926                            struct rte_flow_error *error)
1927 {
1928         struct mlx5_priv *priv = dev->data->dev_private;
1929         struct mlx5_dev_config *config = &priv->config;
1930         const struct rte_flow_item_meta *spec = item->spec;
1931         const struct rte_flow_item_meta *mask = item->mask;
1932         struct rte_flow_item_meta nic_mask = {
1933                 .data = UINT32_MAX
1934         };
1935         int reg;
1936         int ret;
1937
1938         if (!spec)
1939                 return rte_flow_error_set(error, EINVAL,
1940                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1941                                           item->spec,
1942                                           "data cannot be empty");
1943         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1944                 if (!mlx5_flow_ext_mreg_supported(dev))
1945                         return rte_flow_error_set(error, ENOTSUP,
1946                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1947                                           "extended metadata register"
1948                                           " isn't supported");
1949                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1950                 if (reg < 0)
1951                         return reg;
1952                 if (reg == REG_NON)
1953                         return rte_flow_error_set(error, ENOTSUP,
1954                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1955                                         "unavalable extended metadata register");
1956                 if (reg == REG_B)
1957                         return rte_flow_error_set(error, ENOTSUP,
1958                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1959                                           "match on reg_b "
1960                                           "isn't supported");
1961                 if (reg != REG_A)
1962                         nic_mask.data = priv->sh->dv_meta_mask;
1963         } else {
1964                 if (attr->transfer)
1965                         return rte_flow_error_set(error, ENOTSUP,
1966                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1967                                         "extended metadata feature "
1968                                         "should be enabled when "
1969                                         "meta item is requested "
1970                                         "with e-switch mode ");
1971                 if (attr->ingress)
1972                         return rte_flow_error_set(error, ENOTSUP,
1973                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1974                                         "match on metadata for ingress "
1975                                         "is not supported in legacy "
1976                                         "metadata mode");
1977         }
1978         if (!mask)
1979                 mask = &rte_flow_item_meta_mask;
1980         if (!mask->data)
1981                 return rte_flow_error_set(error, EINVAL,
1982                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1983                                         "mask cannot be zero");
1984
1985         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1986                                         (const uint8_t *)&nic_mask,
1987                                         sizeof(struct rte_flow_item_meta),
1988                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1989         return ret;
1990 }
1991
1992 /**
1993  * Validate TAG item.
1994  *
1995  * @param[in] dev
1996  *   Pointer to the rte_eth_dev structure.
1997  * @param[in] item
1998  *   Item specification.
1999  * @param[in] attr
2000  *   Attributes of flow that includes this item.
2001  * @param[out] error
2002  *   Pointer to error structure.
2003  *
2004  * @return
2005  *   0 on success, a negative errno value otherwise and rte_errno is set.
2006  */
2007 static int
2008 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2009                           const struct rte_flow_item *item,
2010                           const struct rte_flow_attr *attr __rte_unused,
2011                           struct rte_flow_error *error)
2012 {
2013         const struct rte_flow_item_tag *spec = item->spec;
2014         const struct rte_flow_item_tag *mask = item->mask;
2015         const struct rte_flow_item_tag nic_mask = {
2016                 .data = RTE_BE32(UINT32_MAX),
2017                 .index = 0xff,
2018         };
2019         int ret;
2020
2021         if (!mlx5_flow_ext_mreg_supported(dev))
2022                 return rte_flow_error_set(error, ENOTSUP,
2023                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2024                                           "extensive metadata register"
2025                                           " isn't supported");
2026         if (!spec)
2027                 return rte_flow_error_set(error, EINVAL,
2028                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2029                                           item->spec,
2030                                           "data cannot be empty");
2031         if (!mask)
2032                 mask = &rte_flow_item_tag_mask;
2033         if (!mask->data)
2034                 return rte_flow_error_set(error, EINVAL,
2035                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2036                                         "mask cannot be zero");
2037
2038         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2039                                         (const uint8_t *)&nic_mask,
2040                                         sizeof(struct rte_flow_item_tag),
2041                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2042         if (ret < 0)
2043                 return ret;
2044         if (mask->index != 0xff)
2045                 return rte_flow_error_set(error, EINVAL,
2046                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2047                                           "partial mask for tag index"
2048                                           " is not supported");
2049         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2050         if (ret < 0)
2051                 return ret;
2052         MLX5_ASSERT(ret != REG_NON);
2053         return 0;
2054 }
2055
2056 /**
2057  * Validate vport item.
2058  *
2059  * @param[in] dev
2060  *   Pointer to the rte_eth_dev structure.
2061  * @param[in] item
2062  *   Item specification.
2063  * @param[in] attr
2064  *   Attributes of flow that includes this item.
2065  * @param[in] item_flags
2066  *   Bit-fields that holds the items detected until now.
2067  * @param[out] error
2068  *   Pointer to error structure.
2069  *
2070  * @return
2071  *   0 on success, a negative errno value otherwise and rte_errno is set.
2072  */
2073 static int
2074 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2075                               const struct rte_flow_item *item,
2076                               const struct rte_flow_attr *attr,
2077                               uint64_t item_flags,
2078                               struct rte_flow_error *error)
2079 {
2080         const struct rte_flow_item_port_id *spec = item->spec;
2081         const struct rte_flow_item_port_id *mask = item->mask;
2082         const struct rte_flow_item_port_id switch_mask = {
2083                         .id = 0xffffffff,
2084         };
2085         struct mlx5_priv *esw_priv;
2086         struct mlx5_priv *dev_priv;
2087         int ret;
2088
2089         if (!attr->transfer)
2090                 return rte_flow_error_set(error, EINVAL,
2091                                           RTE_FLOW_ERROR_TYPE_ITEM,
2092                                           NULL,
2093                                           "match on port id is valid only"
2094                                           " when transfer flag is enabled");
2095         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2096                 return rte_flow_error_set(error, ENOTSUP,
2097                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2098                                           "multiple source ports are not"
2099                                           " supported");
2100         if (!mask)
2101                 mask = &switch_mask;
2102         if (mask->id != 0xffffffff)
2103                 return rte_flow_error_set(error, ENOTSUP,
2104                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2105                                            mask,
2106                                            "no support for partial mask on"
2107                                            " \"id\" field");
2108         ret = mlx5_flow_item_acceptable
2109                                 (item, (const uint8_t *)mask,
2110                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2111                                  sizeof(struct rte_flow_item_port_id),
2112                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2113         if (ret)
2114                 return ret;
2115         if (!spec)
2116                 return 0;
2117         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2118         if (!esw_priv)
2119                 return rte_flow_error_set(error, rte_errno,
2120                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2121                                           "failed to obtain E-Switch info for"
2122                                           " port");
2123         dev_priv = mlx5_dev_to_eswitch_info(dev);
2124         if (!dev_priv)
2125                 return rte_flow_error_set(error, rte_errno,
2126                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2127                                           NULL,
2128                                           "failed to obtain E-Switch info");
2129         if (esw_priv->domain_id != dev_priv->domain_id)
2130                 return rte_flow_error_set(error, EINVAL,
2131                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2132                                           "cannot match on a port from a"
2133                                           " different E-Switch");
2134         return 0;
2135 }
2136
2137 /**
2138  * Validate VLAN item.
2139  *
2140  * @param[in] item
2141  *   Item specification.
2142  * @param[in] item_flags
2143  *   Bit-fields that holds the items detected until now.
2144  * @param[in] dev
2145  *   Ethernet device flow is being created on.
2146  * @param[out] error
2147  *   Pointer to error structure.
2148  *
2149  * @return
2150  *   0 on success, a negative errno value otherwise and rte_errno is set.
2151  */
2152 static int
2153 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2154                            uint64_t item_flags,
2155                            struct rte_eth_dev *dev,
2156                            struct rte_flow_error *error)
2157 {
2158         const struct rte_flow_item_vlan *mask = item->mask;
2159         const struct rte_flow_item_vlan nic_mask = {
2160                 .tci = RTE_BE16(UINT16_MAX),
2161                 .inner_type = RTE_BE16(UINT16_MAX),
2162                 .has_more_vlan = 1,
2163         };
2164         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2165         int ret;
2166         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2167                                         MLX5_FLOW_LAYER_INNER_L4) :
2168                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2169                                         MLX5_FLOW_LAYER_OUTER_L4);
2170         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2171                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2172
2173         if (item_flags & vlanm)
2174                 return rte_flow_error_set(error, EINVAL,
2175                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2176                                           "multiple VLAN layers not supported");
2177         else if ((item_flags & l34m) != 0)
2178                 return rte_flow_error_set(error, EINVAL,
2179                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2180                                           "VLAN cannot follow L3/L4 layer");
2181         if (!mask)
2182                 mask = &rte_flow_item_vlan_mask;
2183         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2184                                         (const uint8_t *)&nic_mask,
2185                                         sizeof(struct rte_flow_item_vlan),
2186                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2187         if (ret)
2188                 return ret;
2189         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2190                 struct mlx5_priv *priv = dev->data->dev_private;
2191
2192                 if (priv->vmwa_context) {
2193                         /*
2194                          * Non-NULL context means we have a virtual machine
2195                          * and SR-IOV enabled, we have to create VLAN interface
2196                          * to make hypervisor to setup E-Switch vport
2197                          * context correctly. We avoid creating the multiple
2198                          * VLAN interfaces, so we cannot support VLAN tag mask.
2199                          */
2200                         return rte_flow_error_set(error, EINVAL,
2201                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2202                                                   item,
2203                                                   "VLAN tag mask is not"
2204                                                   " supported in virtual"
2205                                                   " environment");
2206                 }
2207         }
2208         return 0;
2209 }
2210
2211 /*
2212  * GTP flags are contained in 1 byte of the format:
2213  * -------------------------------------------
2214  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2215  * |-----------------------------------------|
2216  * | value | Version | PT | Res | E | S | PN |
2217  * -------------------------------------------
2218  *
2219  * Matching is supported only for GTP flags E, S, PN.
2220  */
2221 #define MLX5_GTP_FLAGS_MASK     0x07
2222
2223 /**
2224  * Validate GTP item.
2225  *
2226  * @param[in] dev
2227  *   Pointer to the rte_eth_dev structure.
2228  * @param[in] item
2229  *   Item specification.
2230  * @param[in] item_flags
2231  *   Bit-fields that holds the items detected until now.
2232  * @param[out] error
2233  *   Pointer to error structure.
2234  *
2235  * @return
2236  *   0 on success, a negative errno value otherwise and rte_errno is set.
2237  */
2238 static int
2239 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2240                           const struct rte_flow_item *item,
2241                           uint64_t item_flags,
2242                           struct rte_flow_error *error)
2243 {
2244         struct mlx5_priv *priv = dev->data->dev_private;
2245         const struct rte_flow_item_gtp *spec = item->spec;
2246         const struct rte_flow_item_gtp *mask = item->mask;
2247         const struct rte_flow_item_gtp nic_mask = {
2248                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2249                 .msg_type = 0xff,
2250                 .teid = RTE_BE32(0xffffffff),
2251         };
2252
2253         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2254                 return rte_flow_error_set(error, ENOTSUP,
2255                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2256                                           "GTP support is not enabled");
2257         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2258                 return rte_flow_error_set(error, ENOTSUP,
2259                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2260                                           "multiple tunnel layers not"
2261                                           " supported");
2262         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2263                 return rte_flow_error_set(error, EINVAL,
2264                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2265                                           "no outer UDP layer found");
2266         if (!mask)
2267                 mask = &rte_flow_item_gtp_mask;
2268         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2269                 return rte_flow_error_set(error, ENOTSUP,
2270                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2271                                           "Match is supported for GTP"
2272                                           " flags only");
2273         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2274                                          (const uint8_t *)&nic_mask,
2275                                          sizeof(struct rte_flow_item_gtp),
2276                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2277 }
2278
2279 /**
2280  * Validate GTP PSC item.
2281  *
2282  * @param[in] item
2283  *   Item specification.
2284  * @param[in] last_item
2285  *   Previous validated item in the pattern items.
2286  * @param[in] gtp_item
2287  *   Previous GTP item specification.
2288  * @param[in] attr
2289  *   Pointer to flow attributes.
2290  * @param[out] error
2291  *   Pointer to error structure.
2292  *
2293  * @return
2294  *   0 on success, a negative errno value otherwise and rte_errno is set.
2295  */
2296 static int
2297 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2298                               uint64_t last_item,
2299                               const struct rte_flow_item *gtp_item,
2300                               const struct rte_flow_attr *attr,
2301                               struct rte_flow_error *error)
2302 {
2303         const struct rte_flow_item_gtp *gtp_spec;
2304         const struct rte_flow_item_gtp *gtp_mask;
2305         const struct rte_flow_item_gtp_psc *spec;
2306         const struct rte_flow_item_gtp_psc *mask;
2307         const struct rte_flow_item_gtp_psc nic_mask = {
2308                 .pdu_type = 0xFF,
2309                 .qfi = 0xFF,
2310         };
2311
2312         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2313                 return rte_flow_error_set
2314                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2315                          "GTP PSC item must be preceded with GTP item");
2316         gtp_spec = gtp_item->spec;
2317         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2318         /* GTP spec and E flag is requested to match zero. */
2319         if (gtp_spec &&
2320                 (gtp_mask->v_pt_rsv_flags &
2321                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2322                 return rte_flow_error_set
2323                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2324                          "GTP E flag must be 1 to match GTP PSC");
2325         /* Check the flow is not created in group zero. */
2326         if (!attr->transfer && !attr->group)
2327                 return rte_flow_error_set
2328                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2329                          "GTP PSC is not supported for group 0");
2330         /* GTP spec is here and E flag is requested to match zero. */
2331         if (!item->spec)
2332                 return 0;
2333         spec = item->spec;
2334         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2335         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2336                 return rte_flow_error_set
2337                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2338                          "PDU type should be smaller than 16");
2339         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2340                                          (const uint8_t *)&nic_mask,
2341                                          sizeof(struct rte_flow_item_gtp_psc),
2342                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2343 }
2344
/**
 * Validate IPV4 item.
 * Use existing validation function mlx5_flow_validate_item_ipv4(), and
 * add specific validation of fragment_offset field.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
2360 static int
2361 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2362                            uint64_t item_flags,
2363                            uint64_t last_item,
2364                            uint16_t ether_type,
2365                            struct rte_flow_error *error)
2366 {
2367         int ret;
2368         const struct rte_flow_item_ipv4 *spec = item->spec;
2369         const struct rte_flow_item_ipv4 *last = item->last;
2370         const struct rte_flow_item_ipv4 *mask = item->mask;
2371         rte_be16_t fragment_offset_spec = 0;
2372         rte_be16_t fragment_offset_last = 0;
2373         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2374                 .hdr = {
2375                         .src_addr = RTE_BE32(0xffffffff),
2376                         .dst_addr = RTE_BE32(0xffffffff),
2377                         .type_of_service = 0xff,
2378                         .fragment_offset = RTE_BE16(0xffff),
2379                         .next_proto_id = 0xff,
2380                         .time_to_live = 0xff,
2381                 },
2382         };
2383
2384         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2385                                            ether_type, &nic_ipv4_mask,
2386                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2387         if (ret < 0)
2388                 return ret;
2389         if (spec && mask)
2390                 fragment_offset_spec = spec->hdr.fragment_offset &
2391                                        mask->hdr.fragment_offset;
2392         if (!fragment_offset_spec)
2393                 return 0;
2394         /*
2395          * spec and mask are valid, enforce using full mask to make sure the
2396          * complete value is used correctly.
2397          */
2398         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2399                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2400                 return rte_flow_error_set(error, EINVAL,
2401                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2402                                           item, "must use full mask for"
2403                                           " fragment_offset");
2404         /*
2405          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2406          * indicating this is 1st fragment of fragmented packet.
2407          * This is not yet supported in MLX5, return appropriate error message.
2408          */
2409         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2410                 return rte_flow_error_set(error, ENOTSUP,
2411                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2412                                           "match on first fragment not "
2413                                           "supported");
2414         if (fragment_offset_spec && !last)
2415                 return rte_flow_error_set(error, ENOTSUP,
2416                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2417                                           "specified value not supported");
2418         /* spec and last are valid, validate the specified range. */
2419         fragment_offset_last = last->hdr.fragment_offset &
2420                                mask->hdr.fragment_offset;
2421         /*
2422          * Match on fragment_offset spec 0x2001 and last 0x3fff
2423          * means MF is 1 and frag-offset is > 0.
2424          * This packet is fragment 2nd and onward, excluding last.
2425          * This is not yet supported in MLX5, return appropriate
2426          * error message.
2427          */
2428         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2429             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2430                 return rte_flow_error_set(error, ENOTSUP,
2431                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2432                                           last, "match on following "
2433                                           "fragments not supported");
2434         /*
2435          * Match on fragment_offset spec 0x0001 and last 0x1fff
2436          * means MF is 0 and frag-offset is > 0.
2437          * This packet is last fragment of fragmented packet.
2438          * This is not yet supported in MLX5, return appropriate
2439          * error message.
2440          */
2441         if (fragment_offset_spec == RTE_BE16(1) &&
2442             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2443                 return rte_flow_error_set(error, ENOTSUP,
2444                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2445                                           last, "match on last "
2446                                           "fragment not supported");
2447         /*
2448          * Match on fragment_offset spec 0x0001 and last 0x3fff
2449          * means MF and/or frag-offset is not 0.
2450          * This is a fragmented packet.
2451          * Other range values are invalid and rejected.
2452          */
2453         if (!(fragment_offset_spec == RTE_BE16(1) &&
2454               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2455                 return rte_flow_error_set(error, ENOTSUP,
2456                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2457                                           "specified range not supported");
2458         return 0;
2459 }
2460
2461 /**
2462  * Validate IPV6 fragment extension item.
2463  *
2464  * @param[in] item
2465  *   Item specification.
2466  * @param[in] item_flags
2467  *   Bit-fields that holds the items detected until now.
2468  * @param[out] error
2469  *   Pointer to error structure.
2470  *
2471  * @return
2472  *   0 on success, a negative errno value otherwise and rte_errno is set.
2473  */
2474 static int
2475 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2476                                     uint64_t item_flags,
2477                                     struct rte_flow_error *error)
2478 {
2479         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2480         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2481         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2482         rte_be16_t frag_data_spec = 0;
2483         rte_be16_t frag_data_last = 0;
2484         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2485         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2486                                       MLX5_FLOW_LAYER_OUTER_L4;
2487         int ret = 0;
2488         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2489                 .hdr = {
2490                         .next_header = 0xff,
2491                         .frag_data = RTE_BE16(0xffff),
2492                 },
2493         };
2494
2495         if (item_flags & l4m)
2496                 return rte_flow_error_set(error, EINVAL,
2497                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2498                                           "ipv6 fragment extension item cannot "
2499                                           "follow L4 item.");
2500         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2501             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2502                 return rte_flow_error_set(error, EINVAL,
2503                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2504                                           "ipv6 fragment extension item must "
2505                                           "follow ipv6 item");
2506         if (spec && mask)
2507                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2508         if (!frag_data_spec)
2509                 return 0;
2510         /*
2511          * spec and mask are valid, enforce using full mask to make sure the
2512          * complete value is used correctly.
2513          */
2514         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2515                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2516                 return rte_flow_error_set(error, EINVAL,
2517                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2518                                           item, "must use full mask for"
2519                                           " frag_data");
2520         /*
2521          * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
2522          * This is 1st fragment of fragmented packet.
2523          */
2524         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2525                 return rte_flow_error_set(error, ENOTSUP,
2526                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2527                                           "match on first fragment not "
2528                                           "supported");
2529         if (frag_data_spec && !last)
2530                 return rte_flow_error_set(error, EINVAL,
2531                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2532                                           "specified value not supported");
2533         ret = mlx5_flow_item_acceptable
2534                                 (item, (const uint8_t *)mask,
2535                                  (const uint8_t *)&nic_mask,
2536                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2537                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2538         if (ret)
2539                 return ret;
2540         /* spec and last are valid, validate the specified range. */
2541         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2542         /*
2543          * Match on frag_data spec 0x0009 and last 0xfff9
2544          * means M is 1 and frag-offset is > 0.
2545          * This packet is fragment 2nd and onward, excluding last.
2546          * This is not yet supported in MLX5, return appropriate
2547          * error message.
2548          */
2549         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2550                                        RTE_IPV6_EHDR_MF_MASK) &&
2551             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2552                 return rte_flow_error_set(error, ENOTSUP,
2553                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2554                                           last, "match on following "
2555                                           "fragments not supported");
2556         /*
2557          * Match on frag_data spec 0x0008 and last 0xfff8
2558          * means M is 0 and frag-offset is > 0.
2559          * This packet is last fragment of fragmented packet.
2560          * This is not yet supported in MLX5, return appropriate
2561          * error message.
2562          */
2563         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2564             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2565                 return rte_flow_error_set(error, ENOTSUP,
2566                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2567                                           last, "match on last "
2568                                           "fragment not supported");
2569         /* Other range values are invalid and rejected. */
2570         return rte_flow_error_set(error, EINVAL,
2571                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2572                                   "specified range not supported");
2573 }
2574
2575 /**
2576  * Validate the pop VLAN action.
2577  *
2578  * @param[in] dev
2579  *   Pointer to the rte_eth_dev structure.
2580  * @param[in] action_flags
2581  *   Holds the actions detected until now.
2582  * @param[in] action
2583  *   Pointer to the pop vlan action.
2584  * @param[in] item_flags
2585  *   The items found in this flow rule.
2586  * @param[in] attr
2587  *   Pointer to flow attributes.
2588  * @param[out] error
2589  *   Pointer to error structure.
2590  *
2591  * @return
2592  *   0 on success, a negative errno value otherwise and rte_errno is set.
2593  */
2594 static int
2595 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2596                                  uint64_t action_flags,
2597                                  const struct rte_flow_action *action,
2598                                  uint64_t item_flags,
2599                                  const struct rte_flow_attr *attr,
2600                                  struct rte_flow_error *error)
2601 {
2602         const struct mlx5_priv *priv = dev->data->dev_private;
2603
2604         (void)action;
2605         (void)attr;
2606         if (!priv->sh->pop_vlan_action)
2607                 return rte_flow_error_set(error, ENOTSUP,
2608                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2609                                           NULL,
2610                                           "pop vlan action is not supported");
2611         if (attr->egress)
2612                 return rte_flow_error_set(error, ENOTSUP,
2613                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2614                                           NULL,
2615                                           "pop vlan action not supported for "
2616                                           "egress");
2617         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2618                 return rte_flow_error_set(error, ENOTSUP,
2619                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2620                                           "no support for multiple VLAN "
2621                                           "actions");
2622         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2623         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2624             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2625                 return rte_flow_error_set(error, ENOTSUP,
2626                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2627                                           NULL,
2628                                           "cannot pop vlan after decap without "
2629                                           "match on inner vlan in the flow");
2630         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2631         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2632             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2633                 return rte_flow_error_set(error, ENOTSUP,
2634                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2635                                           NULL,
2636                                           "cannot pop vlan without a "
2637                                           "match on (outer) vlan in the flow");
2638         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2639                 return rte_flow_error_set(error, EINVAL,
2640                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2641                                           "wrong action order, port_id should "
2642                                           "be after pop VLAN action");
2643         if (!attr->transfer && priv->representor)
2644                 return rte_flow_error_set(error, ENOTSUP,
2645                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2646                                           "pop vlan action for VF representor "
2647                                           "not supported on NIC table");
2648         return 0;
2649 }
2650
/**
 * Get VLAN default info from vlan match info.
 *
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to the VLAN info to fill in.
 */
2662 static void
2663 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2664                                   struct rte_vlan_hdr *vlan)
2665 {
2666         const struct rte_flow_item_vlan nic_mask = {
2667                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2668                                 MLX5DV_FLOW_VLAN_VID_MASK),
2669                 .inner_type = RTE_BE16(0xffff),
2670         };
2671
2672         if (items == NULL)
2673                 return;
2674         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2675                 int type = items->type;
2676
2677                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2678                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2679                         break;
2680         }
2681         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2682                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2683                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2684
2685                 /* If VLAN item in pattern doesn't contain data, return here. */
2686                 if (!vlan_v)
2687                         return;
2688                 if (!vlan_m)
2689                         vlan_m = &nic_mask;
2690                 /* Only full match values are accepted */
2691                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2692                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2693                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2694                         vlan->vlan_tci |=
2695                                 rte_be_to_cpu_16(vlan_v->tci &
2696                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2697                 }
2698                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2699                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2700                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2701                         vlan->vlan_tci |=
2702                                 rte_be_to_cpu_16(vlan_v->tci &
2703                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2704                 }
2705                 if (vlan_m->inner_type == nic_mask.inner_type)
2706                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2707                                                            vlan_m->inner_type);
2708         }
2709 }
2710
/**
 * Validate the push VLAN action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] vlan_m
 *   Pointer to the VLAN item mask found in the pattern, or NULL.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
2730 static int
2731 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2732                                   uint64_t action_flags,
2733                                   const struct rte_flow_item_vlan *vlan_m,
2734                                   const struct rte_flow_action *action,
2735                                   const struct rte_flow_attr *attr,
2736                                   struct rte_flow_error *error)
2737 {
2738         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2739         const struct mlx5_priv *priv = dev->data->dev_private;
2740
2741         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2742             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2743                 return rte_flow_error_set(error, EINVAL,
2744                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2745                                           "invalid vlan ethertype");
2746         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2747                 return rte_flow_error_set(error, EINVAL,
2748                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2749                                           "wrong action order, port_id should "
2750                                           "be after push VLAN");
2751         if (!attr->transfer && priv->representor)
2752                 return rte_flow_error_set(error, ENOTSUP,
2753                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2754                                           "push vlan action for VF representor "
2755                                           "not supported on NIC table");
2756         if (vlan_m &&
2757             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2758             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2759                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2760             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2761             !(mlx5_flow_find_action
2762                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2763                 return rte_flow_error_set(error, EINVAL,
2764                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2765                                           "not full match mask on VLAN PCP and "
2766                                           "there is no of_set_vlan_pcp action, "
2767                                           "push VLAN action cannot figure out "
2768                                           "PCP value");
2769         if (vlan_m &&
2770             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2771             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2772                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2773             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2774             !(mlx5_flow_find_action
2775                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2776                 return rte_flow_error_set(error, EINVAL,
2777                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2778                                           "not full match mask on VLAN VID and "
2779                                           "there is no of_set_vlan_vid action, "
2780                                           "push VLAN action cannot figure out "
2781                                           "VID value");
2782         (void)attr;
2783         return 0;
2784 }
2785
2786 /**
2787  * Validate the set VLAN PCP.
2788  *
2789  * @param[in] action_flags
2790  *   Holds the actions detected until now.
2791  * @param[in] actions
2792  *   Pointer to the list of actions remaining in the flow rule.
2793  * @param[out] error
2794  *   Pointer to error structure.
2795  *
2796  * @return
2797  *   0 on success, a negative errno value otherwise and rte_errno is set.
2798  */
2799 static int
2800 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2801                                      const struct rte_flow_action actions[],
2802                                      struct rte_flow_error *error)
2803 {
2804         const struct rte_flow_action *action = actions;
2805         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2806
2807         if (conf->vlan_pcp > 7)
2808                 return rte_flow_error_set(error, EINVAL,
2809                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2810                                           "VLAN PCP value is too big");
2811         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2812                 return rte_flow_error_set(error, ENOTSUP,
2813                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2814                                           "set VLAN PCP action must follow "
2815                                           "the push VLAN action");
2816         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2817                 return rte_flow_error_set(error, ENOTSUP,
2818                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2819                                           "Multiple VLAN PCP modification are "
2820                                           "not supported");
2821         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2822                 return rte_flow_error_set(error, EINVAL,
2823                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2824                                           "wrong action order, port_id should "
2825                                           "be after set VLAN PCP");
2826         return 0;
2827 }
2828
2829 /**
2830  * Validate the set VLAN VID.
2831  *
2832  * @param[in] item_flags
2833  *   Holds the items detected in this rule.
2834  * @param[in] action_flags
2835  *   Holds the actions detected until now.
2836  * @param[in] actions
2837  *   Pointer to the list of actions remaining in the flow rule.
2838  * @param[out] error
2839  *   Pointer to error structure.
2840  *
2841  * @return
2842  *   0 on success, a negative errno value otherwise and rte_errno is set.
2843  */
2844 static int
2845 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2846                                      uint64_t action_flags,
2847                                      const struct rte_flow_action actions[],
2848                                      struct rte_flow_error *error)
2849 {
2850         const struct rte_flow_action *action = actions;
2851         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2852
2853         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2854                 return rte_flow_error_set(error, EINVAL,
2855                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2856                                           "VLAN VID value is too big");
2857         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2858             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2859                 return rte_flow_error_set(error, ENOTSUP,
2860                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2861                                           "set VLAN VID action must follow push"
2862                                           " VLAN action or match on VLAN item");
2863         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2864                 return rte_flow_error_set(error, ENOTSUP,
2865                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2866                                           "Multiple VLAN VID modifications are "
2867                                           "not supported");
2868         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2869                 return rte_flow_error_set(error, EINVAL,
2870                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2871                                           "wrong action order, port_id should "
2872                                           "be after set VLAN VID");
2873         return 0;
2874 }
2875
/**
 * Validate the FLAG action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_flag(struct rte_eth_dev *dev,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	int ret;

	/* Fall back if no extended metadata register support. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return mlx5_flow_validate_action_flag(action_flags, attr,
						      error);
	/* Extensive metadata mode requires registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no metadata registers "
					  "to support flag action");
	/* The default flag value must fit into the available mark mask. */
	if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "extended metadata register"
					  " isn't available");
	/* A register must be obtainable for the MARK feature. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret > 0);
	/* FLAG and MARK are mutually exclusive within one flow. */
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	return 0;
}
2931
/**
 * Validate MARK action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_mark(struct rte_eth_dev *dev,
			     const struct rte_flow_action *action,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_action_mark *mark = action->conf;
	int ret;

	/* MARK cannot be used while tunnel offload is active. */
	if (is_tunnel_offload_active(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no mark action "
					  "if tunnel offload active");
	/* Fall back if no extended metadata register support. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return mlx5_flow_validate_action_mark(action, action_flags,
						      attr, error);
	/* Extensive metadata mode requires registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no metadata registers "
					  "to support mark action");
	/* A zero mark mask means no bits can carry the mark value. */
	if (!priv->sh->dv_mark_mask)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "extended metadata register"
					  " isn't available");
	/* A register must be obtainable for the MARK feature. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret > 0);
	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	/* The mark id must fit both the API limit and the register mask. */
	if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id exceeds the limit");
	/* MARK and FLAG are mutually exclusive within one flow. */
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	return 0;
}
3005
3006 /**
3007  * Validate SET_META action.
3008  *
3009  * @param[in] dev
3010  *   Pointer to the rte_eth_dev structure.
3011  * @param[in] action
3012  *   Pointer to the action structure.
3013  * @param[in] action_flags
3014  *   Holds the actions detected until now.
3015  * @param[in] attr
3016  *   Pointer to flow attributes
3017  * @param[out] error
3018  *   Pointer to error structure.
3019  *
3020  * @return
3021  *   0 on success, a negative errno value otherwise and rte_errno is set.
3022  */
3023 static int
3024 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3025                                  const struct rte_flow_action *action,
3026                                  uint64_t action_flags __rte_unused,
3027                                  const struct rte_flow_attr *attr,
3028                                  struct rte_flow_error *error)
3029 {
3030         const struct rte_flow_action_set_meta *conf;
3031         uint32_t nic_mask = UINT32_MAX;
3032         int reg;
3033
3034         if (!mlx5_flow_ext_mreg_supported(dev))
3035                 return rte_flow_error_set(error, ENOTSUP,
3036                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3037                                           "extended metadata register"
3038                                           " isn't supported");
3039         reg = flow_dv_get_metadata_reg(dev, attr, error);
3040         if (reg < 0)
3041                 return reg;
3042         if (reg == REG_NON)
3043                 return rte_flow_error_set(error, ENOTSUP,
3044                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3045                                           "unavalable extended metadata register");
3046         if (reg != REG_A && reg != REG_B) {
3047                 struct mlx5_priv *priv = dev->data->dev_private;
3048
3049                 nic_mask = priv->sh->dv_meta_mask;
3050         }
3051         if (!(action->conf))
3052                 return rte_flow_error_set(error, EINVAL,
3053                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3054                                           "configuration cannot be null");
3055         conf = (const struct rte_flow_action_set_meta *)action->conf;
3056         if (!conf->mask)
3057                 return rte_flow_error_set(error, EINVAL,
3058                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3059                                           "zero mask doesn't have any effect");
3060         if (conf->mask & ~nic_mask)
3061                 return rte_flow_error_set(error, EINVAL,
3062                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3063                                           "meta data must be within reg C0");
3064         return 0;
3065 }
3066
/**
 * Validate SET_TAG action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				uint64_t action_flags,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tag *conf;
	/* Fate actions after which a freshly set tag is never consumed. */
	const uint64_t terminal_action_flags =
		MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
		MLX5_FLOW_ACTION_RSS;
	int ret;

	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "extensive metadata register"
					  " isn't supported");
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	conf = (const struct rte_flow_action_set_tag *)action->conf;
	if (!conf->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "zero mask doesn't have any effect");
	/* Verify a register is available for the requested tag index. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	/* On non-transfer ingress rules a tag set before a terminal action
	 * cannot be observed afterwards - reject the combination.
	 */
	if (!attr->transfer && attr->ingress &&
	    (action_flags & terminal_action_flags))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "set_tag has no effect"
					  " with terminal actions");
	return 0;
}
3122
/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_count *count;

	/* Flow counters require DevX support. */
	if (!priv->config.devx)
		goto notsup_err;
	if (action_flags & MLX5_FLOW_ACTION_COUNT)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "duplicate count actions set");
	count = (const struct rte_flow_action_count *)action->conf;
	/* Shared counter with AGE needs the ASO flow-hit engine. */
	if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
	    !priv->sh->flow_hit_aso_en)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "old age and shared count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/* Without DevX counter support compiled in, fall through here. */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
3169
3170 /**
3171  * Validate the L2 encap action.
3172  *
3173  * @param[in] dev
3174  *   Pointer to the rte_eth_dev structure.
3175  * @param[in] action_flags
3176  *   Holds the actions detected until now.
3177  * @param[in] action
3178  *   Pointer to the action structure.
3179  * @param[in] attr
3180  *   Pointer to flow attributes.
3181  * @param[out] error
3182  *   Pointer to error structure.
3183  *
3184  * @return
3185  *   0 on success, a negative errno value otherwise and rte_errno is set.
3186  */
3187 static int
3188 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3189                                  uint64_t action_flags,
3190                                  const struct rte_flow_action *action,
3191                                  const struct rte_flow_attr *attr,
3192                                  struct rte_flow_error *error)
3193 {
3194         const struct mlx5_priv *priv = dev->data->dev_private;
3195
3196         if (!(action->conf))
3197                 return rte_flow_error_set(error, EINVAL,
3198                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3199                                           "configuration cannot be null");
3200         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3203                                           "can only have a single encap action "
3204                                           "in a flow");
3205         if (!attr->transfer && priv->representor)
3206                 return rte_flow_error_set(error, ENOTSUP,
3207                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3208                                           "encap action for VF representor "
3209                                           "not supported on NIC table");
3210         return 0;
3211 }
3212
/**
 * Validate a decap action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_decap(struct rte_eth_dev *dev,
			      uint64_t action_flags,
			      const struct rte_flow_action *action,
			      const uint64_t item_flags,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
{
	const struct mlx5_priv *priv = dev->data->dev_private;

	/* Decap may have been explicitly disabled for this device. */
	if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
	    !priv->config.decap_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "decap is not enabled");
	/*
	 * A prior encap or decap flag is already set: pick the message that
	 * matches whether this is a duplicate decap or a decap-after-encap.
	 */
	if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  action_flags &
					  MLX5_FLOW_ACTION_DECAP ? "can only "
					  "have a single decap action" : "decap "
					  "after encap is not supported");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "decap action not supported for "
					  "egress");
	if (!attr->transfer && priv->representor)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "decap action for VF representor "
					  "not supported on NIC table");
	/* VXLAN decap requires the flow to actually match a VXLAN header. */
	if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
	    !(item_flags & MLX5_FLOW_LAYER_VXLAN))
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"VXLAN item should be present for VXLAN decap");
	return 0;
}
3277
/* Shared empty raw decap configuration: no data and zero size. */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3279
/**
 * Validate the raw encap and decap actions.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] decap
 *   Pointer to the decap action.
 * @param[in] encap
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[in/out] action_flags
 *   Holds the actions detected until now.
 * @param[out] actions_n
 *   pointer to the number of actions counter.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap_decap
	(struct rte_eth_dev *dev,
	 const struct rte_flow_action_raw_decap *decap,
	 const struct rte_flow_action_raw_encap *encap,
	 const struct rte_flow_attr *attr, uint64_t *action_flags,
	 int *actions_n, const struct rte_flow_action *action,
	 uint64_t item_flags, struct rte_flow_error *error)
{
	const struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	if (encap && (!encap->size || !encap->data))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "raw encap data cannot be empty");
	/*
	 * When both actions are present, classify the pair by comparing each
	 * size against MLX5_ENCAPSULATION_DECISION_SIZE. A side at or below
	 * the threshold folds into the other as a single L3 reformat (its
	 * local pointer is cleared so only one action is accounted below);
	 * both above the threshold means two independent L2 actions.
	 */
	if (decap && encap) {
		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 encap. */
			decap = NULL;
		else if (encap->size <=
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 decap. */
			encap = NULL;
		else if (encap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* 2 L2 actions: encap and decap. */
			;
		else
			return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "unsupported too small "
				"raw decap and too small raw "
				"encap combination");
	}
	if (decap) {
		/* Reuse the generic decap checks, then account the action. */
		ret = flow_dv_validate_action_decap(dev, *action_flags, action,
						    item_flags, attr, error);
		if (ret < 0)
			return ret;
		*action_flags |= MLX5_FLOW_ACTION_DECAP;
		++(*actions_n);
	}
	if (encap) {
		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "small raw encap size");
		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "more than one encap action");
		if (!attr->transfer && priv->representor)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "encap action for VF representor "
					 "not supported on NIC table");
		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
		++(*actions_n);
	}
	return 0;
}
3376
3377 /**
3378  * Match encap_decap resource.
3379  *
3380  * @param list
3381  *   Pointer to the hash list.
3382  * @param entry
3383  *   Pointer to exist resource entry object.
3384  * @param key
3385  *   Key of the new entry.
3386  * @param ctx_cb
3387  *   Pointer to new encap_decap resource.
3388  *
3389  * @return
3390  *   0 on matching, none-zero otherwise.
3391  */
3392 int
3393 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3394                              struct mlx5_hlist_entry *entry,
3395                              uint64_t key __rte_unused, void *cb_ctx)
3396 {
3397         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3398         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3399         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3400
3401         cache_resource = container_of(entry,
3402                                       struct mlx5_flow_dv_encap_decap_resource,
3403                                       entry);
3404         if (resource->reformat_type == cache_resource->reformat_type &&
3405             resource->ft_type == cache_resource->ft_type &&
3406             resource->flags == cache_resource->flags &&
3407             resource->size == cache_resource->size &&
3408             !memcmp((const void *)resource->buf,
3409                     (const void *)cache_resource->buf,
3410                     resource->size))
3411                 return 0;
3412         return -1;
3413 }
3414
/**
 * Allocate encap_decap resource.
 *
 * @param list
 *   Pointer to the hash list.
 * @param key
 *   Key of the new entry.
 * @param cb_ctx
 *   Pointer to the new encap_decap resource.
 *
 * @return
 *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
 */
3428 struct mlx5_hlist_entry *
3429 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3430                               uint64_t key __rte_unused,
3431                               void *cb_ctx)
3432 {
3433         struct mlx5_dev_ctx_shared *sh = list->ctx;
3434         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3435         struct mlx5dv_dr_domain *domain;
3436         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3437         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3438         uint32_t idx;
3439         int ret;
3440
3441         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3442                 domain = sh->fdb_domain;
3443         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3444                 domain = sh->rx_domain;
3445         else
3446                 domain = sh->tx_domain;
3447         /* Register new encap/decap resource. */
3448         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3449                                        &idx);
3450         if (!cache_resource) {
3451                 rte_flow_error_set(ctx->error, ENOMEM,
3452                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3453                                    "cannot allocate resource memory");
3454                 return NULL;
3455         }
3456         *cache_resource = *resource;
3457         cache_resource->idx = idx;
3458         ret = mlx5_flow_os_create_flow_action_packet_reformat
3459                                         (sh->ctx, domain, cache_resource,
3460                                          &cache_resource->action);
3461         if (ret) {
3462                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3463                 rte_flow_error_set(ctx->error, ENOMEM,
3464                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3465                                    NULL, "cannot create action");
3466                 return NULL;
3467         }
3468
3469         return &cache_resource->entry;
3470 }
3471
3472 /**
3473  * Find existing encap/decap resource or create and register a new one.
3474  *
3475  * @param[in, out] dev
3476  *   Pointer to rte_eth_dev structure.
3477  * @param[in, out] resource
3478  *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
3480  *   Pointer to the dev_flow.
3481  * @param[out] error
3482  *   pointer to error structure.
3483  *
3484  * @return
3485  *   0 on success otherwise -errno and errno is set.
3486  */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_hlist_entry *entry;
	/* Compact 32-bit key of the fixed resource attributes. */
	union {
		struct {
			uint32_t ft_type:8;
			uint32_t refmt_type:8;
			/*
			 * Header reformat actions can be shared between
			 * non-root tables. One bit to indicate non-root
			 * table or not.
			 */
			uint32_t is_root:1;
			uint32_t reserve:15;
		};
		uint32_t v32;
	} encap_decap_key = {
		{
			.ft_type = resource->ft_type,
			.refmt_type = resource->reformat_type,
			/*
			 * NOTE(review): set when group != 0, i.e. for
			 * NON-root tables, despite the field name being
			 * "is_root" — TODO confirm/rename.
			 */
			.is_root = !!dev_flow->dv.group,
			.reserve = 0,
		}
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};
	uint64_t key64;

	/* flags == 1 marks a root-table (group 0) resource. */
	resource->flags = dev_flow->dv.group ? 0 : 1;
	/* Fold the fixed attributes into the hash key. */
	key64 =  __rte_raw_cksum(&encap_decap_key.v32,
				 sizeof(encap_decap_key.v32), 0);
	/*
	 * Mix the raw reformat buffer into the key as well; plain L2
	 * decap (L2_TUNNEL_TO_L2) carries no data to hash.
	 */
	if (resource->reformat_type !=
	    MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
	    resource->size)
		key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
	/* Look up an existing entry or create one via the hlist callbacks. */
	entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
	if (!entry)
		return -rte_errno;
	/* Point the flow at the (possibly pre-existing) shared resource. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->dv.encap_decap = resource;
	dev_flow->handle->dvh.rix_encap_decap = resource->idx;
	return 0;
}
3539
3540 /**
3541  * Find existing table jump resource or create and register a new one.
3542  *
3543  * @param[in, out] dev
3544  *   Pointer to rte_eth_dev structure.
3545  * @param[in, out] tbl
3546  *   Pointer to flow table resource.
 * @param[in, out] dev_flow
3548  *   Pointer to the dev_flow.
3549  * @param[out] error
3550  *   pointer to error structure.
3551  *
3552  * @return
3553  *   0 on success otherwise -errno and errno is set.
3554  */
3555 static int
3556 flow_dv_jump_tbl_resource_register
3557                         (struct rte_eth_dev *dev __rte_unused,
3558                          struct mlx5_flow_tbl_resource *tbl,
3559                          struct mlx5_flow *dev_flow,
3560                          struct rte_flow_error *error __rte_unused)
3561 {
3562         struct mlx5_flow_tbl_data_entry *tbl_data =
3563                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3564
3565         MLX5_ASSERT(tbl);
3566         MLX5_ASSERT(tbl_data->jump.action);
3567         dev_flow->handle->rix_jump = tbl_data->idx;
3568         dev_flow->dv.jump = &tbl_data->jump;
3569         return 0;
3570 }
3571
3572 int
3573 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3574                          struct mlx5_cache_entry *entry, void *cb_ctx)
3575 {
3576         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3577         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3578         struct mlx5_flow_dv_port_id_action_resource *res =
3579                         container_of(entry, typeof(*res), entry);
3580
3581         return ref->port_id != res->port_id;
3582 }
3583
3584 struct mlx5_cache_entry *
3585 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3586                           struct mlx5_cache_entry *entry __rte_unused,
3587                           void *cb_ctx)
3588 {
3589         struct mlx5_dev_ctx_shared *sh = list->ctx;
3590         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3591         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3592         struct mlx5_flow_dv_port_id_action_resource *cache;
3593         uint32_t idx;
3594         int ret;
3595
3596         /* Register new port id action resource. */
3597         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3598         if (!cache) {
3599                 rte_flow_error_set(ctx->error, ENOMEM,
3600                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3601                                    "cannot allocate port_id action cache memory");
3602                 return NULL;
3603         }
3604         *cache = *ref;
3605         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3606                                                         ref->port_id,
3607                                                         &cache->action);
3608         if (ret) {
3609                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3610                 rte_flow_error_set(ctx->error, ENOMEM,
3611                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3612                                    "cannot create action");
3613                 return NULL;
3614         }
3615         cache->idx = idx;
3616         return &cache->entry;
3617 }
3618
3619 /**
3620  * Find existing table port ID resource or create and register a new one.
3621  *
3622  * @param[in, out] dev
3623  *   Pointer to rte_eth_dev structure.
3624  * @param[in, out] resource
3625  *   Pointer to port ID action resource.
 * @param[in, out] dev_flow
3627  *   Pointer to the dev_flow.
3628  * @param[out] error
3629  *   pointer to error structure.
3630  *
3631  * @return
3632  *   0 on success otherwise -errno and errno is set.
3633  */
3634 static int
3635 flow_dv_port_id_action_resource_register
3636                         (struct rte_eth_dev *dev,
3637                          struct mlx5_flow_dv_port_id_action_resource *resource,
3638                          struct mlx5_flow *dev_flow,
3639                          struct rte_flow_error *error)
3640 {
3641         struct mlx5_priv *priv = dev->data->dev_private;
3642         struct mlx5_cache_entry *entry;
3643         struct mlx5_flow_dv_port_id_action_resource *cache;
3644         struct mlx5_flow_cb_ctx ctx = {
3645                 .error = error,
3646                 .data = resource,
3647         };
3648
3649         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3650         if (!entry)
3651                 return -rte_errno;
3652         cache = container_of(entry, typeof(*cache), entry);
3653         dev_flow->dv.port_id_action = cache;
3654         dev_flow->handle->rix_port_id_action = cache->idx;
3655         return 0;
3656 }
3657
3658 int
3659 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3660                          struct mlx5_cache_entry *entry, void *cb_ctx)
3661 {
3662         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3663         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3664         struct mlx5_flow_dv_push_vlan_action_resource *res =
3665                         container_of(entry, typeof(*res), entry);
3666
3667         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3668 }
3669
/**
 * Allocate push VLAN action resource.
 *
 * @param list
 *   Pointer to the cache list owning the shared push VLAN actions.
 * @param entry
 *   Unused hint entry.
 * @param cb_ctx
 *   Pointer to the mlx5_flow_cb_ctx carrying the reference resource.
 *
 * @return
 *   Pointer to the new cache entry on success, NULL otherwise.
 */
struct mlx5_cache_entry *
flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
			  struct mlx5_cache_entry *entry __rte_unused,
			  void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
	struct mlx5_flow_dv_push_vlan_action_resource *cache;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx;
	int ret;

	/* Register new push_vlan action resource. */
	cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
	if (!cache) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate push_vlan action cache memory");
		return NULL;
	}
	*cache = *ref;
	/* Select the rdma-core domain matching the flow table type. */
	if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
							&cache->action);
	if (ret) {
		/* Release the pool slot so the index is not leaked. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create push vlan action");
		return NULL;
	}
	cache->idx = idx;
	return &cache->entry;
}
3710
3711 /**
3712  * Find existing push vlan resource or create and register a new one.
3713  *
3714  * @param [in, out] dev
3715  *   Pointer to rte_eth_dev structure.
3716  * @param[in, out] resource
3717  *   Pointer to port ID action resource.
 * @param[in, out] dev_flow
3719  *   Pointer to the dev_flow.
3720  * @param[out] error
3721  *   pointer to error structure.
3722  *
3723  * @return
3724  *   0 on success otherwise -errno and errno is set.
3725  */
3726 static int
3727 flow_dv_push_vlan_action_resource_register
3728                        (struct rte_eth_dev *dev,
3729                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3730                         struct mlx5_flow *dev_flow,
3731                         struct rte_flow_error *error)
3732 {
3733         struct mlx5_priv *priv = dev->data->dev_private;
3734         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3735         struct mlx5_cache_entry *entry;
3736         struct mlx5_flow_cb_ctx ctx = {
3737                 .error = error,
3738                 .data = resource,
3739         };
3740
3741         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3742         if (!entry)
3743                 return -rte_errno;
3744         cache = container_of(entry, typeof(*cache), entry);
3745
3746         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3747         dev_flow->dv.push_vlan_res = cache;
3748         return 0;
3749 }
3750
3751 /**
3752  * Get the size of specific rte_flow_item_type hdr size
3753  *
3754  * @param[in] item_type
3755  *   Tested rte_flow_item_type.
3756  *
3757  * @return
3758  *   sizeof struct item_type, 0 if void or irrelevant.
3759  */
3760 static size_t
3761 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3762 {
3763         size_t retval;
3764
3765         switch (item_type) {
3766         case RTE_FLOW_ITEM_TYPE_ETH:
3767                 retval = sizeof(struct rte_ether_hdr);
3768                 break;
3769         case RTE_FLOW_ITEM_TYPE_VLAN:
3770                 retval = sizeof(struct rte_vlan_hdr);
3771                 break;
3772         case RTE_FLOW_ITEM_TYPE_IPV4:
3773                 retval = sizeof(struct rte_ipv4_hdr);
3774                 break;
3775         case RTE_FLOW_ITEM_TYPE_IPV6:
3776                 retval = sizeof(struct rte_ipv6_hdr);
3777                 break;
3778         case RTE_FLOW_ITEM_TYPE_UDP:
3779                 retval = sizeof(struct rte_udp_hdr);
3780                 break;
3781         case RTE_FLOW_ITEM_TYPE_TCP:
3782                 retval = sizeof(struct rte_tcp_hdr);
3783                 break;
3784         case RTE_FLOW_ITEM_TYPE_VXLAN:
3785         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3786                 retval = sizeof(struct rte_vxlan_hdr);
3787                 break;
3788         case RTE_FLOW_ITEM_TYPE_GRE:
3789         case RTE_FLOW_ITEM_TYPE_NVGRE:
3790                 retval = sizeof(struct rte_gre_hdr);
3791                 break;
3792         case RTE_FLOW_ITEM_TYPE_MPLS:
3793                 retval = sizeof(struct rte_mpls_hdr);
3794                 break;
3795         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3796         default:
3797                 retval = 0;
3798                 break;
3799         }
3800         return retval;
3801 }
3802
3803 #define MLX5_ENCAP_IPV4_VERSION         0x40
3804 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3805 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3806 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3807 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3808 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3809 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3810
3811 /**
3812  * Convert the encap action data from list of rte_flow_item to raw buffer
3813  *
3814  * @param[in] items
3815  *   Pointer to rte_flow_item objects list.
3816  * @param[out] buf
3817  *   Pointer to the output buffer.
3818  * @param[out] size
3819  *   Pointer to the output buffer size.
3820  * @param[out] error
3821  *   Pointer to the error structure.
3822  *
3823  * @return
3824  *   0 on success, a negative errno value otherwise and rte_errno is set.
3825  */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	/* Last-seen header of each kind, used to back-patch defaults. */
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv4_hdr *ipv4 = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct rte_gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	/*
	 * Append each item's spec to buf as raw header bytes, validating
	 * layer ordering and filling in protocol/EtherType/TTL/port/flag
	 * fields that the user left as zero.
	 */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_hdr_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct rte_ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			/* Default the outer EtherType to 802.1Q. */
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Patch EtherType in the innermost L2 header. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			/* Provide sane version/IHL and TTL defaults. */
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			/* Default version field and hop limit. */
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct rte_udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			/* Patch the IP next-protocol field for UDP. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* Default VXLAN UDP destination port and flags. */
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE must carry an explicit next protocol. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct rte_gre_hdr *)&buf[temp_size];
			/* GRE must carry an explicit protocol field. */
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
3980
3981 static int
3982 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3983 {
3984         struct rte_ether_hdr *eth = NULL;
3985         struct rte_vlan_hdr *vlan = NULL;
3986         struct rte_ipv6_hdr *ipv6 = NULL;
3987         struct rte_udp_hdr *udp = NULL;
3988         char *next_hdr;
3989         uint16_t proto;
3990
3991         eth = (struct rte_ether_hdr *)data;
3992         next_hdr = (char *)(eth + 1);
3993         proto = RTE_BE16(eth->ether_type);
3994
3995         /* VLAN skipping */
3996         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3997                 vlan = (struct rte_vlan_hdr *)next_hdr;
3998                 proto = RTE_BE16(vlan->eth_proto);
3999                 next_hdr += sizeof(struct rte_vlan_hdr);
4000         }
4001
4002         /* HW calculates IPv4 csum. no need to proceed */
4003         if (proto == RTE_ETHER_TYPE_IPV4)
4004                 return 0;
4005
4006         /* non IPv4/IPv6 header. not supported */
4007         if (proto != RTE_ETHER_TYPE_IPV6) {
4008                 return rte_flow_error_set(error, ENOTSUP,
4009                                           RTE_FLOW_ERROR_TYPE_ACTION,
4010                                           NULL, "Cannot offload non IPv4/IPv6");
4011         }
4012
4013         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4014
4015         /* ignore non UDP */
4016         if (ipv6->proto != IPPROTO_UDP)
4017                 return 0;
4018
4019         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4020         udp->dgram_cksum = 0;
4021
4022         return 0;
4023 }
4024
4025 /**
4026  * Convert L2 encap action to DV specification.
4027  *
4028  * @param[in] dev
4029  *   Pointer to rte_eth_dev structure.
4030  * @param[in] action
4031  *   Pointer to action structure.
4032  * @param[in, out] dev_flow
4033  *   Pointer to the mlx5_flow.
4034  * @param[in] transfer
4035  *   Mark if the flow is E-Switch flow.
4036  * @param[out] error
4037  *   Pointer to the error structure.
4038  *
4039  * @return
4040  *   0 on success, a negative errno value otherwise and rte_errno is set.
4041  */
4042 static int
4043 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4044                                const struct rte_flow_action *action,
4045                                struct mlx5_flow *dev_flow,
4046                                uint8_t transfer,
4047                                struct rte_flow_error *error)
4048 {
4049         const struct rte_flow_item *encap_data;
4050         const struct rte_flow_action_raw_encap *raw_encap_data;
4051         struct mlx5_flow_dv_encap_decap_resource res = {
4052                 .reformat_type =
4053                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4054                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4055                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4056         };
4057
4058         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4059                 raw_encap_data =
4060                         (const struct rte_flow_action_raw_encap *)action->conf;
4061                 res.size = raw_encap_data->size;
4062                 memcpy(res.buf, raw_encap_data->data, res.size);
4063         } else {
4064                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4065                         encap_data =
4066                                 ((const struct rte_flow_action_vxlan_encap *)
4067                                                 action->conf)->definition;
4068                 else
4069                         encap_data =
4070                                 ((const struct rte_flow_action_nvgre_encap *)
4071                                                 action->conf)->definition;
4072                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4073                                                &res.size, error))
4074                         return -rte_errno;
4075         }
4076         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4077                 return -rte_errno;
4078         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4079                 return rte_flow_error_set(error, EINVAL,
4080                                           RTE_FLOW_ERROR_TYPE_ACTION,
4081                                           NULL, "can't create L2 encap action");
4082         return 0;
4083 }
4084
4085 /**
4086  * Convert L2 decap action to DV specification.
4087  *
4088  * @param[in] dev
4089  *   Pointer to rte_eth_dev structure.
4090  * @param[in, out] dev_flow
4091  *   Pointer to the mlx5_flow.
4092  * @param[in] transfer
4093  *   Mark if the flow is E-Switch flow.
4094  * @param[out] error
4095  *   Pointer to the error structure.
4096  *
4097  * @return
4098  *   0 on success, a negative errno value otherwise and rte_errno is set.
4099  */
4100 static int
4101 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4102                                struct mlx5_flow *dev_flow,
4103                                uint8_t transfer,
4104                                struct rte_flow_error *error)
4105 {
4106         struct mlx5_flow_dv_encap_decap_resource res = {
4107                 .size = 0,
4108                 .reformat_type =
4109                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4110                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4111                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4112         };
4113
4114         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4115                 return rte_flow_error_set(error, EINVAL,
4116                                           RTE_FLOW_ERROR_TYPE_ACTION,
4117                                           NULL, "can't create L2 decap action");
4118         return 0;
4119 }
4120
4121 /**
4122  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4123  *
4124  * @param[in] dev
4125  *   Pointer to rte_eth_dev structure.
4126  * @param[in] action
4127  *   Pointer to action structure.
4128  * @param[in, out] dev_flow
4129  *   Pointer to the mlx5_flow.
4130  * @param[in] attr
4131  *   Pointer to the flow attributes.
4132  * @param[out] error
4133  *   Pointer to the error structure.
4134  *
4135  * @return
4136  *   0 on success, a negative errno value otherwise and rte_errno is set.
4137  */
4138 static int
4139 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4140                                 const struct rte_flow_action *action,
4141                                 struct mlx5_flow *dev_flow,
4142                                 const struct rte_flow_attr *attr,
4143                                 struct rte_flow_error *error)
4144 {
4145         const struct rte_flow_action_raw_encap *encap_data;
4146         struct mlx5_flow_dv_encap_decap_resource res;
4147
4148         memset(&res, 0, sizeof(res));
4149         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4150         res.size = encap_data->size;
4151         memcpy(res.buf, encap_data->data, res.size);
4152         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4153                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4154                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4155         if (attr->transfer)
4156                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4157         else
4158                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4159                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4160         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4161                 return rte_flow_error_set(error, EINVAL,
4162                                           RTE_FLOW_ERROR_TYPE_ACTION,
4163                                           NULL, "can't create encap action");
4164         return 0;
4165 }
4166
4167 /**
4168  * Create action push VLAN.
4169  *
4170  * @param[in] dev
4171  *   Pointer to rte_eth_dev structure.
4172  * @param[in] attr
4173  *   Pointer to the flow attributes.
4174  * @param[in] vlan
4175  *   Pointer to the vlan to push to the Ethernet header.
4176  * @param[in, out] dev_flow
4177  *   Pointer to the mlx5_flow.
4178  * @param[out] error
4179  *   Pointer to the error structure.
4180  *
4181  * @return
4182  *   0 on success, a negative errno value otherwise and rte_errno is set.
4183  */
4184 static int
4185 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4186                                 const struct rte_flow_attr *attr,
4187                                 const struct rte_vlan_hdr *vlan,
4188                                 struct mlx5_flow *dev_flow,
4189                                 struct rte_flow_error *error)
4190 {
4191         struct mlx5_flow_dv_push_vlan_action_resource res;
4192
4193         memset(&res, 0, sizeof(res));
4194         res.vlan_tag =
4195                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4196                                  vlan->vlan_tci);
4197         if (attr->transfer)
4198                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4199         else
4200                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4201                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4202         return flow_dv_push_vlan_action_resource_register
4203                                             (dev, &res, dev_flow, error);
4204 }
4205
4206 /**
4207  * Validate the modify-header actions.
4208  *
4209  * @param[in] action_flags
4210  *   Holds the actions detected until now.
4211  * @param[in] action
4212  *   Pointer to the modify action.
4213  * @param[out] error
4214  *   Pointer to error structure.
4215  *
4216  * @return
4217  *   0 on success, a negative errno value otherwise and rte_errno is set.
4218  */
4219 static int
4220 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4221                                    const struct rte_flow_action *action,
4222                                    struct rte_flow_error *error)
4223 {
4224         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4225                 return rte_flow_error_set(error, EINVAL,
4226                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4227                                           NULL, "action configuration not set");
4228         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4229                 return rte_flow_error_set(error, EINVAL,
4230                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4231                                           "can't have encap action before"
4232                                           " modify action");
4233         return 0;
4234 }
4235
4236 /**
4237  * Validate the modify-header MAC address actions.
4238  *
4239  * @param[in] action_flags
4240  *   Holds the actions detected until now.
4241  * @param[in] action
4242  *   Pointer to the modify action.
4243  * @param[in] item_flags
4244  *   Holds the items detected.
4245  * @param[out] error
4246  *   Pointer to error structure.
4247  *
4248  * @return
4249  *   0 on success, a negative errno value otherwise and rte_errno is set.
4250  */
4251 static int
4252 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4253                                    const struct rte_flow_action *action,
4254                                    const uint64_t item_flags,
4255                                    struct rte_flow_error *error)
4256 {
4257         int ret = 0;
4258
4259         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4260         if (!ret) {
4261                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4262                         return rte_flow_error_set(error, EINVAL,
4263                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4264                                                   NULL,
4265                                                   "no L2 item in pattern");
4266         }
4267         return ret;
4268 }
4269
4270 /**
4271  * Validate the modify-header IPv4 address actions.
4272  *
4273  * @param[in] action_flags
4274  *   Holds the actions detected until now.
4275  * @param[in] action
4276  *   Pointer to the modify action.
4277  * @param[in] item_flags
4278  *   Holds the items detected.
4279  * @param[out] error
4280  *   Pointer to error structure.
4281  *
4282  * @return
4283  *   0 on success, a negative errno value otherwise and rte_errno is set.
4284  */
4285 static int
4286 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4287                                     const struct rte_flow_action *action,
4288                                     const uint64_t item_flags,
4289                                     struct rte_flow_error *error)
4290 {
4291         int ret = 0;
4292         uint64_t layer;
4293
4294         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4295         if (!ret) {
4296                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4297                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4298                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4299                 if (!(item_flags & layer))
4300                         return rte_flow_error_set(error, EINVAL,
4301                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4302                                                   NULL,
4303                                                   "no ipv4 item in pattern");
4304         }
4305         return ret;
4306 }
4307
4308 /**
4309  * Validate the modify-header IPv6 address actions.
4310  *
4311  * @param[in] action_flags
4312  *   Holds the actions detected until now.
4313  * @param[in] action
4314  *   Pointer to the modify action.
4315  * @param[in] item_flags
4316  *   Holds the items detected.
4317  * @param[out] error
4318  *   Pointer to error structure.
4319  *
4320  * @return
4321  *   0 on success, a negative errno value otherwise and rte_errno is set.
4322  */
4323 static int
4324 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4325                                     const struct rte_flow_action *action,
4326                                     const uint64_t item_flags,
4327                                     struct rte_flow_error *error)
4328 {
4329         int ret = 0;
4330         uint64_t layer;
4331
4332         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4333         if (!ret) {
4334                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4335                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4336                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4337                 if (!(item_flags & layer))
4338                         return rte_flow_error_set(error, EINVAL,
4339                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4340                                                   NULL,
4341                                                   "no ipv6 item in pattern");
4342         }
4343         return ret;
4344 }
4345
4346 /**
4347  * Validate the modify-header TP actions.
4348  *
4349  * @param[in] action_flags
4350  *   Holds the actions detected until now.
4351  * @param[in] action
4352  *   Pointer to the modify action.
4353  * @param[in] item_flags
4354  *   Holds the items detected.
4355  * @param[out] error
4356  *   Pointer to error structure.
4357  *
4358  * @return
4359  *   0 on success, a negative errno value otherwise and rte_errno is set.
4360  */
4361 static int
4362 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4363                                   const struct rte_flow_action *action,
4364                                   const uint64_t item_flags,
4365                                   struct rte_flow_error *error)
4366 {
4367         int ret = 0;
4368         uint64_t layer;
4369
4370         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4371         if (!ret) {
4372                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4373                                  MLX5_FLOW_LAYER_INNER_L4 :
4374                                  MLX5_FLOW_LAYER_OUTER_L4;
4375                 if (!(item_flags & layer))
4376                         return rte_flow_error_set(error, EINVAL,
4377                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4378                                                   NULL, "no transport layer "
4379                                                   "in pattern");
4380         }
4381         return ret;
4382 }
4383
4384 /**
4385  * Validate the modify-header actions of increment/decrement
4386  * TCP Sequence-number.
4387  *
4388  * @param[in] action_flags
4389  *   Holds the actions detected until now.
4390  * @param[in] action
4391  *   Pointer to the modify action.
4392  * @param[in] item_flags
4393  *   Holds the items detected.
4394  * @param[out] error
4395  *   Pointer to error structure.
4396  *
4397  * @return
4398  *   0 on success, a negative errno value otherwise and rte_errno is set.
4399  */
4400 static int
4401 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4402                                        const struct rte_flow_action *action,
4403                                        const uint64_t item_flags,
4404                                        struct rte_flow_error *error)
4405 {
4406         int ret = 0;
4407         uint64_t layer;
4408
4409         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4410         if (!ret) {
4411                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4412                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4413                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4414                 if (!(item_flags & layer))
4415                         return rte_flow_error_set(error, EINVAL,
4416                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4417                                                   NULL, "no TCP item in"
4418                                                   " pattern");
4419                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4420                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4421                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4422                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4423                         return rte_flow_error_set(error, EINVAL,
4424                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4425                                                   NULL,
4426                                                   "cannot decrease and increase"
4427                                                   " TCP sequence number"
4428                                                   " at the same time");
4429         }
4430         return ret;
4431 }
4432
4433 /**
4434  * Validate the modify-header actions of increment/decrement
4435  * TCP Acknowledgment number.
4436  *
4437  * @param[in] action_flags
4438  *   Holds the actions detected until now.
4439  * @param[in] action
4440  *   Pointer to the modify action.
4441  * @param[in] item_flags
4442  *   Holds the items detected.
4443  * @param[out] error
4444  *   Pointer to error structure.
4445  *
4446  * @return
4447  *   0 on success, a negative errno value otherwise and rte_errno is set.
4448  */
4449 static int
4450 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4451                                        const struct rte_flow_action *action,
4452                                        const uint64_t item_flags,
4453                                        struct rte_flow_error *error)
4454 {
4455         int ret = 0;
4456         uint64_t layer;
4457
4458         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4459         if (!ret) {
4460                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4461                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4462                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4463                 if (!(item_flags & layer))
4464                         return rte_flow_error_set(error, EINVAL,
4465                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4466                                                   NULL, "no TCP item in"
4467                                                   " pattern");
4468                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4469                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4470                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4471                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4472                         return rte_flow_error_set(error, EINVAL,
4473                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4474                                                   NULL,
4475                                                   "cannot decrease and increase"
4476                                                   " TCP acknowledgment number"
4477                                                   " at the same time");
4478         }
4479         return ret;
4480 }
4481
4482 /**
4483  * Validate the modify-header TTL actions.
4484  *
4485  * @param[in] action_flags
4486  *   Holds the actions detected until now.
4487  * @param[in] action
4488  *   Pointer to the modify action.
4489  * @param[in] item_flags
4490  *   Holds the items detected.
4491  * @param[out] error
4492  *   Pointer to error structure.
4493  *
4494  * @return
4495  *   0 on success, a negative errno value otherwise and rte_errno is set.
4496  */
4497 static int
4498 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4499                                    const struct rte_flow_action *action,
4500                                    const uint64_t item_flags,
4501                                    struct rte_flow_error *error)
4502 {
4503         int ret = 0;
4504         uint64_t layer;
4505
4506         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4507         if (!ret) {
4508                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4509                                  MLX5_FLOW_LAYER_INNER_L3 :
4510                                  MLX5_FLOW_LAYER_OUTER_L3;
4511                 if (!(item_flags & layer))
4512                         return rte_flow_error_set(error, EINVAL,
4513                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4514                                                   NULL,
4515                                                   "no IP protocol in pattern");
4516         }
4517         return ret;
4518 }
4519
4520 static int
4521 mlx5_flow_item_field_width(enum rte_flow_field_id field)
4522 {
4523         switch (field) {
4524         case RTE_FLOW_FIELD_START:
4525                 return 32;
4526         case RTE_FLOW_FIELD_MAC_DST:
4527         case RTE_FLOW_FIELD_MAC_SRC:
4528                 return 48;
4529         case RTE_FLOW_FIELD_VLAN_TYPE:
4530                 return 16;
4531         case RTE_FLOW_FIELD_VLAN_ID:
4532                 return 12;
4533         case RTE_FLOW_FIELD_MAC_TYPE:
4534                 return 16;
4535         case RTE_FLOW_FIELD_IPV4_DSCP:
4536                 return 6;
4537         case RTE_FLOW_FIELD_IPV4_TTL:
4538                 return 8;
4539         case RTE_FLOW_FIELD_IPV4_SRC:
4540         case RTE_FLOW_FIELD_IPV4_DST:
4541                 return 32;
4542         case RTE_FLOW_FIELD_IPV6_DSCP:
4543                 return 6;
4544         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
4545                 return 8;
4546         case RTE_FLOW_FIELD_IPV6_SRC:
4547         case RTE_FLOW_FIELD_IPV6_DST:
4548                 return 128;
4549         case RTE_FLOW_FIELD_TCP_PORT_SRC:
4550         case RTE_FLOW_FIELD_TCP_PORT_DST:
4551                 return 16;
4552         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
4553         case RTE_FLOW_FIELD_TCP_ACK_NUM:
4554                 return 32;
4555         case RTE_FLOW_FIELD_TCP_FLAGS:
4556                 return 6;
4557         case RTE_FLOW_FIELD_UDP_PORT_SRC:
4558         case RTE_FLOW_FIELD_UDP_PORT_DST:
4559                 return 16;
4560         case RTE_FLOW_FIELD_VXLAN_VNI:
4561         case RTE_FLOW_FIELD_GENEVE_VNI:
4562                 return 24;
4563         case RTE_FLOW_FIELD_GTP_TEID:
4564         case RTE_FLOW_FIELD_TAG:
4565                 return 32;
4566         case RTE_FLOW_FIELD_MARK:
4567                 return 24;
4568         case RTE_FLOW_FIELD_META:
4569         case RTE_FLOW_FIELD_POINTER:
4570         case RTE_FLOW_FIELD_VALUE:
4571                 return 32;
4572         default:
4573                 MLX5_ASSERT(false);
4574         }
4575         return 0;
4576 }
4577
4578 /**
4579  * Validate the generic modify field actions.
4580  *
4581  * @param[in] action_flags
4582  *   Holds the actions detected until now.
4583  * @param[in] action
4584  *   Pointer to the modify action.
4585  * @param[in] item_flags
4586  *   Holds the items detected.
4587  * @param[out] error
4588  *   Pointer to error structure.
4589  *
4590  * @return
4591  *   Number of header fields to modify (0 or more) on success,
4592  *   a negative errno value otherwise and rte_errno is set.
4593  */
4594 static int
4595 flow_dv_validate_action_modify_field(const uint64_t action_flags,
4596                                    const struct rte_flow_action *action,
4597                                    struct rte_flow_error *error)
4598 {
4599         int ret = 0;
4600         const struct rte_flow_action_modify_field *action_modify_field =
4601                 action->conf;
4602         uint32_t dst_width =
4603                 mlx5_flow_item_field_width(action_modify_field->dst.field);
4604         uint32_t src_width =
4605                 mlx5_flow_item_field_width(action_modify_field->src.field);
4606
4607         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4608         if (ret)
4609                 return ret;
4610
4611         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4612             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4613                 if (action_modify_field->dst.offset >= dst_width ||
4614                     (action_modify_field->dst.offset % 32))
4615                         return rte_flow_error_set(error, EINVAL,
4616                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4617                                                 NULL,
4618                                                 "destination offset is too big"
4619                                                 " or not aligned to 4 bytes");
4620                 if (action_modify_field->dst.level &&
4621                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4622                         return rte_flow_error_set(error, EINVAL,
4623                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4624                                                 NULL,
4625                                                 "cannot modify inner headers");
4626         }
4627         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4628             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4629                 if (action_modify_field->src.offset >= src_width ||
4630                     (action_modify_field->src.offset % 32))
4631                         return rte_flow_error_set(error, EINVAL,
4632                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4633                                                 NULL,
4634                                                 "source offset is too big"
4635                                                 " or not aligned to 4 bytes");
4636                 if (action_modify_field->src.level &&
4637                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4638                         return rte_flow_error_set(error, EINVAL,
4639                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4640                                                 NULL,
4641                                                 "cannot copy from inner headers");
4642         }
4643         if (action_modify_field->width == 0)
4644                 return rte_flow_error_set(error, EINVAL,
4645                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4646                                                 NULL,
4647                                                 "width is required for modify action");
4648         if (action_modify_field->dst.field ==
4649             action_modify_field->src.field)
4650                 return rte_flow_error_set(error, EINVAL,
4651                                         RTE_FLOW_ERROR_TYPE_ACTION,
4652                                         NULL,
4653                                         "source and destination fields"
4654                                         " cannot be the same");
4655         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4656             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4657                 return rte_flow_error_set(error, EINVAL,
4658                                         RTE_FLOW_ERROR_TYPE_ACTION,
4659                                         NULL,
4660                                         "immediate value or a pointer to it"
4661                                         " cannot be used as a destination");
4662         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4663             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4664                 return rte_flow_error_set(error, EINVAL,
4665                                 RTE_FLOW_ERROR_TYPE_ACTION,
4666                                 NULL,
4667                                 "modifications of an arbitrary"
4668                                 " place in a packet is not supported");
4669         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4670                 return rte_flow_error_set(error, EINVAL,
4671                                 RTE_FLOW_ERROR_TYPE_ACTION,
4672                                 NULL,
4673                                 "add and sub operations"
4674                                 " are not supported");
4675         return (action_modify_field->width / 32) +
4676                !!(action_modify_field->width % 32);
4677 }
4678
4679 /**
4680  * Validate jump action.
4681  *
4682  * @param[in] action
4683  *   Pointer to the jump action.
4684  * @param[in] action_flags
4685  *   Holds the actions detected until now.
4686  * @param[in] attributes
4687  *   Pointer to flow attributes
4688  * @param[in] external
4689  *   Action belongs to flow rule created by request external to PMD.
4690  * @param[out] error
4691  *   Pointer to error structure.
4692  *
4693  * @return
4694  *   0 on success, a negative errno value otherwise and rte_errno is set.
4695  */
4696 static int
4697 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4698                              const struct mlx5_flow_tunnel *tunnel,
4699                              const struct rte_flow_action *action,
4700                              uint64_t action_flags,
4701                              const struct rte_flow_attr *attributes,
4702                              bool external, struct rte_flow_error *error)
4703 {
4704         uint32_t target_group, table;
4705         int ret = 0;
4706         struct flow_grp_info grp_info = {
4707                 .external = !!external,
4708                 .transfer = !!attributes->transfer,
4709                 .fdb_def_rule = 1,
4710                 .std_tbl_fix = 0
4711         };
4712         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4713                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4714                 return rte_flow_error_set(error, EINVAL,
4715                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4716                                           "can't have 2 fate actions in"
4717                                           " same flow");
4718         if (action_flags & MLX5_FLOW_ACTION_METER)
4719                 return rte_flow_error_set(error, ENOTSUP,
4720                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4721                                           "jump with meter not support");
4722         if (!action->conf)
4723                 return rte_flow_error_set(error, EINVAL,
4724                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4725                                           NULL, "action configuration not set");
4726         target_group =
4727                 ((const struct rte_flow_action_jump *)action->conf)->group;
4728         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4729                                        &grp_info, error);
4730         if (ret)
4731                 return ret;
4732         if (attributes->group == target_group &&
4733             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4734                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4735                 return rte_flow_error_set(error, EINVAL,
4736                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4737                                           "target group must be other than"
4738                                           " the current flow group");
4739         return 0;
4740 }
4741
4742 /*
4743  * Validate the port_id action.
4744  *
4745  * @param[in] dev
4746  *   Pointer to rte_eth_dev structure.
4747  * @param[in] action_flags
4748  *   Bit-fields that holds the actions detected until now.
4749  * @param[in] action
4750  *   Port_id RTE action structure.
4751  * @param[in] attr
4752  *   Attributes of flow that includes this action.
4753  * @param[out] error
4754  *   Pointer to error structure.
4755  *
4756  * @return
4757  *   0 on success, a negative errno value otherwise and rte_errno is set.
4758  */
4759 static int
4760 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4761                                 uint64_t action_flags,
4762                                 const struct rte_flow_action *action,
4763                                 const struct rte_flow_attr *attr,
4764                                 struct rte_flow_error *error)
4765 {
4766         const struct rte_flow_action_port_id *port_id;
4767         struct mlx5_priv *act_priv;
4768         struct mlx5_priv *dev_priv;
4769         uint16_t port;
4770
4771         if (!attr->transfer)
4772                 return rte_flow_error_set(error, ENOTSUP,
4773                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4774                                           NULL,
4775                                           "port id action is valid in transfer"
4776                                           " mode only");
4777         if (!action || !action->conf)
4778                 return rte_flow_error_set(error, ENOTSUP,
4779                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4780                                           NULL,
4781                                           "port id action parameters must be"
4782                                           " specified");
4783         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4784                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4785                 return rte_flow_error_set(error, EINVAL,
4786                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4787                                           "can have only one fate actions in"
4788                                           " a flow");
4789         dev_priv = mlx5_dev_to_eswitch_info(dev);
4790         if (!dev_priv)
4791                 return rte_flow_error_set(error, rte_errno,
4792                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4793                                           NULL,
4794                                           "failed to obtain E-Switch info");
4795         port_id = action->conf;
4796         port = port_id->original ? dev->data->port_id : port_id->id;
4797         act_priv = mlx5_port_to_eswitch_info(port, false);
4798         if (!act_priv)
4799                 return rte_flow_error_set
4800                                 (error, rte_errno,
4801                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4802                                  "failed to obtain E-Switch port id for port");
4803         if (act_priv->domain_id != dev_priv->domain_id)
4804                 return rte_flow_error_set
4805                                 (error, EINVAL,
4806                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4807                                  "port does not belong to"
4808                                  " E-Switch being configured");
4809         return 0;
4810 }
4811
4812 /**
4813  * Get the maximum number of modify header actions.
4814  *
4815  * @param dev
4816  *   Pointer to rte_eth_dev structure.
4817  * @param flags
4818  *   Flags bits to check if root level.
4819  *
4820  * @return
4821  *   Max number of modify header actions device can support.
4822  */
4823 static inline unsigned int
4824 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4825                               uint64_t flags)
4826 {
4827         /*
4828          * There's no way to directly query the max capacity from FW.
4829          * The maximal value on root table should be assumed to be supported.
4830          */
4831         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4832                 return MLX5_MAX_MODIFY_NUM;
4833         else
4834                 return MLX5_ROOT_TBL_MODIFY_NUM;
4835 }
4836
4837 /**
4838  * Validate the meter action.
4839  *
4840  * @param[in] dev
4841  *   Pointer to rte_eth_dev structure.
4842  * @param[in] action_flags
4843  *   Bit-fields that holds the actions detected until now.
4844  * @param[in] action
4845  *   Pointer to the meter action.
4846  * @param[in] attr
4847  *   Attributes of flow that includes this action.
4848  * @param[out] error
4849  *   Pointer to error structure.
4850  *
4851  * @return
4852  *   0 on success, a negative errno value otherwise and rte_ernno is set.
4853  */
4854 static int
4855 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4856                                 uint64_t action_flags,
4857                                 const struct rte_flow_action *action,
4858                                 const struct rte_flow_attr *attr,
4859                                 struct rte_flow_error *error)
4860 {
4861         struct mlx5_priv *priv = dev->data->dev_private;
4862         const struct rte_flow_action_meter *am = action->conf;
4863         struct mlx5_flow_meter *fm;
4864
4865         if (!am)
4866                 return rte_flow_error_set(error, EINVAL,
4867                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4868                                           "meter action conf is NULL");
4869
4870         if (action_flags & MLX5_FLOW_ACTION_METER)
4871                 return rte_flow_error_set(error, ENOTSUP,
4872                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4873                                           "meter chaining not support");
4874         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4875                 return rte_flow_error_set(error, ENOTSUP,
4876                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4877                                           "meter with jump not support");
4878         if (!priv->mtr_en)
4879                 return rte_flow_error_set(error, ENOTSUP,
4880                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4881                                           NULL,
4882                                           "meter action not supported");
4883         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4884         if (!fm)
4885                 return rte_flow_error_set(error, EINVAL,
4886                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4887                                           "Meter not found");
4888         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4889               (!fm->ingress && !attr->ingress && attr->egress) ||
4890               (!fm->egress && !attr->egress && attr->ingress))))
4891                 return rte_flow_error_set(error, EINVAL,
4892                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4893                                           "Flow attributes are either invalid "
4894                                           "or have a conflict with current "
4895                                           "meter attributes");
4896         return 0;
4897 }
4898
4899 /**
4900  * Validate the age action.
4901  *
4902  * @param[in] action_flags
4903  *   Holds the actions detected until now.
4904  * @param[in] action
4905  *   Pointer to the age action.
4906  * @param[in] dev
4907  *   Pointer to the Ethernet device structure.
4908  * @param[out] error
4909  *   Pointer to error structure.
4910  *
4911  * @return
4912  *   0 on success, a negative errno value otherwise and rte_errno is set.
4913  */
4914 static int
4915 flow_dv_validate_action_age(uint64_t action_flags,
4916                             const struct rte_flow_action *action,
4917                             struct rte_eth_dev *dev,
4918                             struct rte_flow_error *error)
4919 {
4920         struct mlx5_priv *priv = dev->data->dev_private;
4921         const struct rte_flow_action_age *age = action->conf;
4922
4923         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4924             !priv->sh->aso_age_mng))
4925                 return rte_flow_error_set(error, ENOTSUP,
4926                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4927                                           NULL,
4928                                           "age action not supported");
4929         if (!(action->conf))
4930                 return rte_flow_error_set(error, EINVAL,
4931                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4932                                           "configuration cannot be null");
4933         if (!(age->timeout))
4934                 return rte_flow_error_set(error, EINVAL,
4935                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4936                                           "invalid timeout value 0");
4937         if (action_flags & MLX5_FLOW_ACTION_AGE)
4938                 return rte_flow_error_set(error, EINVAL,
4939                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4940                                           "duplicate age actions set");
4941         return 0;
4942 }
4943
4944 /**
4945  * Validate the modify-header IPv4 DSCP actions.
4946  *
4947  * @param[in] action_flags
4948  *   Holds the actions detected until now.
4949  * @param[in] action
4950  *   Pointer to the modify action.
4951  * @param[in] item_flags
4952  *   Holds the items detected.
4953  * @param[out] error
4954  *   Pointer to error structure.
4955  *
4956  * @return
4957  *   0 on success, a negative errno value otherwise and rte_errno is set.
4958  */
4959 static int
4960 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4961                                          const struct rte_flow_action *action,
4962                                          const uint64_t item_flags,
4963                                          struct rte_flow_error *error)
4964 {
4965         int ret = 0;
4966
4967         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4968         if (!ret) {
4969                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4970                         return rte_flow_error_set(error, EINVAL,
4971                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4972                                                   NULL,
4973                                                   "no ipv4 item in pattern");
4974         }
4975         return ret;
4976 }
4977
4978 /**
4979  * Validate the modify-header IPv6 DSCP actions.
4980  *
4981  * @param[in] action_flags
4982  *   Holds the actions detected until now.
4983  * @param[in] action
4984  *   Pointer to the modify action.
4985  * @param[in] item_flags
4986  *   Holds the items detected.
4987  * @param[out] error
4988  *   Pointer to error structure.
4989  *
4990  * @return
4991  *   0 on success, a negative errno value otherwise and rte_errno is set.
4992  */
4993 static int
4994 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4995                                          const struct rte_flow_action *action,
4996                                          const uint64_t item_flags,
4997                                          struct rte_flow_error *error)
4998 {
4999         int ret = 0;
5000
5001         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5002         if (!ret) {
5003                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5004                         return rte_flow_error_set(error, EINVAL,
5005                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5006                                                   NULL,
5007                                                   "no ipv6 item in pattern");
5008         }
5009         return ret;
5010 }
5011
5012 /**
5013  * Match modify-header resource.
5014  *
5015  * @param list
5016  *   Pointer to the hash list.
5017  * @param entry
5018  *   Pointer to exist resource entry object.
5019  * @param key
5020  *   Key of the new entry.
5021  * @param ctx
5022  *   Pointer to new modify-header resource.
5023  *
5024  * @return
5025  *   0 on matching, non-zero otherwise.
5026  */
5027 int
5028 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5029                         struct mlx5_hlist_entry *entry,
5030                         uint64_t key __rte_unused, void *cb_ctx)
5031 {
5032         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5033         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5034         struct mlx5_flow_dv_modify_hdr_resource *resource =
5035                         container_of(entry, typeof(*resource), entry);
5036         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5037
5038         key_len += ref->actions_num * sizeof(ref->actions[0]);
5039         return ref->actions_num != resource->actions_num ||
5040                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5041 }
5042
5043 struct mlx5_hlist_entry *
5044 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5045                          void *cb_ctx)
5046 {
5047         struct mlx5_dev_ctx_shared *sh = list->ctx;
5048         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5049         struct mlx5dv_dr_domain *ns;
5050         struct mlx5_flow_dv_modify_hdr_resource *entry;
5051         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5052         int ret;
5053         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5054         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5055
5056         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5057                             SOCKET_ID_ANY);
5058         if (!entry) {
5059                 rte_flow_error_set(ctx->error, ENOMEM,
5060                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5061                                    "cannot allocate resource memory");
5062                 return NULL;
5063         }
5064         rte_memcpy(&entry->ft_type,
5065                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5066                    key_len + data_len);
5067         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5068                 ns = sh->fdb_domain;
5069         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5070                 ns = sh->tx_domain;
5071         else
5072                 ns = sh->rx_domain;
5073         ret = mlx5_flow_os_create_flow_action_modify_header
5074                                         (sh->ctx, ns, entry,
5075                                          data_len, &entry->action);
5076         if (ret) {
5077                 mlx5_free(entry);
5078                 rte_flow_error_set(ctx->error, ENOMEM,
5079                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5080                                    NULL, "cannot create modification action");
5081                 return NULL;
5082         }
5083         return &entry->entry;
5084 }
5085
5086 /**
5087  * Validate the sample action.
5088  *
5089  * @param[in, out] action_flags
5090  *   Holds the actions detected until now.
5091  * @param[in] action
5092  *   Pointer to the sample action.
5093  * @param[in] dev
5094  *   Pointer to the Ethernet device structure.
5095  * @param[in] attr
5096  *   Attributes of flow that includes this action.
5097  * @param[in] item_flags
5098  *   Holds the items detected.
5099  * @param[in] rss
5100  *   Pointer to the RSS action.
5101  * @param[out] sample_rss
5102  *   Pointer to the RSS action in sample action list.
5103  * @param[out] count
5104  *   Pointer to the COUNT action in sample action list.
5105  * @param[out] fdb_mirror_limit
5106  *   Pointer to the FDB mirror limitation flag.
5107  * @param[out] error
5108  *   Pointer to error structure.
5109  *
5110  * @return
5111  *   0 on success, a negative errno value otherwise and rte_errno is set.
5112  */
5113 static int
5114 flow_dv_validate_action_sample(uint64_t *action_flags,
5115                                const struct rte_flow_action *action,
5116                                struct rte_eth_dev *dev,
5117                                const struct rte_flow_attr *attr,
5118                                uint64_t item_flags,
5119                                const struct rte_flow_action_rss *rss,
5120                                const struct rte_flow_action_rss **sample_rss,
5121                                const struct rte_flow_action_count **count,
5122                                int *fdb_mirror_limit,
5123                                struct rte_flow_error *error)
5124 {
5125         struct mlx5_priv *priv = dev->data->dev_private;
5126         struct mlx5_dev_config *dev_conf = &priv->config;
5127         const struct rte_flow_action_sample *sample = action->conf;
5128         const struct rte_flow_action *act;
5129         uint64_t sub_action_flags = 0;
5130         uint16_t queue_index = 0xFFFF;
5131         int actions_n = 0;
5132         int ret;
5133
5134         if (!sample)
5135                 return rte_flow_error_set(error, EINVAL,
5136                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5137                                           "configuration cannot be NULL");
5138         if (sample->ratio == 0)
5139                 return rte_flow_error_set(error, EINVAL,
5140                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5141                                           "ratio value starts from 1");
5142         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5143                 return rte_flow_error_set(error, ENOTSUP,
5144                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5145                                           NULL,
5146                                           "sample action not supported");
5147         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5148                 return rte_flow_error_set(error, EINVAL,
5149                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5150                                           "Multiple sample actions not "
5151                                           "supported");
5152         if (*action_flags & MLX5_FLOW_ACTION_METER)
5153                 return rte_flow_error_set(error, EINVAL,
5154                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5155                                           "wrong action order, meter should "
5156                                           "be after sample action");
5157         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5158                 return rte_flow_error_set(error, EINVAL,
5159                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5160                                           "wrong action order, jump should "
5161                                           "be after sample action");
5162         act = sample->actions;
5163         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5164                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5165                         return rte_flow_error_set(error, ENOTSUP,
5166                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5167                                                   act, "too many actions");
5168                 switch (act->type) {
5169                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5170                         ret = mlx5_flow_validate_action_queue(act,
5171                                                               sub_action_flags,
5172                                                               dev,
5173                                                               attr, error);
5174                         if (ret < 0)
5175                                 return ret;
5176                         queue_index = ((const struct rte_flow_action_queue *)
5177                                                         (act->conf))->index;
5178                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5179                         ++actions_n;
5180                         break;
5181                 case RTE_FLOW_ACTION_TYPE_RSS:
5182                         *sample_rss = act->conf;
5183                         ret = mlx5_flow_validate_action_rss(act,
5184                                                             sub_action_flags,
5185                                                             dev, attr,
5186                                                             item_flags,
5187                                                             error);
5188                         if (ret < 0)
5189                                 return ret;
5190                         if (rss && *sample_rss &&
5191                             ((*sample_rss)->level != rss->level ||
5192                             (*sample_rss)->types != rss->types))
5193                                 return rte_flow_error_set(error, ENOTSUP,
5194                                         RTE_FLOW_ERROR_TYPE_ACTION,
5195                                         NULL,
5196                                         "Can't use the different RSS types "
5197                                         "or level in the same flow");
5198                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5199                                 queue_index = (*sample_rss)->queue[0];
5200                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5201                         ++actions_n;
5202                         break;
5203                 case RTE_FLOW_ACTION_TYPE_MARK:
5204                         ret = flow_dv_validate_action_mark(dev, act,
5205                                                            sub_action_flags,
5206                                                            attr, error);
5207                         if (ret < 0)
5208                                 return ret;
5209                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5210                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5211                                                 MLX5_FLOW_ACTION_MARK_EXT;
5212                         else
5213                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5214                         ++actions_n;
5215                         break;
5216                 case RTE_FLOW_ACTION_TYPE_COUNT:
5217                         ret = flow_dv_validate_action_count
5218                                 (dev, act,
5219                                  *action_flags | sub_action_flags,
5220                                  error);
5221                         if (ret < 0)
5222                                 return ret;
5223                         *count = act->conf;
5224                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5225                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5226                         ++actions_n;
5227                         break;
5228                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5229                         ret = flow_dv_validate_action_port_id(dev,
5230                                                               sub_action_flags,
5231                                                               act,
5232                                                               attr,
5233                                                               error);
5234                         if (ret)
5235                                 return ret;
5236                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5237                         ++actions_n;
5238                         break;
5239                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5240                         ret = flow_dv_validate_action_raw_encap_decap
5241                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5242                                  &actions_n, action, item_flags, error);
5243                         if (ret < 0)
5244                                 return ret;
5245                         ++actions_n;
5246                         break;
5247                 default:
5248                         return rte_flow_error_set(error, ENOTSUP,
5249                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5250                                                   NULL,
5251                                                   "Doesn't support optional "
5252                                                   "action");
5253                 }
5254         }
5255         if (attr->ingress && !attr->transfer) {
5256                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5257                                           MLX5_FLOW_ACTION_RSS)))
5258                         return rte_flow_error_set(error, EINVAL,
5259                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5260                                                   NULL,
5261                                                   "Ingress must has a dest "
5262                                                   "QUEUE for Sample");
5263         } else if (attr->egress && !attr->transfer) {
5264                 return rte_flow_error_set(error, ENOTSUP,
5265                                           RTE_FLOW_ERROR_TYPE_ACTION,
5266                                           NULL,
5267                                           "Sample Only support Ingress "
5268                                           "or E-Switch");
5269         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5270                 MLX5_ASSERT(attr->transfer);
5271                 if (sample->ratio > 1)
5272                         return rte_flow_error_set(error, ENOTSUP,
5273                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5274                                                   NULL,
5275                                                   "E-Switch doesn't support "
5276                                                   "any optional action "
5277                                                   "for sampling");
5278                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5279                         return rte_flow_error_set(error, ENOTSUP,
5280                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5281                                                   NULL,
5282                                                   "unsupported action QUEUE");
5283                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5284                         return rte_flow_error_set(error, ENOTSUP,
5285                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5286                                                   NULL,
5287                                                   "unsupported action QUEUE");
5288                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5289                         return rte_flow_error_set(error, EINVAL,
5290                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5291                                                   NULL,
5292                                                   "E-Switch must has a dest "
5293                                                   "port for mirroring");
5294                 if (!priv->config.hca_attr.reg_c_preserve &&
5295                      priv->representor_id != -1)
5296                         *fdb_mirror_limit = 1;
5297         }
5298         /* Continue validation for Xcap actions.*/
5299         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5300             (queue_index == 0xFFFF ||
5301              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5302                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5303                      MLX5_FLOW_XCAP_ACTIONS)
5304                         return rte_flow_error_set(error, ENOTSUP,
5305                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5306                                                   NULL, "encap and decap "
5307                                                   "combination aren't "
5308                                                   "supported");
5309                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5310                                                         MLX5_FLOW_ACTION_ENCAP))
5311                         return rte_flow_error_set(error, ENOTSUP,
5312                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5313                                                   NULL, "encap is not supported"
5314                                                   " for ingress traffic");
5315         }
5316         return 0;
5317 }
5318
5319 /**
5320  * Find existing modify-header resource or create and register a new one.
5321  *
5322  * @param dev[in, out]
5323  *   Pointer to rte_eth_dev structure.
5324  * @param[in, out] resource
5325  *   Pointer to modify-header resource.
5326  * @parm[in, out] dev_flow
5327  *   Pointer to the dev_flow.
5328  * @param[out] error
5329  *   pointer to error structure.
5330  *
5331  * @return
5332  *   0 on success otherwise -errno and errno is set.
5333  */
5334 static int
5335 flow_dv_modify_hdr_resource_register
5336                         (struct rte_eth_dev *dev,
5337                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5338                          struct mlx5_flow *dev_flow,
5339                          struct rte_flow_error *error)
5340 {
5341         struct mlx5_priv *priv = dev->data->dev_private;
5342         struct mlx5_dev_ctx_shared *sh = priv->sh;
5343         uint32_t key_len = sizeof(*resource) -
5344                            offsetof(typeof(*resource), ft_type) +
5345                            resource->actions_num * sizeof(resource->actions[0]);
5346         struct mlx5_hlist_entry *entry;
5347         struct mlx5_flow_cb_ctx ctx = {
5348                 .error = error,
5349                 .data = resource,
5350         };
5351         uint64_t key64;
5352
5353         resource->flags = dev_flow->dv.group ? 0 :
5354                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5355         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5356                                     resource->flags))
5357                 return rte_flow_error_set(error, EOVERFLOW,
5358                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5359                                           "too many modify header items");
5360         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5361         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5362         if (!entry)
5363                 return -rte_errno;
5364         resource = container_of(entry, typeof(*resource), entry);
5365         dev_flow->handle->dvh.modify_hdr = resource;
5366         return 0;
5367 }
5368
5369 /**
5370  * Get DV flow counter by index.
5371  *
5372  * @param[in] dev
5373  *   Pointer to the Ethernet device structure.
5374  * @param[in] idx
5375  *   mlx5 flow counter index in the container.
5376  * @param[out] ppool
5377  *   mlx5 flow counter pool in the container,
5378  *
5379  * @return
5380  *   Pointer to the counter, NULL otherwise.
5381  */
5382 static struct mlx5_flow_counter *
5383 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5384                            uint32_t idx,
5385                            struct mlx5_flow_counter_pool **ppool)
5386 {
5387         struct mlx5_priv *priv = dev->data->dev_private;
5388         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5389         struct mlx5_flow_counter_pool *pool;
5390
5391         /* Decrease to original index and clear shared bit. */
5392         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5393         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5394         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5395         MLX5_ASSERT(pool);
5396         if (ppool)
5397                 *ppool = pool;
5398         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5399 }
5400
5401 /**
5402  * Check the devx counter belongs to the pool.
5403  *
5404  * @param[in] pool
5405  *   Pointer to the counter pool.
5406  * @param[in] id
5407  *   The counter devx ID.
5408  *
5409  * @return
5410  *   True if counter belongs to the pool, false otherwise.
5411  */
5412 static bool
5413 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5414 {
5415         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5416                    MLX5_COUNTERS_PER_POOL;
5417
5418         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5419                 return true;
5420         return false;
5421 }
5422
5423 /**
5424  * Get a pool by devx counter ID.
5425  *
5426  * @param[in] cmng
5427  *   Pointer to the counter management.
5428  * @param[in] id
5429  *   The counter devx ID.
5430  *
5431  * @return
5432  *   The counter pool pointer if exists, NULL otherwise,
5433  */
5434 static struct mlx5_flow_counter_pool *
5435 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5436 {
5437         uint32_t i;
5438         struct mlx5_flow_counter_pool *pool = NULL;
5439
5440         rte_spinlock_lock(&cmng->pool_update_sl);
5441         /* Check last used pool. */
5442         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5443             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5444                 pool = cmng->pools[cmng->last_pool_idx];
5445                 goto out;
5446         }
5447         /* ID out of range means no suitable pool in the container. */
5448         if (id > cmng->max_id || id < cmng->min_id)
5449                 goto out;
5450         /*
5451          * Find the pool from the end of the container, since mostly counter
5452          * ID is sequence increasing, and the last pool should be the needed
5453          * one.
5454          */
5455         i = cmng->n_valid;
5456         while (i--) {
5457                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5458
5459                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5460                         pool = pool_tmp;
5461                         break;
5462                 }
5463         }
5464 out:
5465         rte_spinlock_unlock(&cmng->pool_update_sl);
5466         return pool;
5467 }
5468
5469 /**
5470  * Resize a counter container.
5471  *
5472  * @param[in] dev
5473  *   Pointer to the Ethernet device structure.
5474  *
5475  * @return
5476  *   0 on success, otherwise negative errno value and rte_errno is set.
5477  */
5478 static int
5479 flow_dv_container_resize(struct rte_eth_dev *dev)
5480 {
5481         struct mlx5_priv *priv = dev->data->dev_private;
5482         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5483         void *old_pools = cmng->pools;
5484         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5485         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5486         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5487
5488         if (!pools) {
5489                 rte_errno = ENOMEM;
5490                 return -ENOMEM;
5491         }
5492         if (old_pools)
5493                 memcpy(pools, old_pools, cmng->n *
5494                                        sizeof(struct mlx5_flow_counter_pool *));
5495         cmng->n = resize;
5496         cmng->pools = pools;
5497         if (old_pools)
5498                 mlx5_free(old_pools);
5499         return 0;
5500 }
5501
5502 /**
5503  * Query a devx flow counter.
5504  *
5505  * @param[in] dev
5506  *   Pointer to the Ethernet device structure.
5507  * @param[in] cnt
5508  *   Index to the flow counter.
5509  * @param[out] pkts
5510  *   The statistics value of packets.
5511  * @param[out] bytes
5512  *   The statistics value of bytes.
5513  *
5514  * @return
5515  *   0 on success, otherwise a negative errno value and rte_errno is set.
5516  */
5517 static inline int
5518 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5519                      uint64_t *bytes)
5520 {
5521         struct mlx5_priv *priv = dev->data->dev_private;
5522         struct mlx5_flow_counter_pool *pool = NULL;
5523         struct mlx5_flow_counter *cnt;
5524         int offset;
5525
5526         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5527         MLX5_ASSERT(pool);
5528         if (priv->sh->cmng.counter_fallback)
5529                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5530                                         0, pkts, bytes, 0, NULL, NULL, 0);
5531         rte_spinlock_lock(&pool->sl);
5532         if (!pool->raw) {
5533                 *pkts = 0;
5534                 *bytes = 0;
5535         } else {
5536                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5537                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5538                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5539         }
5540         rte_spinlock_unlock(&pool->sl);
5541         return 0;
5542 }
5543
/**
 * Create and initialize a new counter pool.
 *
 * The pool is registered in the global counter container under
 * cmng->pool_update_sl; the container array is resized first when full.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dcs
 *   The devX counter handle; stored as the pool minimal counter object.
 * @param[in] age
 *   Whether the pool is for counters that were allocated for aging.
 *
 * @return
 *   The pool container pointer on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Counter array follows the pool header, age params follow counters. */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* Container full - grow the pool pointer array before inserting. */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		/*
		 * Fallback mode: track the devx counter ID range covered by
		 * the pools so counters can be located by raw ID later.
		 */
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
5606
/**
 * Prepare a new counter and/or a new counter pool.
 *
 * In fallback mode a single counter is allocated from the device and
 * placed into the pool matching its ID (creating the pool on demand).
 * Otherwise a bulk of counters is allocated, a fresh pool is created
 * for them and all but the first counter are pushed onto the global
 * free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] cnt_free
 *   Where to put the pointer of a new counter.
 * @param[in] age
 *   Whether the pool is for counter that was allocated for aging.
 *
 * @return
 *   The counter pool pointer and @p cnt_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		/* Slot within the pool is derived from the devx counter ID. */
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		/* The counter owns the devx object until it is activated. */
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Bulk allocation: 0x4 requests a batch of counters. */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/*
	 * Build the free list locally, then splice it into the global list
	 * under the lock. Counter 0 is returned to the caller directly.
	 */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
5680
/**
 * Allocate a flow counter.
 *
 * Takes a counter from the per-type free list, preparing a new pool when
 * the list is empty. A DV count action is created lazily on first use of
 * the counter slot, and the reset (baseline) statistics are recorded.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] age
 *   Whether the counter was allocated for aging.
 *
 * @return
 *   Index to flow counter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty - allocate a new pool/counter. */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	/* Fallback: the devx object moves from "free" to "active" hands. */
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			/* Bulk mode: offset into the pool's shared object. */
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback: dedicated single-counter object. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	return cnt_idx;
err:
	/* Roll back: return the counter to the free list untouched. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
5760
5761 /**
5762  * Allocate a shared flow counter.
5763  *
5764  * @param[in] ctx
5765  *   Pointer to the shared counter configuration.
5766  * @param[in] data
5767  *   Pointer to save the allocated counter index.
5768  *
5769  * @return
5770  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5771  */
5772
5773 static int32_t
5774 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5775 {
5776         struct mlx5_shared_counter_conf *conf = ctx;
5777         struct rte_eth_dev *dev = conf->dev;
5778         struct mlx5_flow_counter *cnt;
5779
5780         data->dword = flow_dv_counter_alloc(dev, 0);
5781         data->dword |= MLX5_CNT_SHARED_OFFSET;
5782         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5783         cnt->shared_info.id = conf->id;
5784         return 0;
5785 }
5786
5787 /**
5788  * Get a shared flow counter.
5789  *
5790  * @param[in] dev
5791  *   Pointer to the Ethernet device structure.
5792  * @param[in] id
5793  *   Counter identifier.
5794  *
5795  * @return
5796  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5797  */
5798 static uint32_t
5799 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5800 {
5801         struct mlx5_priv *priv = dev->data->dev_private;
5802         struct mlx5_shared_counter_conf conf = {
5803                 .dev = dev,
5804                 .id = id,
5805         };
5806         union mlx5_l3t_data data = {
5807                 .dword = 0,
5808         };
5809
5810         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5811                                flow_dv_counter_alloc_shared_cb, &conf);
5812         return data.dword;
5813 }
5814
5815 /**
5816  * Get age param from counter index.
5817  *
5818  * @param[in] dev
5819  *   Pointer to the Ethernet device structure.
5820  * @param[in] counter
5821  *   Index to the counter handler.
5822  *
5823  * @return
5824  *   The aging parameter specified for the counter index.
5825  */
5826 static struct mlx5_age_param*
5827 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5828                                 uint32_t counter)
5829 {
5830         struct mlx5_flow_counter *cnt;
5831         struct mlx5_flow_counter_pool *pool = NULL;
5832
5833         flow_dv_counter_get_by_idx(dev, counter, &pool);
5834         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5835         cnt = MLX5_POOL_GET_CNT(pool, counter);
5836         return MLX5_CNT_TO_AGE(cnt);
5837 }
5838
/**
 * Remove a flow counter from aged counter list.
 *
 * Tries to move the counter's age state from AGE_CANDIDATE straight to
 * AGE_FREE; if that fails the counter already left the candidate state
 * and must be unlinked from the port's aged-counters list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 * @param[in] cnt
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
				uint32_t counter, struct mlx5_flow_counter *cnt)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/**
		 * The CAS failed, so the counter is not in AGE_CANDIDATE
		 * anymore (e.g. it aged out). The lock is still needed even
		 * on age timeout, since the counter may still be in process
		 * on the aged list.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
5873
/**
 * Release a flow counter.
 *
 * Shared counters are only released when the last reference in the
 * shared-counter ID table is cleared. Aged counters are first detached
 * from the aging machinery, then the counter is returned to a free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	/* A shared counter with remaining references is kept alive. */
	if (IS_SHARED_CNT(counter) &&
	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
		return;
	if (pool->is_aged)
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	cnt->pool = pool;
	/*
	 * Put the counter back to the list to be updated in non-fallback
	 * mode. Currently, two lists are used alternately: while one is
	 * being queried, the freed counter is added to the other list,
	 * selected by the pool query_gen value. After the query finishes,
	 * the counters on that list are moved to the global container
	 * counter list. The active list switches when a query starts, so
	 * no lock against the query callback is needed here beyond the
	 * pool csl - callback and release operate on different lists.
	 */
	if (!priv->sh->cmng.counter_fallback) {
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		/* Fallback: hand the devx object back to the free side. */
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
5924
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, NULL for non-tunnel rules.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] grp_info
 *   Pointer to the group translation info (external/transfer/fdb flags).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non root table.
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct mlx5_flow_tunnel *tunnel,
			    const struct rte_flow_attr *attributes,
			    const struct flow_grp_info *grp_info,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
	int ret = 0;

#ifndef HAVE_MLX5DV_DR
	/* Without DR support only the root group (0) can be used. */
	RTE_SET_USED(tunnel);
	RTE_SET_USED(grp_info);
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table = 0;

	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	/* Table 0 is the root table - report it through the return value. */
	if (!table)
		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
	    attributes->priority > lowest_priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return ret;
}
6002
6003 /**
6004  * Internal validation function. For validating both actions and items.
6005  *
6006  * @param[in] dev
6007  *   Pointer to the rte_eth_dev structure.
6008  * @param[in] attr
6009  *   Pointer to the flow attributes.
6010  * @param[in] items
6011  *   Pointer to the list of items.
6012  * @param[in] actions
6013  *   Pointer to the list of actions.
6014  * @param[in] external
6015  *   This flow rule is created by request external to PMD.
6016  * @param[in] hairpin
6017  *   Number of hairpin TX actions, 0 means classic flow.
6018  * @param[out] error
6019  *   Pointer to the error structure.
6020  *
6021  * @return
6022  *   0 on success, a negative errno value otherwise and rte_errno is set.
6023  */
6024 static int
6025 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6026                  const struct rte_flow_item items[],
6027                  const struct rte_flow_action actions[],
6028                  bool external, int hairpin, struct rte_flow_error *error)
6029 {
6030         int ret;
6031         uint64_t action_flags = 0;
6032         uint64_t item_flags = 0;
6033         uint64_t last_item = 0;
6034         uint8_t next_protocol = 0xff;
6035         uint16_t ether_type = 0;
6036         int actions_n = 0;
6037         uint8_t item_ipv6_proto = 0;
6038         int fdb_mirror_limit = 0;
6039         int modify_after_mirror = 0;
6040         const struct rte_flow_item *geneve_item = NULL;
6041         const struct rte_flow_item *gre_item = NULL;
6042         const struct rte_flow_item *gtp_item = NULL;
6043         const struct rte_flow_action_raw_decap *decap;
6044         const struct rte_flow_action_raw_encap *encap;
6045         const struct rte_flow_action_rss *rss = NULL;
6046         const struct rte_flow_action_rss *sample_rss = NULL;
6047         const struct rte_flow_action_count *count = NULL;
6048         const struct rte_flow_action_count *sample_count = NULL;
6049         const struct rte_flow_item_tcp nic_tcp_mask = {
6050                 .hdr = {
6051                         .tcp_flags = 0xFF,
6052                         .src_port = RTE_BE16(UINT16_MAX),
6053                         .dst_port = RTE_BE16(UINT16_MAX),
6054                 }
6055         };
6056         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6057                 .hdr = {
6058                         .src_addr =
6059                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6060                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6061                         .dst_addr =
6062                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6063                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6064                         .vtc_flow = RTE_BE32(0xffffffff),
6065                         .proto = 0xff,
6066                         .hop_limits = 0xff,
6067                 },
6068                 .has_frag_ext = 1,
6069         };
6070         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6071                 .hdr = {
6072                         .common = {
6073                                 .u32 =
6074                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6075                                         .type = 0xFF,
6076                                         }).u32),
6077                         },
6078                         .dummy[0] = 0xffffffff,
6079                 },
6080         };
6081         struct mlx5_priv *priv = dev->data->dev_private;
6082         struct mlx5_dev_config *dev_conf = &priv->config;
6083         uint16_t queue_index = 0xFFFF;
6084         const struct rte_flow_item_vlan *vlan_m = NULL;
6085         uint32_t rw_act_num = 0;
6086         uint64_t is_root;
6087         const struct mlx5_flow_tunnel *tunnel;
6088         struct flow_grp_info grp_info = {
6089                 .external = !!external,
6090                 .transfer = !!attr->transfer,
6091                 .fdb_def_rule = !!priv->fdb_def_rule,
6092         };
6093         const struct rte_eth_hairpin_conf *conf;
6094
6095         if (items == NULL)
6096                 return -1;
6097         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
6098                 tunnel = flow_items_to_tunnel(items);
6099                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6100                                 MLX5_FLOW_ACTION_DECAP;
6101         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
6102                 tunnel = flow_actions_to_tunnel(actions);
6103                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6104         } else {
6105                 tunnel = NULL;
6106         }
6107         if (tunnel && priv->representor)
6108                 return rte_flow_error_set(error, ENOTSUP,
6109                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6110                                           "decap not supported "
6111                                           "for VF representor");
6112         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6113                                 (dev, tunnel, attr, items, actions);
6114         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6115         if (ret < 0)
6116                 return ret;
6117         is_root = (uint64_t)ret;
6118         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6119                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6120                 int type = items->type;
6121
6122                 if (!mlx5_flow_os_item_supported(type))
6123                         return rte_flow_error_set(error, ENOTSUP,
6124                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6125                                                   NULL, "item not supported");
6126                 switch (type) {
6127                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
6128                         if (items[0].type != (typeof(items[0].type))
6129                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
6130                                 return rte_flow_error_set
6131                                                 (error, EINVAL,
6132                                                 RTE_FLOW_ERROR_TYPE_ITEM,
6133                                                 NULL, "MLX5 private items "
6134                                                 "must be the first");
6135                         break;
6136                 case RTE_FLOW_ITEM_TYPE_VOID:
6137                         break;
6138                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6139                         ret = flow_dv_validate_item_port_id
6140                                         (dev, items, attr, item_flags, error);
6141                         if (ret < 0)
6142                                 return ret;
6143                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6144                         break;
6145                 case RTE_FLOW_ITEM_TYPE_ETH:
6146                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6147                                                           true, error);
6148                         if (ret < 0)
6149                                 return ret;
6150                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6151                                              MLX5_FLOW_LAYER_OUTER_L2;
6152                         if (items->mask != NULL && items->spec != NULL) {
6153                                 ether_type =
6154                                         ((const struct rte_flow_item_eth *)
6155                                          items->spec)->type;
6156                                 ether_type &=
6157                                         ((const struct rte_flow_item_eth *)
6158                                          items->mask)->type;
6159                                 ether_type = rte_be_to_cpu_16(ether_type);
6160                         } else {
6161                                 ether_type = 0;
6162                         }
6163                         break;
6164                 case RTE_FLOW_ITEM_TYPE_VLAN:
6165                         ret = flow_dv_validate_item_vlan(items, item_flags,
6166                                                          dev, error);
6167                         if (ret < 0)
6168                                 return ret;
6169                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6170                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6171                         if (items->mask != NULL && items->spec != NULL) {
6172                                 ether_type =
6173                                         ((const struct rte_flow_item_vlan *)
6174                                          items->spec)->inner_type;
6175                                 ether_type &=
6176                                         ((const struct rte_flow_item_vlan *)
6177                                          items->mask)->inner_type;
6178                                 ether_type = rte_be_to_cpu_16(ether_type);
6179                         } else {
6180                                 ether_type = 0;
6181                         }
6182                         /* Store outer VLAN mask for of_push_vlan action. */
6183                         if (!tunnel)
6184                                 vlan_m = items->mask;
6185                         break;
6186                 case RTE_FLOW_ITEM_TYPE_IPV4:
6187                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6188                                                   &item_flags, &tunnel);
6189                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6190                                                          last_item, ether_type,
6191                                                          error);
6192                         if (ret < 0)
6193                                 return ret;
6194                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6195                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6196                         if (items->mask != NULL &&
6197                             ((const struct rte_flow_item_ipv4 *)
6198                              items->mask)->hdr.next_proto_id) {
6199                                 next_protocol =
6200                                         ((const struct rte_flow_item_ipv4 *)
6201                                          (items->spec))->hdr.next_proto_id;
6202                                 next_protocol &=
6203                                         ((const struct rte_flow_item_ipv4 *)
6204                                          (items->mask))->hdr.next_proto_id;
6205                         } else {
6206                                 /* Reset for inner layer. */
6207                                 next_protocol = 0xff;
6208                         }
6209                         break;
6210                 case RTE_FLOW_ITEM_TYPE_IPV6:
6211                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6212                                                   &item_flags, &tunnel);
6213                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6214                                                            last_item,
6215                                                            ether_type,
6216                                                            &nic_ipv6_mask,
6217                                                            error);
6218                         if (ret < 0)
6219                                 return ret;
6220                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6221                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6222                         if (items->mask != NULL &&
6223                             ((const struct rte_flow_item_ipv6 *)
6224                              items->mask)->hdr.proto) {
6225                                 item_ipv6_proto =
6226                                         ((const struct rte_flow_item_ipv6 *)
6227                                          items->spec)->hdr.proto;
6228                                 next_protocol =
6229                                         ((const struct rte_flow_item_ipv6 *)
6230                                          items->spec)->hdr.proto;
6231                                 next_protocol &=
6232                                         ((const struct rte_flow_item_ipv6 *)
6233                                          items->mask)->hdr.proto;
6234                         } else {
6235                                 /* Reset for inner layer. */
6236                                 next_protocol = 0xff;
6237                         }
6238                         break;
6239                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6240                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6241                                                                   item_flags,
6242                                                                   error);
6243                         if (ret < 0)
6244                                 return ret;
6245                         last_item = tunnel ?
6246                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6247                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6248                         if (items->mask != NULL &&
6249                             ((const struct rte_flow_item_ipv6_frag_ext *)
6250                              items->mask)->hdr.next_header) {
6251                                 next_protocol =
6252                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6253                                  items->spec)->hdr.next_header;
6254                                 next_protocol &=
6255                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6256                                  items->mask)->hdr.next_header;
6257                         } else {
6258                                 /* Reset for inner layer. */
6259                                 next_protocol = 0xff;
6260                         }
6261                         break;
6262                 case RTE_FLOW_ITEM_TYPE_TCP:
6263                         ret = mlx5_flow_validate_item_tcp
6264                                                 (items, item_flags,
6265                                                  next_protocol,
6266                                                  &nic_tcp_mask,
6267                                                  error);
6268                         if (ret < 0)
6269                                 return ret;
6270                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6271                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6272                         break;
6273                 case RTE_FLOW_ITEM_TYPE_UDP:
6274                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6275                                                           next_protocol,
6276                                                           error);
6277                         if (ret < 0)
6278                                 return ret;
6279                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6280                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6281                         break;
6282                 case RTE_FLOW_ITEM_TYPE_GRE:
6283                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6284                                                           next_protocol, error);
6285                         if (ret < 0)
6286                                 return ret;
6287                         gre_item = items;
6288                         last_item = MLX5_FLOW_LAYER_GRE;
6289                         break;
6290                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6291                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6292                                                             next_protocol,
6293                                                             error);
6294                         if (ret < 0)
6295                                 return ret;
6296                         last_item = MLX5_FLOW_LAYER_NVGRE;
6297                         break;
6298                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6299                         ret = mlx5_flow_validate_item_gre_key
6300                                 (items, item_flags, gre_item, error);
6301                         if (ret < 0)
6302                                 return ret;
6303                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6304                         break;
6305                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6306                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6307                                                             error);
6308                         if (ret < 0)
6309                                 return ret;
6310                         last_item = MLX5_FLOW_LAYER_VXLAN;
6311                         break;
6312                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6313                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6314                                                                 item_flags, dev,
6315                                                                 error);
6316                         if (ret < 0)
6317                                 return ret;
6318                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6319                         break;
6320                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6321                         ret = mlx5_flow_validate_item_geneve(items,
6322                                                              item_flags, dev,
6323                                                              error);
6324                         if (ret < 0)
6325                                 return ret;
6326                         geneve_item = items;
6327                         last_item = MLX5_FLOW_LAYER_GENEVE;
6328                         break;
6329                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6330                         ret = mlx5_flow_validate_item_geneve_opt(items,
6331                                                                  last_item,
6332                                                                  geneve_item,
6333                                                                  dev,
6334                                                                  error);
6335                         if (ret < 0)
6336                                 return ret;
6337                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6338                         break;
6339                 case RTE_FLOW_ITEM_TYPE_MPLS:
6340                         ret = mlx5_flow_validate_item_mpls(dev, items,
6341                                                            item_flags,
6342                                                            last_item, error);
6343                         if (ret < 0)
6344                                 return ret;
6345                         last_item = MLX5_FLOW_LAYER_MPLS;
6346                         break;
6347
6348                 case RTE_FLOW_ITEM_TYPE_MARK:
6349                         ret = flow_dv_validate_item_mark(dev, items, attr,
6350                                                          error);
6351                         if (ret < 0)
6352                                 return ret;
6353                         last_item = MLX5_FLOW_ITEM_MARK;
6354                         break;
6355                 case RTE_FLOW_ITEM_TYPE_META:
6356                         ret = flow_dv_validate_item_meta(dev, items, attr,
6357                                                          error);
6358                         if (ret < 0)
6359                                 return ret;
6360                         last_item = MLX5_FLOW_ITEM_METADATA;
6361                         break;
6362                 case RTE_FLOW_ITEM_TYPE_ICMP:
6363                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6364                                                            next_protocol,
6365                                                            error);
6366                         if (ret < 0)
6367                                 return ret;
6368                         last_item = MLX5_FLOW_LAYER_ICMP;
6369                         break;
6370                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6371                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6372                                                             next_protocol,
6373                                                             error);
6374                         if (ret < 0)
6375                                 return ret;
6376                         item_ipv6_proto = IPPROTO_ICMPV6;
6377                         last_item = MLX5_FLOW_LAYER_ICMP6;
6378                         break;
6379                 case RTE_FLOW_ITEM_TYPE_TAG:
6380                         ret = flow_dv_validate_item_tag(dev, items,
6381                                                         attr, error);
6382                         if (ret < 0)
6383                                 return ret;
6384                         last_item = MLX5_FLOW_ITEM_TAG;
6385                         break;
6386                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6387                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6388                         break;
6389                 case RTE_FLOW_ITEM_TYPE_GTP:
6390                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
6391                                                         error);
6392                         if (ret < 0)
6393                                 return ret;
6394                         gtp_item = items;
6395                         last_item = MLX5_FLOW_LAYER_GTP;
6396                         break;
6397                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6398                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
6399                                                             gtp_item, attr,
6400                                                             error);
6401                         if (ret < 0)
6402                                 return ret;
6403                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
6404                         break;
6405                 case RTE_FLOW_ITEM_TYPE_ECPRI:
6406                         /* Capacity will be checked in the translate stage. */
6407                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
6408                                                             last_item,
6409                                                             ether_type,
6410                                                             &nic_ecpri_mask,
6411                                                             error);
6412                         if (ret < 0)
6413                                 return ret;
6414                         last_item = MLX5_FLOW_LAYER_ECPRI;
6415                         break;
6416                 default:
6417                         return rte_flow_error_set(error, ENOTSUP,
6418                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6419                                                   NULL, "item not supported");
6420                 }
6421                 item_flags |= last_item;
6422         }
6423         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6424                 int type = actions->type;
6425
6426                 if (!mlx5_flow_os_action_supported(type))
6427                         return rte_flow_error_set(error, ENOTSUP,
6428                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6429                                                   actions,
6430                                                   "action not supported");
6431                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
6432                         return rte_flow_error_set(error, ENOTSUP,
6433                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6434                                                   actions, "too many actions");
6435                 switch (type) {
6436                 case RTE_FLOW_ACTION_TYPE_VOID:
6437                         break;
6438                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
6439                         ret = flow_dv_validate_action_port_id(dev,
6440                                                               action_flags,
6441                                                               actions,
6442                                                               attr,
6443                                                               error);
6444                         if (ret)
6445                                 return ret;
6446                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
6447                         ++actions_n;
6448                         break;
6449                 case RTE_FLOW_ACTION_TYPE_FLAG:
6450                         ret = flow_dv_validate_action_flag(dev, action_flags,
6451                                                            attr, error);
6452                         if (ret < 0)
6453                                 return ret;
6454                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6455                                 /* Count all modify-header actions as one. */
6456                                 if (!(action_flags &
6457                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6458                                         ++actions_n;
6459                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
6460                                                 MLX5_FLOW_ACTION_MARK_EXT;
6461                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6462                                         modify_after_mirror = 1;
6463
6464                         } else {
6465                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
6466                                 ++actions_n;
6467                         }
6468                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6469                         break;
6470                 case RTE_FLOW_ACTION_TYPE_MARK:
6471                         ret = flow_dv_validate_action_mark(dev, actions,
6472                                                            action_flags,
6473                                                            attr, error);
6474                         if (ret < 0)
6475                                 return ret;
6476                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6477                                 /* Count all modify-header actions as one. */
6478                                 if (!(action_flags &
6479                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6480                                         ++actions_n;
6481                                 action_flags |= MLX5_FLOW_ACTION_MARK |
6482                                                 MLX5_FLOW_ACTION_MARK_EXT;
6483                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6484                                         modify_after_mirror = 1;
6485                         } else {
6486                                 action_flags |= MLX5_FLOW_ACTION_MARK;
6487                                 ++actions_n;
6488                         }
6489                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6490                         break;
6491                 case RTE_FLOW_ACTION_TYPE_SET_META:
6492                         ret = flow_dv_validate_action_set_meta(dev, actions,
6493                                                                action_flags,
6494                                                                attr, error);
6495                         if (ret < 0)
6496                                 return ret;
6497                         /* Count all modify-header actions as one action. */
6498                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6499                                 ++actions_n;
6500                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6501                                 modify_after_mirror = 1;
6502                         action_flags |= MLX5_FLOW_ACTION_SET_META;
6503                         rw_act_num += MLX5_ACT_NUM_SET_META;
6504                         break;
6505                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
6506                         ret = flow_dv_validate_action_set_tag(dev, actions,
6507                                                               action_flags,
6508                                                               attr, error);
6509                         if (ret < 0)
6510                                 return ret;
6511                         /* Count all modify-header actions as one action. */
6512                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6513                                 ++actions_n;
6514                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6515                                 modify_after_mirror = 1;
6516                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6517                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6518                         break;
6519                 case RTE_FLOW_ACTION_TYPE_DROP:
6520                         ret = mlx5_flow_validate_action_drop(action_flags,
6521                                                              attr, error);
6522                         if (ret < 0)
6523                                 return ret;
6524                         action_flags |= MLX5_FLOW_ACTION_DROP;
6525                         ++actions_n;
6526                         break;
6527                 case RTE_FLOW_ACTION_TYPE_QUEUE:
6528                         ret = mlx5_flow_validate_action_queue(actions,
6529                                                               action_flags, dev,
6530                                                               attr, error);
6531                         if (ret < 0)
6532                                 return ret;
6533                         queue_index = ((const struct rte_flow_action_queue *)
6534                                                         (actions->conf))->index;
6535                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
6536                         ++actions_n;
6537                         break;
6538                 case RTE_FLOW_ACTION_TYPE_RSS:
6539                         rss = actions->conf;
6540                         ret = mlx5_flow_validate_action_rss(actions,
6541                                                             action_flags, dev,
6542                                                             attr, item_flags,
6543                                                             error);
6544                         if (ret < 0)
6545                                 return ret;
6546                         if (rss && sample_rss &&
6547                             (sample_rss->level != rss->level ||
6548                             sample_rss->types != rss->types))
6549                                 return rte_flow_error_set(error, ENOTSUP,
6550                                         RTE_FLOW_ERROR_TYPE_ACTION,
6551                                         NULL,
6552                                         "Can't use the different RSS types "
6553                                         "or level in the same flow");
6554                         if (rss != NULL && rss->queue_num)
6555                                 queue_index = rss->queue[0];
6556                         action_flags |= MLX5_FLOW_ACTION_RSS;
6557                         ++actions_n;
6558                         break;
6559                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6560                         ret =
6561                         mlx5_flow_validate_action_default_miss(action_flags,
6562                                         attr, error);
6563                         if (ret < 0)
6564                                 return ret;
6565                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
6566                         ++actions_n;
6567                         break;
6568                 case RTE_FLOW_ACTION_TYPE_COUNT:
6569                         ret = flow_dv_validate_action_count(dev, actions,
6570                                                             action_flags,
6571                                                             error);
6572                         if (ret < 0)
6573                                 return ret;
6574                         count = actions->conf;
6575                         action_flags |= MLX5_FLOW_ACTION_COUNT;
6576                         ++actions_n;
6577                         break;
6578                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
6579                         if (flow_dv_validate_action_pop_vlan(dev,
6580                                                              action_flags,
6581                                                              actions,
6582                                                              item_flags, attr,
6583                                                              error))
6584                                 return -rte_errno;
6585                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
6586                         ++actions_n;
6587                         break;
6588                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6589                         ret = flow_dv_validate_action_push_vlan(dev,
6590                                                                 action_flags,
6591                                                                 vlan_m,
6592                                                                 actions, attr,
6593                                                                 error);
6594                         if (ret < 0)
6595                                 return ret;
6596                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
6597                         ++actions_n;
6598                         break;
6599                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
6600                         ret = flow_dv_validate_action_set_vlan_pcp
6601                                                 (action_flags, actions, error);
6602                         if (ret < 0)
6603                                 return ret;
6604                         /* Count PCP with push_vlan command. */
6605                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
6606                         break;
6607                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6608                         ret = flow_dv_validate_action_set_vlan_vid
6609                                                 (item_flags, action_flags,
6610                                                  actions, error);
6611                         if (ret < 0)
6612                                 return ret;
6613                         /* Count VID with push_vlan command. */
6614                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
6615                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
6616                         break;
6617                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6618                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6619                         ret = flow_dv_validate_action_l2_encap(dev,
6620                                                                action_flags,
6621                                                                actions, attr,
6622                                                                error);
6623                         if (ret < 0)
6624                                 return ret;
6625                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
6626                         ++actions_n;
6627                         break;
6628                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6629                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6630                         ret = flow_dv_validate_action_decap(dev, action_flags,
6631                                                             actions, item_flags,
6632                                                             attr, error);
6633                         if (ret < 0)
6634                                 return ret;
6635                         action_flags |= MLX5_FLOW_ACTION_DECAP;
6636                         ++actions_n;
6637                         break;
6638                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6639                         ret = flow_dv_validate_action_raw_encap_decap
6640                                 (dev, NULL, actions->conf, attr, &action_flags,
6641                                  &actions_n, actions, item_flags, error);
6642                         if (ret < 0)
6643                                 return ret;
6644                         break;
6645                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6646                         decap = actions->conf;
6647                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
6648                                 ;
6649                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
6650                                 encap = NULL;
6651                                 actions--;
6652                         } else {
6653                                 encap = actions->conf;
6654                         }
6655                         ret = flow_dv_validate_action_raw_encap_decap
6656                                            (dev,
6657                                             decap ? decap : &empty_decap, encap,
6658                                             attr, &action_flags, &actions_n,
6659                                             actions, item_flags, error);
6660                         if (ret < 0)
6661                                 return ret;
6662                         break;
6663                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
6664                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
6665                         ret = flow_dv_validate_action_modify_mac(action_flags,
6666                                                                  actions,
6667                                                                  item_flags,
6668                                                                  error);
6669                         if (ret < 0)
6670                                 return ret;
6671                         /* Count all modify-header actions as one action. */
6672                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6673                                 ++actions_n;
6674                         action_flags |= actions->type ==
6675                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
6676                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
6677                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
6678                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6679                                 modify_after_mirror = 1;
6680                         /*
6681                          * Even if the source and destination MAC addresses have
6682                          * overlap in the header with 4B alignment, the convert
6683                          * function will handle them separately and 4 SW actions
6684                          * will be created. And 2 actions will be added each
6685                          * time no matter how many bytes of address will be set.
6686                          */
6687                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
6688                         break;
6689                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
6690                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
6691                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
6692                                                                   actions,
6693                                                                   item_flags,
6694                                                                   error);
6695                         if (ret < 0)
6696                                 return ret;
6697                         /* Count all modify-header actions as one action. */
6698                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6699                                 ++actions_n;
6700                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6701                                 modify_after_mirror = 1;
6702                         action_flags |= actions->type ==
6703                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
6704                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
6705                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
6706                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
6707                         break;
6708                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
6709                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
6710                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
6711                                                                   actions,
6712                                                                   item_flags,
6713                                                                   error);
6714                         if (ret < 0)
6715                                 return ret;
6716                         if (item_ipv6_proto == IPPROTO_ICMPV6)
6717                                 return rte_flow_error_set(error, ENOTSUP,
6718                                         RTE_FLOW_ERROR_TYPE_ACTION,
6719                                         actions,
6720                                         "Can't change header "
6721                                         "with ICMPv6 proto");
6722                         /* Count all modify-header actions as one action. */
6723                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6724                                 ++actions_n;
6725                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6726                                 modify_after_mirror = 1;
6727                         action_flags |= actions->type ==
6728                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
6729                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
6730                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
6731                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
6732                         break;
6733                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
6734                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
6735                         ret = flow_dv_validate_action_modify_tp(action_flags,
6736                                                                 actions,
6737                                                                 item_flags,
6738                                                                 error);
6739                         if (ret < 0)
6740                                 return ret;
6741                         /* Count all modify-header actions as one action. */
6742                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6743                                 ++actions_n;
6744                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6745                                 modify_after_mirror = 1;
6746                         action_flags |= actions->type ==
6747                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
6748                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
6749                                                 MLX5_FLOW_ACTION_SET_TP_DST;
6750                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
6751                         break;
6752                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
6753                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
6754                         ret = flow_dv_validate_action_modify_ttl(action_flags,
6755                                                                  actions,
6756                                                                  item_flags,
6757                                                                  error);
6758                         if (ret < 0)
6759                                 return ret;
6760                         /* Count all modify-header actions as one action. */
6761                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6762                                 ++actions_n;
6763                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6764                                 modify_after_mirror = 1;
6765                         action_flags |= actions->type ==
6766                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
6767                                                 MLX5_FLOW_ACTION_SET_TTL :
6768                                                 MLX5_FLOW_ACTION_DEC_TTL;
6769                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
6770                         break;
6771                 case RTE_FLOW_ACTION_TYPE_JUMP:
6772                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
6773                                                            action_flags,
6774                                                            attr, external,
6775                                                            error);
6776                         if (ret)
6777                                 return ret;
6778                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
6779                             fdb_mirror_limit)
6780                                 return rte_flow_error_set(error, EINVAL,
6781                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6782                                                   NULL,
6783                                                   "sample and jump action combination is not supported");
6784                         ++actions_n;
6785                         action_flags |= MLX5_FLOW_ACTION_JUMP;
6786                         break;
6787                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
6788                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
6789                         ret = flow_dv_validate_action_modify_tcp_seq
6790                                                                 (action_flags,
6791                                                                  actions,
6792                                                                  item_flags,
6793                                                                  error);
6794                         if (ret < 0)
6795                                 return ret;
6796                         /* Count all modify-header actions as one action. */
6797                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6798                                 ++actions_n;
6799                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6800                                 modify_after_mirror = 1;
6801                         action_flags |= actions->type ==
6802                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
6803                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
6804                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
6805                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
6806                         break;
6807                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
6808                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
6809                         ret = flow_dv_validate_action_modify_tcp_ack
6810                                                                 (action_flags,
6811                                                                  actions,
6812                                                                  item_flags,
6813                                                                  error);
6814                         if (ret < 0)
6815                                 return ret;
6816                         /* Count all modify-header actions as one action. */
6817                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6818                                 ++actions_n;
6819                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6820                                 modify_after_mirror = 1;
6821                         action_flags |= actions->type ==
6822                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
6823                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
6824                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
6825                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
6826                         break;
6827                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
6828                         break;
6829                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
6830                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
6831                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6832                         break;
6833                 case RTE_FLOW_ACTION_TYPE_METER:
6834                         ret = mlx5_flow_validate_action_meter(dev,
6835                                                               action_flags,
6836                                                               actions, attr,
6837                                                               error);
6838                         if (ret < 0)
6839                                 return ret;
6840                         action_flags |= MLX5_FLOW_ACTION_METER;
6841                         ++actions_n;
6842                         /* Meter action will add one more TAG action. */
6843                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6844                         break;
6845                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
6846                         if (!attr->transfer && !attr->group)
6847                                 return rte_flow_error_set(error, ENOTSUP,
6848                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6849                                                                            NULL,
6850                           "Shared ASO age action is not supported for group 0");
6851                         action_flags |= MLX5_FLOW_ACTION_AGE;
6852                         ++actions_n;
6853                         break;
6854                 case RTE_FLOW_ACTION_TYPE_AGE:
6855                         ret = flow_dv_validate_action_age(action_flags,
6856                                                           actions, dev,
6857                                                           error);
6858                         if (ret < 0)
6859                                 return ret;
6860                         /*
6861                          * Validate the regular AGE action (using counter)
6862                          * mutual exclusion with share counter actions.
6863                          */
6864                         if (!priv->sh->flow_hit_aso_en) {
6865                                 if (count && count->shared)
6866                                         return rte_flow_error_set
6867                                                 (error, EINVAL,
6868                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6869                                                 NULL,
6870                                                 "old age and shared count combination is not supported");
6871                                 if (sample_count)
6872                                         return rte_flow_error_set
6873                                                 (error, EINVAL,
6874                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6875                                                 NULL,
6876                                                 "old age action and count must be in the same sub flow");
6877                         }
6878                         action_flags |= MLX5_FLOW_ACTION_AGE;
6879                         ++actions_n;
6880                         break;
6881                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
6882                         ret = flow_dv_validate_action_modify_ipv4_dscp
6883                                                          (action_flags,
6884                                                           actions,
6885                                                           item_flags,
6886                                                           error);
6887                         if (ret < 0)
6888                                 return ret;
6889                         /* Count all modify-header actions as one action. */
6890                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6891                                 ++actions_n;
6892                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6893                                 modify_after_mirror = 1;
6894                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
6895                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6896                         break;
6897                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
6898                         ret = flow_dv_validate_action_modify_ipv6_dscp
6899                                                                 (action_flags,
6900                                                                  actions,
6901                                                                  item_flags,
6902                                                                  error);
6903                         if (ret < 0)
6904                                 return ret;
6905                         /* Count all modify-header actions as one action. */
6906                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6907                                 ++actions_n;
6908                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6909                                 modify_after_mirror = 1;
6910                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6911                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6912                         break;
6913                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6914                         ret = flow_dv_validate_action_sample(&action_flags,
6915                                                              actions, dev,
6916                                                              attr, item_flags,
6917                                                              rss, &sample_rss,
6918                                                              &sample_count,
6919                                                              &fdb_mirror_limit,
6920                                                              error);
6921                         if (ret < 0)
6922                                 return ret;
6923                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6924                         ++actions_n;
6925                         break;
6926                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6927                         if (actions[0].type != (typeof(actions[0].type))
6928                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6929                                 return rte_flow_error_set
6930                                                 (error, EINVAL,
6931                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6932                                                 NULL, "MLX5 private action "
6933                                                 "must be the first");
6934
6935                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6936                         break;
6937                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6938                         if (!attr->transfer && !attr->group)
6939                                 return rte_flow_error_set(error, ENOTSUP,
6940                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6941                                                 NULL, "modify field action "
6942                                                 "is not supported for group 0");
6943                         ret = flow_dv_validate_action_modify_field(action_flags,
6944                                                                  actions,
6945                                                                  error);
6946                         if (ret < 0)
6947                                 return ret;
6948                         /* Count all modify-header actions as one action. */
6949                         if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
6950                                 ++actions_n;
6951                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
6952                         rw_act_num += ret;
6953                         break;
6954                 default:
6955                         return rte_flow_error_set(error, ENOTSUP,
6956                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6957                                                   actions,
6958                                                   "action not supported");
6959                 }
6960         }
6961         /*
6962          * Validate actions in flow rules
6963          * - Explicit decap action is prohibited by the tunnel offload API.
6964          * - Drop action in tunnel steer rule is prohibited by the API.
 * - Application cannot use MARK action because its value can mask
 *   tunnel default miss notification.
6967          * - JUMP in tunnel match rule has no support in current PMD
6968          *   implementation.
6969          * - TAG & META are reserved for future uses.
6970          */
6971         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6972                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6973                                             MLX5_FLOW_ACTION_MARK     |
6974                                             MLX5_FLOW_ACTION_SET_TAG  |
6975                                             MLX5_FLOW_ACTION_SET_META |
6976                                             MLX5_FLOW_ACTION_DROP;
6977
6978                 if (action_flags & bad_actions_mask)
6979                         return rte_flow_error_set
6980                                         (error, EINVAL,
6981                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6982                                         "Invalid RTE action in tunnel "
6983                                         "set decap rule");
6984                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6985                         return rte_flow_error_set
6986                                         (error, EINVAL,
6987                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6988                                         "tunnel set decap rule must terminate "
6989                                         "with JUMP");
6990                 if (!attr->ingress)
6991                         return rte_flow_error_set
6992                                         (error, EINVAL,
6993                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6994                                         "tunnel flows for ingress traffic only");
6995         }
6996         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6997                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6998                                             MLX5_FLOW_ACTION_MARK    |
6999                                             MLX5_FLOW_ACTION_SET_TAG |
7000                                             MLX5_FLOW_ACTION_SET_META;
7001
7002                 if (action_flags & bad_actions_mask)
7003                         return rte_flow_error_set
7004                                         (error, EINVAL,
7005                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7006                                         "Invalid RTE action in tunnel "
7007                                         "set match rule");
7008         }
7009         /*
7010          * Validate the drop action mutual exclusion with other actions.
7011          * Drop action is mutually-exclusive with any other action, except for
7012          * Count action.
7013          * Drop action compatibility with tunnel offload was already validated.
7014          */
7015         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7016                             MLX5_FLOW_ACTION_TUNNEL_MATCH));
7017         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7018             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7019                 return rte_flow_error_set(error, EINVAL,
7020                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7021                                           "Drop action is mutually-exclusive "
7022                                           "with any other action, except for "
7023                                           "Count action");
7024         /* Eswitch has few restrictions on using items and actions */
7025         if (attr->transfer) {
7026                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7027                     action_flags & MLX5_FLOW_ACTION_FLAG)
7028                         return rte_flow_error_set(error, ENOTSUP,
7029                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7030                                                   NULL,
7031                                                   "unsupported action FLAG");
7032                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7033                     action_flags & MLX5_FLOW_ACTION_MARK)
7034                         return rte_flow_error_set(error, ENOTSUP,
7035                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7036                                                   NULL,
7037                                                   "unsupported action MARK");
7038                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7039                         return rte_flow_error_set(error, ENOTSUP,
7040                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7041                                                   NULL,
7042                                                   "unsupported action QUEUE");
7043                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7044                         return rte_flow_error_set(error, ENOTSUP,
7045                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7046                                                   NULL,
7047                                                   "unsupported action RSS");
7048                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7049                         return rte_flow_error_set(error, EINVAL,
7050                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7051                                                   actions,
7052                                                   "no fate action is found");
7053         } else {
7054                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7055                         return rte_flow_error_set(error, EINVAL,
7056                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7057                                                   actions,
7058                                                   "no fate action is found");
7059         }
7060         /*
7061          * Continue validation for Xcap and VLAN actions.
7062          * If hairpin is working in explicit TX rule mode, there is no actions
7063          * splitting and the validation of hairpin ingress flow should be the
7064          * same as other standard flows.
7065          */
7066         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7067                              MLX5_FLOW_VLAN_ACTIONS)) &&
7068             (queue_index == 0xFFFF ||
7069              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7070              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7071              conf->tx_explicit != 0))) {
7072                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7073                     MLX5_FLOW_XCAP_ACTIONS)
7074                         return rte_flow_error_set(error, ENOTSUP,
7075                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7076                                                   NULL, "encap and decap "
7077                                                   "combination aren't supported");
7078                 if (!attr->transfer && attr->ingress) {
7079                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7080                                 return rte_flow_error_set
7081                                                 (error, ENOTSUP,
7082                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7083                                                  NULL, "encap is not supported"
7084                                                  " for ingress traffic");
7085                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7086                                 return rte_flow_error_set
7087                                                 (error, ENOTSUP,
7088                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7089                                                  NULL, "push VLAN action not "
7090                                                  "supported for ingress");
7091                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7092                                         MLX5_FLOW_VLAN_ACTIONS)
7093                                 return rte_flow_error_set
7094                                                 (error, ENOTSUP,
7095                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7096                                                  NULL, "no support for "
7097                                                  "multiple VLAN actions");
7098                 }
7099         }
7100         /*
7101          * Hairpin flow will add one more TAG action in TX implicit mode.
7102          * In TX explicit mode, there will be no hairpin flow ID.
7103          */
7104         if (hairpin > 0)
7105                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7106         /* extra metadata enabled: one more TAG action will be add. */
7107         if (dev_conf->dv_flow_en &&
7108             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7109             mlx5_flow_ext_mreg_supported(dev))
7110                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7111         if (rw_act_num >
7112                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7113                 return rte_flow_error_set(error, ENOTSUP,
7114                                           RTE_FLOW_ERROR_TYPE_ACTION,
7115                                           NULL, "too many header modify"
7116                                           " actions to support");
7117         }
7118         /* Eswitch egress mirror and modify flow has limitation on CX5 */
7119         if (fdb_mirror_limit && modify_after_mirror)
7120                 return rte_flow_error_set(error, EINVAL,
7121                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7122                                 "sample before modify action is not supported");
7123         return 0;
7124 }
7125
7126 /**
7127  * Internal preparation function. Allocates the DV flow size,
7128  * this size is constant.
7129  *
7130  * @param[in] dev
7131  *   Pointer to the rte_eth_dev structure.
7132  * @param[in] attr
7133  *   Pointer to the flow attributes.
7134  * @param[in] items
7135  *   Pointer to the list of items.
7136  * @param[in] actions
7137  *   Pointer to the list of actions.
7138  * @param[out] error
7139  *   Pointer to the error structure.
7140  *
7141  * @return
7142  *   Pointer to mlx5_flow object on success,
7143  *   otherwise NULL and rte_errno is set.
7144  */
7145 static struct mlx5_flow *
7146 flow_dv_prepare(struct rte_eth_dev *dev,
7147                 const struct rte_flow_attr *attr __rte_unused,
7148                 const struct rte_flow_item items[] __rte_unused,
7149                 const struct rte_flow_action actions[] __rte_unused,
7150                 struct rte_flow_error *error)
7151 {
7152         uint32_t handle_idx = 0;
7153         struct mlx5_flow *dev_flow;
7154         struct mlx5_flow_handle *dev_handle;
7155         struct mlx5_priv *priv = dev->data->dev_private;
7156         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7157
7158         MLX5_ASSERT(wks);
7159         /* In case of corrupting the memory. */
7160         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7161                 rte_flow_error_set(error, ENOSPC,
7162                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7163                                    "not free temporary device flow");
7164                 return NULL;
7165         }
7166         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7167                                    &handle_idx);
7168         if (!dev_handle) {
7169                 rte_flow_error_set(error, ENOMEM,
7170                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7171                                    "not enough memory to create flow handle");
7172                 return NULL;
7173         }
7174         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7175         dev_flow = &wks->flows[wks->flow_idx++];
7176         memset(dev_flow, 0, sizeof(*dev_flow));
7177         dev_flow->handle = dev_handle;
7178         dev_flow->handle_idx = handle_idx;
7179         /*
7180          * In some old rdma-core releases, before continuing, a check of the
7181          * length of matching parameter will be done at first. It needs to use
7182          * the length without misc4 param. If the flow has misc4 support, then
7183          * the length needs to be adjusted accordingly. Each param member is
7184          * aligned with a 64B boundary naturally.
7185          */
7186         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7187                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7188         dev_flow->ingress = attr->ingress;
7189         dev_flow->dv.transfer = attr->transfer;
7190         return dev_flow;
7191 }
7192
7193 #ifdef RTE_LIBRTE_MLX5_DEBUG
7194 /**
7195  * Sanity check for match mask and value. Similar to check_valid_spec() in
7196  * kernel driver. If unmasked bit is present in value, it returns failure.
7197  *
7198  * @param match_mask
7199  *   pointer to match mask buffer.
7200  * @param match_value
7201  *   pointer to match value buffer.
7202  *
7203  * @return
7204  *   0 if valid, -EINVAL otherwise.
7205  */
7206 static int
7207 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7208 {
7209         uint8_t *m = match_mask;
7210         uint8_t *v = match_value;
7211         unsigned int i;
7212
7213         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7214                 if (v[i] & ~m[i]) {
7215                         DRV_LOG(ERR,
7216                                 "match_value differs from match_criteria"
7217                                 " %p[%u] != %p[%u]",
7218                                 match_value, i, match_mask, i);
7219                         return -EINVAL;
7220                 }
7221         }
7222         return 0;
7223 }
7224 #endif
7225
7226 /**
7227  * Add match of ip_version.
7228  *
7229  * @param[in] group
7230  *   Flow group.
7231  * @param[in] headers_v
7232  *   Values header pointer.
7233  * @param[in] headers_m
7234  *   Masks header pointer.
7235  * @param[in] ip_version
7236  *   The IP version to set.
7237  */
7238 static inline void
7239 flow_dv_set_match_ip_version(uint32_t group,
7240                              void *headers_v,
7241                              void *headers_m,
7242                              uint8_t ip_version)
7243 {
7244         if (group == 0)
7245                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7246         else
7247                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7248                          ip_version);
7249         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7250         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7251         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7252 }
7253
7254 /**
7255  * Add Ethernet item to matcher and to the value.
7256  *
7257  * @param[in, out] matcher
7258  *   Flow matcher.
7259  * @param[in, out] key
7260  *   Flow matcher value.
7261  * @param[in] item
7262  *   Flow pattern to translate.
7263  * @param[in] inner
7264  *   Item is inner pattern.
7265  */
7266 static void
7267 flow_dv_translate_item_eth(void *matcher, void *key,
7268                            const struct rte_flow_item *item, int inner,
7269                            uint32_t group)
7270 {
7271         const struct rte_flow_item_eth *eth_m = item->mask;
7272         const struct rte_flow_item_eth *eth_v = item->spec;
7273         const struct rte_flow_item_eth nic_mask = {
7274                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7275                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7276                 .type = RTE_BE16(0xffff),
7277                 .has_vlan = 0,
7278         };
7279         void *hdrs_m;
7280         void *hdrs_v;
7281         char *l24_v;
7282         unsigned int i;
7283
7284         if (!eth_v)
7285                 return;
7286         if (!eth_m)
7287                 eth_m = &nic_mask;
7288         if (inner) {
7289                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7290                                          inner_headers);
7291                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7292         } else {
7293                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7294                                          outer_headers);
7295                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7296         }
7297         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7298                &eth_m->dst, sizeof(eth_m->dst));
7299         /* The value must be in the range of the mask. */
7300         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7301         for (i = 0; i < sizeof(eth_m->dst); ++i)
7302                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7303         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7304                &eth_m->src, sizeof(eth_m->src));
7305         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7306         /* The value must be in the range of the mask. */
7307         for (i = 0; i < sizeof(eth_m->dst); ++i)
7308                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7309         /*
7310          * HW supports match on one Ethertype, the Ethertype following the last
7311          * VLAN tag of the packet (see PRM).
7312          * Set match on ethertype only if ETH header is not followed by VLAN.
7313          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7314          * ethertype, and use ip_version field instead.
7315          * eCPRI over Ether layer will use type value 0xAEFE.
7316          */
7317         if (eth_m->type == 0xFFFF) {
7318                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
7319                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7320                 switch (eth_v->type) {
7321                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7322                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7323                         return;
7324                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7325                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7326                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7327                         return;
7328                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7329                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7330                         return;
7331                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7332                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7333                         return;
7334                 default:
7335                         break;
7336                 }
7337         }
7338         if (eth_m->has_vlan) {
7339                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7340                 if (eth_v->has_vlan) {
7341                         /*
7342                          * Here, when also has_more_vlan field in VLAN item is
7343                          * not set, only single-tagged packets will be matched.
7344                          */
7345                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7346                         return;
7347                 }
7348         }
7349         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7350                  rte_be_to_cpu_16(eth_m->type));
7351         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
7352         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
7353 }
7354
/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] dev_flow
 *   Flow descriptor (outer VLAN TCI is recorded in its handle as a
 *   VF VLAN workaround).
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule (forwarded to IP-version matching).
 */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
			    void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	void *hdrs_m;
	void *hdrs_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		/*
		 * This is workaround, masks are not supported,
		 * and pre-validated.
		 */
		if (vlan_v)
			dev_flow->handle->vf_vlan.tag =
					rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
	}
	/*
	 * When VLAN item exists in flow, mark packet as tagged,
	 * even if TCI is not specified.
	 */
	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
		/* Only set cvlan if svlan was not already selected. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
	}
	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &rte_flow_item_vlan_mask;
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	/* The value must be in the range of the mask. */
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	/* TCI layout: PCP (bits 15-13), CFI/DEI (bit 12), VID (bits 11-0). */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
	/*
	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
	 * ethertype, and use ip_version field instead.
	 */
	if (vlan_m->inner_type == 0xFFFF) {
		/* 0xFFFF compares equal in either byte order. */
		switch (vlan_v->inner_type) {
		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
			return;
		default:
			break;
		}
	}
	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
		/* Only one vlan_tag bit can be set. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
		return;
	}
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
7451
7452 /**
7453  * Add IPV4 item to matcher and to the value.
7454  *
7455  * @param[in, out] matcher
7456  *   Flow matcher.
7457  * @param[in, out] key
7458  *   Flow matcher value.
7459  * @param[in] item
7460  *   Flow pattern to translate.
7461  * @param[in] inner
7462  *   Item is inner pattern.
7463  * @param[in] group
7464  *   The group to insert the rule.
7465  */
7466 static void
7467 flow_dv_translate_item_ipv4(void *matcher, void *key,
7468                             const struct rte_flow_item *item,
7469                             int inner, uint32_t group)
7470 {
7471         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
7472         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
7473         const struct rte_flow_item_ipv4 nic_mask = {
7474                 .hdr = {
7475                         .src_addr = RTE_BE32(0xffffffff),
7476                         .dst_addr = RTE_BE32(0xffffffff),
7477                         .type_of_service = 0xff,
7478                         .next_proto_id = 0xff,
7479                         .time_to_live = 0xff,
7480                 },
7481         };
7482         void *headers_m;
7483         void *headers_v;
7484         char *l24_m;
7485         char *l24_v;
7486         uint8_t tos;
7487
7488         if (inner) {
7489                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7490                                          inner_headers);
7491                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7492         } else {
7493                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7494                                          outer_headers);
7495                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7496         }
7497         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
7498         if (!ipv4_v)
7499                 return;
7500         if (!ipv4_m)
7501                 ipv4_m = &nic_mask;
7502         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7503                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7504         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7505                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7506         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
7507         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
7508         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7509                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7510         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7511                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7512         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
7513         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
7514         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
7515         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
7516                  ipv4_m->hdr.type_of_service);
7517         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
7518         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
7519                  ipv4_m->hdr.type_of_service >> 2);
7520         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
7521         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7522                  ipv4_m->hdr.next_proto_id);
7523         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7524                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
7525         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7526                  ipv4_m->hdr.time_to_live);
7527         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7528                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
7529         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7530                  !!(ipv4_m->hdr.fragment_offset));
7531         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7532                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
7533 }
7534
/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask: match full addresses, vtc_flow, proto and hop limit. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP version is matched even when no spec is provided. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	size = sizeof(ipv6_m->hdr.dst_addr);
	/* Destination address: value bytes are ANDed with the mask. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/* Traffic class sits at bits 27:20 of vtc_flow; ECN >> 20, DSCP >> 22. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
	/* Hop limit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv6_m->hdr.hop_limits);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
	/* Fragment presence is taken from the item flag, not the header. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv6_m->has_frag_ext));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
}
7643
7644 /**
7645  * Add IPV6 fragment extension item to matcher and to the value.
7646  *
7647  * @param[in, out] matcher
7648  *   Flow matcher.
7649  * @param[in, out] key
7650  *   Flow matcher value.
7651  * @param[in] item
7652  *   Flow pattern to translate.
7653  * @param[in] inner
7654  *   Item is inner pattern.
7655  */
7656 static void
7657 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
7658                                      const struct rte_flow_item *item,
7659                                      int inner)
7660 {
7661         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
7662         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
7663         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
7664                 .hdr = {
7665                         .next_header = 0xff,
7666                         .frag_data = RTE_BE16(0xffff),
7667                 },
7668         };
7669         void *headers_m;
7670         void *headers_v;
7671
7672         if (inner) {
7673                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7674                                          inner_headers);
7675                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7676         } else {
7677                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7678                                          outer_headers);
7679                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7680         }
7681         /* IPv6 fragment extension item exists, so packet is IP fragment. */
7682         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7683         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
7684         if (!ipv6_frag_ext_v)
7685                 return;
7686         if (!ipv6_frag_ext_m)
7687                 ipv6_frag_ext_m = &nic_mask;
7688         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7689                  ipv6_frag_ext_m->hdr.next_header);
7690         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7691                  ipv6_frag_ext_v->hdr.next_header &
7692                  ipv6_frag_ext_m->hdr.next_header);
7693 }
7694
7695 /**
7696  * Add TCP item to matcher and to the value.
7697  *
7698  * @param[in, out] matcher
7699  *   Flow matcher.
7700  * @param[in, out] key
7701  *   Flow matcher value.
7702  * @param[in] item
7703  *   Flow pattern to translate.
7704  * @param[in] inner
7705  *   Item is inner pattern.
7706  */
7707 static void
7708 flow_dv_translate_item_tcp(void *matcher, void *key,
7709                            const struct rte_flow_item *item,
7710                            int inner)
7711 {
7712         const struct rte_flow_item_tcp *tcp_m = item->mask;
7713         const struct rte_flow_item_tcp *tcp_v = item->spec;
7714         void *headers_m;
7715         void *headers_v;
7716
7717         if (inner) {
7718                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7719                                          inner_headers);
7720                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7721         } else {
7722                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7723                                          outer_headers);
7724                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7725         }
7726         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7727         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
7728         if (!tcp_v)
7729                 return;
7730         if (!tcp_m)
7731                 tcp_m = &rte_flow_item_tcp_mask;
7732         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
7733                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
7734         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
7735                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
7736         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
7737                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
7738         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
7739                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
7740         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
7741                  tcp_m->hdr.tcp_flags);
7742         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
7743                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
7744 }
7745
7746 /**
7747  * Add UDP item to matcher and to the value.
7748  *
7749  * @param[in, out] matcher
7750  *   Flow matcher.
7751  * @param[in, out] key
7752  *   Flow matcher value.
7753  * @param[in] item
7754  *   Flow pattern to translate.
7755  * @param[in] inner
7756  *   Item is inner pattern.
7757  */
7758 static void
7759 flow_dv_translate_item_udp(void *matcher, void *key,
7760                            const struct rte_flow_item *item,
7761                            int inner)
7762 {
7763         const struct rte_flow_item_udp *udp_m = item->mask;
7764         const struct rte_flow_item_udp *udp_v = item->spec;
7765         void *headers_m;
7766         void *headers_v;
7767
7768         if (inner) {
7769                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7770                                          inner_headers);
7771                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7772         } else {
7773                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7774                                          outer_headers);
7775                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7776         }
7777         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7778         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
7779         if (!udp_v)
7780                 return;
7781         if (!udp_m)
7782                 udp_m = &rte_flow_item_udp_mask;
7783         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
7784                  rte_be_to_cpu_16(udp_m->hdr.src_port));
7785         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
7786                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
7787         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
7788                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
7789         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7790                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
7791 }
7792
7793 /**
7794  * Add GRE optional Key item to matcher and to the value.
7795  *
7796  * @param[in, out] matcher
7797  *   Flow matcher.
7798  * @param[in, out] key
7799  *   Flow matcher value.
7800  * @param[in] item
7801  *   Flow pattern to translate.
7802  * @param[in] inner
7803  *   Item is inner pattern.
7804  */
7805 static void
7806 flow_dv_translate_item_gre_key(void *matcher, void *key,
7807                                    const struct rte_flow_item *item)
7808 {
7809         const rte_be32_t *key_m = item->mask;
7810         const rte_be32_t *key_v = item->spec;
7811         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7812         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7813         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
7814
7815         /* GRE K bit must be on and should already be validated */
7816         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
7817         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
7818         if (!key_v)
7819                 return;
7820         if (!key_m)
7821                 key_m = &gre_key_default_mask;
7822         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
7823                  rte_be_to_cpu_32(*key_m) >> 8);
7824         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
7825                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
7826         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
7827                  rte_be_to_cpu_32(*key_m) & 0xFF);
7828         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
7829                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
7830 }
7831
/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Overlay for the host-order GRE c_rsvd0_ver word to pick out the
	 * C/K/S flag bits and version.
	 * NOTE(review): the bitfield order assumes LSB-first bit allocation
	 * by the compiler — verify on new target ABIs.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP protocol is matched as GRE even without a spec. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	/* Decompose the flags word and match C/K/S presence bits separately. */
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
7907
/**
 * Add NVGRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	const char *tni_flow_id_m;
	const char *tni_flow_id_v;
	char *gre_key_m;
	char *gre_key_v;
	int size;
	int i;

	/* For NVGRE, GRE header fields must be set with defined values. */
	const struct rte_flow_item_gre gre_spec = {
		.c_rsvd0_ver = RTE_BE16(0x2000),
		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
	};
	const struct rte_flow_item_gre gre_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};
	const struct rte_flow_item gre_item = {
		.spec = &gre_spec,
		.mask = &gre_mask,
		.last = NULL,
	};
	/* Translate the synthetic GRE item first (protocol, C/K/S bits). */
	flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
	if (!nvgre_v)
		return;
	if (!nvgre_m)
		nvgre_m = &rte_flow_item_nvgre_mask;
	/* TNI + flow_id together overlay the GRE key field in the PRM. */
	tni_flow_id_m = (const char *)nvgre_m->tni;
	tni_flow_id_v = (const char *)nvgre_v->tni;
	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
	memcpy(gre_key_m, tni_flow_id_m, size);
	/* The value must be in the range of the mask. */
	for (i = 0; i < size; ++i)
		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
}
7964
7965 /**
7966  * Add VXLAN item to matcher and to the value.
7967  *
7968  * @param[in, out] matcher
7969  *   Flow matcher.
7970  * @param[in, out] key
7971  *   Flow matcher value.
7972  * @param[in] item
7973  *   Flow pattern to translate.
7974  * @param[in] inner
7975  *   Item is inner pattern.
7976  */
7977 static void
7978 flow_dv_translate_item_vxlan(void *matcher, void *key,
7979                              const struct rte_flow_item *item,
7980                              int inner)
7981 {
7982         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7983         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7984         void *headers_m;
7985         void *headers_v;
7986         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7987         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7988         char *vni_m;
7989         char *vni_v;
7990         uint16_t dport;
7991         int size;
7992         int i;
7993
7994         if (inner) {
7995                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7996                                          inner_headers);
7997                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7998         } else {
7999                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8000                                          outer_headers);
8001                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8002         }
8003         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8004                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8005         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8006                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8007                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8008         }
8009         if (!vxlan_v)
8010                 return;
8011         if (!vxlan_m)
8012                 vxlan_m = &rte_flow_item_vxlan_mask;
8013         size = sizeof(vxlan_m->vni);
8014         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8015         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8016         memcpy(vni_m, vxlan_m->vni, size);
8017         for (i = 0; i < size; ++i)
8018                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8019 }
8020
8021 /**
8022  * Add VXLAN-GPE item to matcher and to the value.
8023  *
8024  * @param[in, out] matcher
8025  *   Flow matcher.
8026  * @param[in, out] key
8027  *   Flow matcher value.
8028  * @param[in] item
8029  *   Flow pattern to translate.
8030  * @param[in] inner
8031  *   Item is inner pattern.
8032  */
8033
8034 static void
8035 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8036                                  const struct rte_flow_item *item, int inner)
8037 {
8038         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8039         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8040         void *headers_m;
8041         void *headers_v;
8042         void *misc_m =
8043                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8044         void *misc_v =
8045                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8046         char *vni_m;
8047         char *vni_v;
8048         uint16_t dport;
8049         int size;
8050         int i;
8051         uint8_t flags_m = 0xff;
8052         uint8_t flags_v = 0xc;
8053
8054         if (inner) {
8055                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8056                                          inner_headers);
8057                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8058         } else {
8059                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8060                                          outer_headers);
8061                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8062         }
8063         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8064                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8065         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8066                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8067                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8068         }
8069         if (!vxlan_v)
8070                 return;
8071         if (!vxlan_m)
8072                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8073         size = sizeof(vxlan_m->vni);
8074         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8075         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8076         memcpy(vni_m, vxlan_m->vni, size);
8077         for (i = 0; i < size; ++i)
8078                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8079         if (vxlan_m->flags) {
8080                 flags_m = vxlan_m->flags;
8081                 flags_v = vxlan_v->flags;
8082         }
8083         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8084         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8085         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8086                  vxlan_m->protocol);
8087         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8088                  vxlan_v->protocol);
8089 }
8090
8091 /**
8092  * Add Geneve item to matcher and to the value.
8093  *
8094  * @param[in, out] matcher
8095  *   Flow matcher.
8096  * @param[in, out] key
8097  *   Flow matcher value.
8098  * @param[in] item
8099  *   Flow pattern to translate.
8100  * @param[in] inner
8101  *   Item is inner pattern.
8102  */
8103
8104 static void
8105 flow_dv_translate_item_geneve(void *matcher, void *key,
8106                               const struct rte_flow_item *item, int inner)
8107 {
8108         const struct rte_flow_item_geneve *geneve_m = item->mask;
8109         const struct rte_flow_item_geneve *geneve_v = item->spec;
8110         void *headers_m;
8111         void *headers_v;
8112         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8113         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8114         uint16_t dport;
8115         uint16_t gbhdr_m;
8116         uint16_t gbhdr_v;
8117         char *vni_m;
8118         char *vni_v;
8119         size_t size, i;
8120
8121         if (inner) {
8122                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8123                                          inner_headers);
8124                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8125         } else {
8126                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8127                                          outer_headers);
8128                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8129         }
8130         dport = MLX5_UDP_PORT_GENEVE;
8131         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8132                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8133                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8134         }
8135         if (!geneve_v)
8136                 return;
8137         if (!geneve_m)
8138                 geneve_m = &rte_flow_item_geneve_mask;
8139         size = sizeof(geneve_m->vni);
8140         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8141         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8142         memcpy(vni_m, geneve_m->vni, size);
8143         for (i = 0; i < size; ++i)
8144                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8145         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8146                  rte_be_to_cpu_16(geneve_m->protocol));
8147         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8148                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8149         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8150         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8151         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8152                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8153         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8154                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8155         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8156                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8157         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8158                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8159                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8160 }
8161
8162 /**
8163  * Create Geneve TLV option resource.
8164  *
8165  * @param dev[in, out]
8166  *   Pointer to rte_eth_dev structure.
8167  * @param[in, out] tag_be24
8168  *   Tag value in big endian then R-shift 8.
8169  * @parm[in, out] dev_flow
8170  *   Pointer to the dev_flow.
8171  * @param[out] error
8172  *   pointer to error structure.
8173  *
8174  * @return
8175  *   0 on success otherwise -errno and errno is set.
8176  */
8177
8178 int
8179 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8180                                              const struct rte_flow_item *item,
8181                                              struct rte_flow_error *error)
8182 {
8183         struct mlx5_priv *priv = dev->data->dev_private;
8184         struct mlx5_dev_ctx_shared *sh = priv->sh;
8185         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8186                         sh->geneve_tlv_option_resource;
8187         struct mlx5_devx_obj *obj;
8188         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8189         int ret = 0;
8190
8191         if (!geneve_opt_v)
8192                 return -1;
8193         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8194         if (geneve_opt_resource != NULL) {
8195                 if (geneve_opt_resource->option_class ==
8196                         geneve_opt_v->option_class &&
8197                         geneve_opt_resource->option_type ==
8198                         geneve_opt_v->option_type &&
8199                         geneve_opt_resource->length ==
8200                         geneve_opt_v->option_len) {
8201                         /* We already have GENVE TLV option obj allocated. */
8202                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8203                                            __ATOMIC_RELAXED);
8204                 } else {
8205                         ret = rte_flow_error_set(error, ENOMEM,
8206                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8207                                 "Only one GENEVE TLV option supported");
8208                         goto exit;
8209                 }
8210         } else {
8211                 /* Create a GENEVE TLV object and resource. */
8212                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8213                                 geneve_opt_v->option_class,
8214                                 geneve_opt_v->option_type,
8215                                 geneve_opt_v->option_len);
8216                 if (!obj) {
8217                         ret = rte_flow_error_set(error, ENODATA,
8218                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8219                                 "Failed to create GENEVE TLV Devx object");
8220                         goto exit;
8221                 }
8222                 sh->geneve_tlv_option_resource =
8223                                 mlx5_malloc(MLX5_MEM_ZERO,
8224                                                 sizeof(*geneve_opt_resource),
8225                                                 0, SOCKET_ID_ANY);
8226                 if (!sh->geneve_tlv_option_resource) {
8227                         claim_zero(mlx5_devx_cmd_destroy(obj));
8228                         ret = rte_flow_error_set(error, ENOMEM,
8229                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8230                                 "GENEVE TLV object memory allocation failed");
8231                         goto exit;
8232                 }
8233                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8234                 geneve_opt_resource->obj = obj;
8235                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8236                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8237                 geneve_opt_resource->length = geneve_opt_v->option_len;
8238                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8239                                 __ATOMIC_RELAXED);
8240         }
8241 exit:
8242         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8243         return ret;
8244 }
8245
8246 /**
8247  * Add Geneve TLV option item to matcher.
8248  *
8249  * @param[in, out] dev
8250  *   Pointer to rte_eth_dev structure.
8251  * @param[in, out] matcher
8252  *   Flow matcher.
8253  * @param[in, out] key
8254  *   Flow matcher value.
8255  * @param[in] item
8256  *   Flow pattern to translate.
8257  * @param[out] error
8258  *   Pointer to error structure.
8259  */
8260 static int
8261 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8262                                   void *key, const struct rte_flow_item *item,
8263                                   struct rte_flow_error *error)
8264 {
8265         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8266         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8267         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8268         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8269         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8270                         misc_parameters_3);
8271         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8272         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8273         int ret = 0;
8274
8275         if (!geneve_opt_v)
8276                 return -1;
8277         if (!geneve_opt_m)
8278                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8279         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8280                                                            error);
8281         if (ret) {
8282                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8283                 return ret;
8284         }
8285         /*
8286          * Set the option length in GENEVE header if not requested.
8287          * The GENEVE TLV option length is expressed by the option length field
8288          * in the GENEVE header.
8289          * If the option length was not requested but the GENEVE TLV option item
8290          * is present we set the option length field implicitly.
8291          */
8292         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8293                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8294                          MLX5_GENEVE_OPTLEN_MASK);
8295                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8296                          geneve_opt_v->option_len + 1);
8297         }
8298         /* Set the data. */
8299         if (geneve_opt_v->data) {
8300                 memcpy(&opt_data_key, geneve_opt_v->data,
8301                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8302                                 sizeof(opt_data_key)));
8303                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8304                                 sizeof(opt_data_key));
8305                 memcpy(&opt_data_mask, geneve_opt_m->data,
8306                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8307                                 sizeof(opt_data_mask)));
8308                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8309                                 sizeof(opt_data_mask));
8310                 MLX5_SET(fte_match_set_misc3, misc3_m,
8311                                 geneve_tlv_option_0_data,
8312                                 rte_be_to_cpu_32(opt_data_mask));
8313                 MLX5_SET(fte_match_set_misc3, misc3_v,
8314                                 geneve_tlv_option_0_data,
8315                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8316         }
8317         return ret;
8318 }
8319
8320 /**
8321  * Add MPLS item to matcher and to the value.
8322  *
8323  * @param[in, out] matcher
8324  *   Flow matcher.
8325  * @param[in, out] key
8326  *   Flow matcher value.
8327  * @param[in] item
8328  *   Flow pattern to translate.
8329  * @param[in] prev_layer
8330  *   The protocol layer indicated in previous item.
8331  * @param[in] inner
8332  *   Item is inner pattern.
8333  */
8334 static void
8335 flow_dv_translate_item_mpls(void *matcher, void *key,
8336                             const struct rte_flow_item *item,
8337                             uint64_t prev_layer,
8338                             int inner)
8339 {
8340         const uint32_t *in_mpls_m = item->mask;
8341         const uint32_t *in_mpls_v = item->spec;
8342         uint32_t *out_mpls_m = 0;
8343         uint32_t *out_mpls_v = 0;
8344         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8345         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8346         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
8347                                      misc_parameters_2);
8348         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8349         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8350         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8351
8352         switch (prev_layer) {
8353         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8354                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
8355                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8356                          MLX5_UDP_PORT_MPLS);
8357                 break;
8358         case MLX5_FLOW_LAYER_GRE:
8359                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
8360                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8361                          RTE_ETHER_TYPE_MPLS);
8362                 break;
8363         default:
8364                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8365                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8366                          IPPROTO_MPLS);
8367                 break;
8368         }
8369         if (!in_mpls_v)
8370                 return;
8371         if (!in_mpls_m)
8372                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
8373         switch (prev_layer) {
8374         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8375                 out_mpls_m =
8376                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8377                                                  outer_first_mpls_over_udp);
8378                 out_mpls_v =
8379                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8380                                                  outer_first_mpls_over_udp);
8381                 break;
8382         case MLX5_FLOW_LAYER_GRE:
8383                 out_mpls_m =
8384                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8385                                                  outer_first_mpls_over_gre);
8386                 out_mpls_v =
8387                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8388                                                  outer_first_mpls_over_gre);
8389                 break;
8390         default:
8391                 /* Inner MPLS not over GRE is not supported. */
8392                 if (!inner) {
8393                         out_mpls_m =
8394                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8395                                                          misc2_m,
8396                                                          outer_first_mpls);
8397                         out_mpls_v =
8398                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8399                                                          misc2_v,
8400                                                          outer_first_mpls);
8401                 }
8402                 break;
8403         }
8404         if (out_mpls_m && out_mpls_v) {
8405                 *out_mpls_m = *in_mpls_m;
8406                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
8407         }
8408 }
8409
8410 /**
8411  * Add metadata register item to matcher
8412  *
8413  * @param[in, out] matcher
8414  *   Flow matcher.
8415  * @param[in, out] key
8416  *   Flow matcher value.
8417  * @param[in] reg_type
8418  *   Type of device metadata register
8419  * @param[in] value
8420  *   Register value
8421  * @param[in] mask
8422  *   Register mask
8423  */
8424 static void
8425 flow_dv_match_meta_reg(void *matcher, void *key,
8426                        enum modify_reg reg_type,
8427                        uint32_t data, uint32_t mask)
8428 {
8429         void *misc2_m =
8430                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
8431         void *misc2_v =
8432                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8433         uint32_t temp;
8434
8435         data &= mask;
8436         switch (reg_type) {
8437         case REG_A:
8438                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
8439                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
8440                 break;
8441         case REG_B:
8442                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
8443                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
8444                 break;
8445         case REG_C_0:
8446                 /*
8447                  * The metadata register C0 field might be divided into
8448                  * source vport index and META item value, we should set
8449                  * this field according to specified mask, not as whole one.
8450                  */
8451                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
8452                 temp |= mask;
8453                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
8454                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
8455                 temp &= ~mask;
8456                 temp |= data;
8457                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
8458                 break;
8459         case REG_C_1:
8460                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
8461                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
8462                 break;
8463         case REG_C_2:
8464                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
8465                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
8466                 break;
8467         case REG_C_3:
8468                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
8469                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
8470                 break;
8471         case REG_C_4:
8472                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
8473                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
8474                 break;
8475         case REG_C_5:
8476                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
8477                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
8478                 break;
8479         case REG_C_6:
8480                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
8481                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
8482                 break;
8483         case REG_C_7:
8484                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
8485                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
8486                 break;
8487         default:
8488                 MLX5_ASSERT(false);
8489                 break;
8490         }
8491 }
8492
8493 /**
8494  * Add MARK item to matcher
8495  *
8496  * @param[in] dev
8497  *   The device to configure through.
8498  * @param[in, out] matcher
8499  *   Flow matcher.
8500  * @param[in, out] key
8501  *   Flow matcher value.
8502  * @param[in] item
8503  *   Flow pattern to translate.
8504  */
8505 static void
8506 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
8507                             void *matcher, void *key,
8508                             const struct rte_flow_item *item)
8509 {
8510         struct mlx5_priv *priv = dev->data->dev_private;
8511         const struct rte_flow_item_mark *mark;
8512         uint32_t value;
8513         uint32_t mask;
8514
8515         mark = item->mask ? (const void *)item->mask :
8516                             &rte_flow_item_mark_mask;
8517         mask = mark->id & priv->sh->dv_mark_mask;
8518         mark = (const void *)item->spec;
8519         MLX5_ASSERT(mark);
8520         value = mark->id & priv->sh->dv_mark_mask & mask;
8521         if (mask) {
8522                 enum modify_reg reg;
8523
8524                 /* Get the metadata register index for the mark. */
8525                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
8526                 MLX5_ASSERT(reg > 0);
8527                 if (reg == REG_C_0) {
8528                         struct mlx5_priv *priv = dev->data->dev_private;
8529                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8530                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8531
8532                         mask &= msk_c0;
8533                         mask <<= shl_c0;
8534                         value <<= shl_c0;
8535                 }
8536                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8537         }
8538 }
8539
8540 /**
8541  * Add META item to matcher
8542  *
8543  * @param[in] dev
8544  *   The devich to configure through.
8545  * @param[in, out] matcher
8546  *   Flow matcher.
8547  * @param[in, out] key
8548  *   Flow matcher value.
8549  * @param[in] attr
8550  *   Attributes of flow that includes this item.
8551  * @param[in] item
8552  *   Flow pattern to translate.
8553  */
8554 static void
8555 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
8556                             void *matcher, void *key,
8557                             const struct rte_flow_attr *attr,
8558                             const struct rte_flow_item *item)
8559 {
8560         const struct rte_flow_item_meta *meta_m;
8561         const struct rte_flow_item_meta *meta_v;
8562
8563         meta_m = (const void *)item->mask;
8564         if (!meta_m)
8565                 meta_m = &rte_flow_item_meta_mask;
8566         meta_v = (const void *)item->spec;
8567         if (meta_v) {
8568                 int reg;
8569                 uint32_t value = meta_v->data;
8570                 uint32_t mask = meta_m->data;
8571
8572                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
8573                 if (reg < 0)
8574                         return;
8575                 MLX5_ASSERT(reg != REG_NON);
8576                 /*
8577                  * In datapath code there is no endianness
8578                  * coversions for perfromance reasons, all
8579                  * pattern conversions are done in rte_flow.
8580                  */
8581                 value = rte_cpu_to_be_32(value);
8582                 mask = rte_cpu_to_be_32(mask);
8583                 if (reg == REG_C_0) {
8584                         struct mlx5_priv *priv = dev->data->dev_private;
8585                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8586                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8587 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
8588                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
8589
8590                         value >>= shr_c0;
8591                         mask >>= shr_c0;
8592 #endif
8593                         value <<= shl_c0;
8594                         mask <<= shl_c0;
8595                         MLX5_ASSERT(msk_c0);
8596                         MLX5_ASSERT(!(~msk_c0 & mask));
8597                 }
8598                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8599         }
8600 }
8601
8602 /**
8603  * Add vport metadata Reg C0 item to matcher
8604  *
8605  * @param[in, out] matcher
8606  *   Flow matcher.
8607  * @param[in, out] key
8608  *   Flow matcher value.
8609  * @param[in] reg
8610  *   Flow pattern to translate.
8611  */
8612 static void
8613 flow_dv_translate_item_meta_vport(void *matcher, void *key,
8614                                   uint32_t value, uint32_t mask)
8615 {
8616         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
8617 }
8618
8619 /**
8620  * Add tag item to matcher
8621  *
8622  * @param[in] dev
8623  *   The devich to configure through.
8624  * @param[in, out] matcher
8625  *   Flow matcher.
8626  * @param[in, out] key
8627  *   Flow matcher value.
8628  * @param[in] item
8629  *   Flow pattern to translate.
8630  */
8631 static void
8632 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
8633                                 void *matcher, void *key,
8634                                 const struct rte_flow_item *item)
8635 {
8636         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
8637         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
8638         uint32_t mask, value;
8639
8640         MLX5_ASSERT(tag_v);
8641         value = tag_v->data;
8642         mask = tag_m ? tag_m->data : UINT32_MAX;
8643         if (tag_v->id == REG_C_0) {
8644                 struct mlx5_priv *priv = dev->data->dev_private;
8645                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8646                 uint32_t shl_c0 = rte_bsf32(msk_c0);
8647
8648                 mask &= msk_c0;
8649                 mask <<= shl_c0;
8650                 value <<= shl_c0;
8651         }
8652         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
8653 }
8654
8655 /**
8656  * Add TAG item to matcher
8657  *
8658  * @param[in] dev
8659  *   The devich to configure through.
8660  * @param[in, out] matcher
8661  *   Flow matcher.
8662  * @param[in, out] key
8663  *   Flow matcher value.
8664  * @param[in] item
8665  *   Flow pattern to translate.
8666  */
8667 static void
8668 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
8669                            void *matcher, void *key,
8670                            const struct rte_flow_item *item)
8671 {
8672         const struct rte_flow_item_tag *tag_v = item->spec;
8673         const struct rte_flow_item_tag *tag_m = item->mask;
8674         enum modify_reg reg;
8675
8676         MLX5_ASSERT(tag_v);
8677         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
8678         /* Get the metadata register index for the tag. */
8679         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
8680         MLX5_ASSERT(reg > 0);
8681         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
8682 }
8683
8684 /**
8685  * Add source vport match to the specified matcher.
8686  *
8687  * @param[in, out] matcher
8688  *   Flow matcher.
8689  * @param[in, out] key
8690  *   Flow matcher value.
8691  * @param[in] port
8692  *   Source vport value to match
8693  * @param[in] mask
8694  *   Mask
8695  */
8696 static void
8697 flow_dv_translate_item_source_vport(void *matcher, void *key,
8698                                     int16_t port, uint16_t mask)
8699 {
8700         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8701         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8702
8703         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
8704         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
8705 }
8706
/**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] attr
 *   Flow attributes.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item,
			       const struct rte_flow_attr *attr)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	/* Without an item, match the device's own port with a full mask. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/*
	 * Translate to vport field or to metadata, depending on mode.
	 * Kernel can use either misc.source_port or half of C0 metadata
	 * register.
	 */
	if (priv->vport_meta_mask) {
		/*
		 * Provide the hint for SW steering library
		 * to insert the flow into ingress domain and
		 * save the extra vport match.
		 */
		if (mask == 0xffff && priv->vport_id == 0xffff &&
		    priv->pf_bond < 0 && attr->transfer)
			flow_dv_translate_item_source_vport
				(matcher, key, priv->vport_id, mask);
		/*
		 * We should always set the vport metadata register,
		 * otherwise the SW steering library can drop
		 * the rule if wire vport metadata value is not zero,
		 * it depends on kernel configuration.
		 */
		flow_dv_translate_item_meta_vport(matcher, key,
						  priv->vport_meta_tag,
						  priv->vport_meta_mask);
	} else {
		/* No metadata support: match on the vport field directly. */
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
	}
	return 0;
}
8769
8770 /**
8771  * Add ICMP6 item to matcher and to the value.
8772  *
8773  * @param[in, out] matcher
8774  *   Flow matcher.
8775  * @param[in, out] key
8776  *   Flow matcher value.
8777  * @param[in] item
8778  *   Flow pattern to translate.
8779  * @param[in] inner
8780  *   Item is inner pattern.
8781  */
8782 static void
8783 flow_dv_translate_item_icmp6(void *matcher, void *key,
8784                               const struct rte_flow_item *item,
8785                               int inner)
8786 {
8787         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
8788         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
8789         void *headers_m;
8790         void *headers_v;
8791         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8792                                      misc_parameters_3);
8793         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8794         if (inner) {
8795                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8796                                          inner_headers);
8797                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8798         } else {
8799                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8800                                          outer_headers);
8801                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8802         }
8803         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8804         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
8805         if (!icmp6_v)
8806                 return;
8807         if (!icmp6_m)
8808                 icmp6_m = &rte_flow_item_icmp6_mask;
8809         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
8810         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
8811                  icmp6_v->type & icmp6_m->type);
8812         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
8813         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
8814                  icmp6_v->code & icmp6_m->code);
8815 }
8816
8817 /**
8818  * Add ICMP item to matcher and to the value.
8819  *
8820  * @param[in, out] matcher
8821  *   Flow matcher.
8822  * @param[in, out] key
8823  *   Flow matcher value.
8824  * @param[in] item
8825  *   Flow pattern to translate.
8826  * @param[in] inner
8827  *   Item is inner pattern.
8828  */
8829 static void
8830 flow_dv_translate_item_icmp(void *matcher, void *key,
8831                             const struct rte_flow_item *item,
8832                             int inner)
8833 {
8834         const struct rte_flow_item_icmp *icmp_m = item->mask;
8835         const struct rte_flow_item_icmp *icmp_v = item->spec;
8836         uint32_t icmp_header_data_m = 0;
8837         uint32_t icmp_header_data_v = 0;
8838         void *headers_m;
8839         void *headers_v;
8840         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8841                                      misc_parameters_3);
8842         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8843         if (inner) {
8844                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8845                                          inner_headers);
8846                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8847         } else {
8848                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8849                                          outer_headers);
8850                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8851         }
8852         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8853         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
8854         if (!icmp_v)
8855                 return;
8856         if (!icmp_m)
8857                 icmp_m = &rte_flow_item_icmp_mask;
8858         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
8859                  icmp_m->hdr.icmp_type);
8860         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
8861                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
8862         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
8863                  icmp_m->hdr.icmp_code);
8864         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
8865                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
8866         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
8867         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
8868         if (icmp_header_data_m) {
8869                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
8870                 icmp_header_data_v |=
8871                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
8872                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
8873                          icmp_header_data_m);
8874                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
8875                          icmp_header_data_v & icmp_header_data_m);
8876         }
8877 }
8878
8879 /**
8880  * Add GTP item to matcher and to the value.
8881  *
8882  * @param[in, out] matcher
8883  *   Flow matcher.
8884  * @param[in, out] key
8885  *   Flow matcher value.
8886  * @param[in] item
8887  *   Flow pattern to translate.
8888  * @param[in] inner
8889  *   Item is inner pattern.
8890  */
8891 static void
8892 flow_dv_translate_item_gtp(void *matcher, void *key,
8893                            const struct rte_flow_item *item, int inner)
8894 {
8895         const struct rte_flow_item_gtp *gtp_m = item->mask;
8896         const struct rte_flow_item_gtp *gtp_v = item->spec;
8897         void *headers_m;
8898         void *headers_v;
8899         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8900                                      misc_parameters_3);
8901         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8902         uint16_t dport = RTE_GTPU_UDP_PORT;
8903
8904         if (inner) {
8905                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8906                                          inner_headers);
8907                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8908         } else {
8909                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8910                                          outer_headers);
8911                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8912         }
8913         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8914                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8915                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8916         }
8917         if (!gtp_v)
8918                 return;
8919         if (!gtp_m)
8920                 gtp_m = &rte_flow_item_gtp_mask;
8921         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
8922                  gtp_m->v_pt_rsv_flags);
8923         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
8924                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
8925         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
8926         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
8927                  gtp_v->msg_type & gtp_m->msg_type);
8928         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
8929                  rte_be_to_cpu_32(gtp_m->teid));
8930         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
8931                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
8932 }
8933
/**
 * Add GTP PSC item to matcher.
 *
 * Forces a match on the GTP E-flag and on a next-extension-header type of
 * 0x85 (PDU session container), then optionally matches the PDU type and
 * QFI carried by the item.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 *
 * @return
 *   Always 0 (kept for callback-signature symmetry).
 */
static int
flow_dv_translate_item_gtp_psc(void *matcher, void *key,
			       const struct rte_flow_item *item)
{
	const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
			misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	/*
	 * Byte view of the GTP dword 2 (seq_num/npdu_num/next ext header).
	 * NOTE(review): the field placement within w32 depends on host byte
	 * order before the rte_cpu_to_be_32() swap — confirm on big-endian.
	 */
	union {
		uint32_t w32;
		struct {
			uint16_t seq_num;
			uint8_t npdu_num;
			uint8_t next_ext_header_type;
		};
	} dw_2;
	uint8_t gtp_flags;

	/* Always set E-flag match on one, regardless of GTP item settings. */
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
	/* Set next extension header type: mask all bits, match 0x85 (PSC). */
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0xff;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0x85;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	if (gtp_psc_v) {
		/* Byte view of the first extension header dword. */
		union {
			uint32_t w32;
			struct {
				uint8_t len;
				uint8_t type_flags;
				uint8_t qfi;
				uint8_t reserved;
			};
		} dw_0;

		/* Set extension header PDU type and QoS (QFI). */
		if (!gtp_psc_m)
			gtp_psc_m = &rte_flow_item_gtp_psc_mask;
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
							gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
	}
	return 0;
}
9009
/**
 * Add eCPRI item to matcher and to the value.
 *
 * Matching is done through the flex-parser programmable sample fields:
 * sample #0 carries the eCPRI common header (type byte), sample #1 the
 * first dword of the message body when its mask is non-zero.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
			     void *key, const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	struct rte_ecpri_common_hdr common;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * Maximal four DW samples are supported in a single matching now.
	 * Two are used now for a eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
	if (!ecpri_m->hdr.common.u32)
		return;
	/* Sample IDs assigned by FW to the flex parser for eCPRI. */
	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
	/* Need to take the whole DW as the mask to fill the entry. */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big endian (network order) in the header. */
	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
	/* Sample#0, used for matching type, offset 0. */
	MLX5_SET(fte_match_set_misc4, misc4_m,
		 prog_sample_field_id_0, samples[0]);
	/* It makes no sense to set the sample ID in the mask field. */
	MLX5_SET(fte_match_set_misc4, misc4_v,
		 prog_sample_field_id_0, samples[0]);
	/*
	 * Checking if message body part needs to be matched.
	 * Some wildcard rules only matching type field should be supported.
	 */
	if (ecpri_m->hdr.dummy[0]) {
		/* Decode the type byte to know if the body is matchable. */
		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
		switch (common.type) {
		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
					    prog_sample_field_value_1);
			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
					    prog_sample_field_value_1);
			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
					    ecpri_m->hdr.dummy[0];
			/* Sample#1, to match message body, offset 4. */
			MLX5_SET(fte_match_set_misc4, misc4_m,
				 prog_sample_field_id_1, samples[1]);
			MLX5_SET(fte_match_set_misc4, misc4_v,
				 prog_sample_field_id_1, samples[1]);
			break;
		default:
			/* Others, do not match any sample ID. */
			break;
		}
	}
}
9096
/* All-zero reference buffer used to detect empty match criteria sections. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Nonzero when the given header section of the criteria is all zeros. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9102
9103 /**
9104  * Calculate flow matcher enable bitmap.
9105  *
9106  * @param match_criteria
9107  *   Pointer to flow matcher criteria.
9108  *
9109  * @return
9110  *   Bitmap of enabled fields.
9111  */
9112 static uint8_t
9113 flow_dv_matcher_enable(uint32_t *match_criteria)
9114 {
9115         uint8_t match_criteria_enable;
9116
9117         match_criteria_enable =
9118                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9119                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9120         match_criteria_enable |=
9121                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9122                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9123         match_criteria_enable |=
9124                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9125                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9126         match_criteria_enable |=
9127                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9128                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9129         match_criteria_enable |=
9130                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9131                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9132         match_criteria_enable |=
9133                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9134                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9135         return match_criteria_enable;
9136 }
9137
/**
 * Hash-list creation callback for flow tables.
 *
 * Allocates the table data entry, creates the HW table object and, for
 * non-root tables, the jump action pointing to it, then initializes the
 * per-table matcher cache.
 *
 * @param[in] list
 *   Hash list; its ctx is the shared device context.
 * @param[in] key64
 *   Packed mlx5_flow_tbl_key (table id, direction, domain, dummy).
 * @param[in] cb_ctx
 *   Pointer to struct mlx5_flow_cb_ctx with dev, error and tunnel prms.
 *
 * @return
 *   The new hash-list entry on success, NULL otherwise (error is set).
 */
struct mlx5_hlist_entry *
flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
	struct rte_flow_error *error = ctx->error;
	union mlx5_flow_tbl_key key = { .v64 = key64 };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = !!tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.direction;
	tbl_data->is_transfer = !!key.domain;
	tbl_data->dummy = !!key.dummy;
	tbl_data->table_id = key.table_id;
	tbl = &tbl_data->tbl;
	/* A dummy table needs no HW object. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Pick the steering domain: FDB, NIC TX or NIC RX. */
	if (key.domain)
		domain = sh->fdb_domain;
	else if (key.direction)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* Only non-root tables (id != 0) can be jump targets. */
	if (key.table_id) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
	      key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
	      key.table_id);
	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
			     flow_dv_matcher_create_cb,
			     flow_dv_matcher_match_cb,
			     flow_dv_matcher_remove_cb);
	return &tbl_data->entry;
}
9209
9210 int
9211 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9212                      struct mlx5_hlist_entry *entry, uint64_t key64,
9213                      void *cb_ctx __rte_unused)
9214 {
9215         struct mlx5_flow_tbl_data_entry *tbl_data =
9216                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9217         union mlx5_flow_tbl_key key = { .v64 = key64 };
9218
9219         return tbl_data->table_id != key.table_id ||
9220                tbl_data->dummy != key.dummy ||
9221                tbl_data->is_transfer != key.domain ||
9222                tbl_data->is_egress != key.direction;
9223 }
9224
/**
 * Get a flow table.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] table_id
 *   Table id to use.
 * @param[in] egress
 *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] external
 *   Whether the table is created by an external application.
 * @param[in] tunnel
 *   Tunnel the table belongs to, NULL if none.
 * @param[in] group_id
 *   Original (tunnel offload) flow group id the table maps to.
 * @param[in] dummy
 *   Dummy entry for dv API.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   Returns tables resource based on the index, NULL in case of failed.
 */
struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 uint8_t transfer,
			 bool external,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group_id, uint8_t dummy,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	union mlx5_flow_tbl_key table_key = {
		{
			.table_id = table_id,
			.dummy = dummy,
			.domain = !!transfer,
			.direction = !!egress,
		}
	};
	struct mlx5_flow_tbl_tunnel_prm tt_prm = {
		.tunnel = tunnel,
		.group_id = group_id,
		.external = external,
	};
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &tt_prm,
	};
	struct mlx5_hlist_entry *entry;
	struct mlx5_flow_tbl_data_entry *tbl_data;

	/* Registration either finds an existing table or creates it. */
	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot get table");
		return NULL;
	}
	DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
		table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
	return &tbl_data->tbl;
}
9287
/**
 * Hash-list removal callback for flow tables.
 *
 * Destroys the jump action and HW table object, unregisters the tunnel
 * offload group mapping if needed, releases the matcher cache and frees
 * the indexed-pool entry.
 *
 * @param[in] list
 *   Hash list; its ctx is the shared device context.
 * @param[in] entry
 *   Hash-list entry embedded in the table data entry being removed.
 */
void
flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
		      struct mlx5_hlist_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	/* The jump action must go before the table it points to. */
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_hlist_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		uint32_t table_id = tbl_data->table_id;

		/* Drop the tunnel group mapping this table was created for. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"Table_id %u tunnel %u group %u released.",
			table_id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	mlx5_cache_list_destroy(&tbl_data->matchers);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
9328
9329 /**
9330  * Release a flow table.
9331  *
9332  * @param[in] sh
9333  *   Pointer to device shared structure.
9334  * @param[in] tbl
9335  *   Table resource to be released.
9336  *
9337  * @return
9338  *   Returns 0 if table was released, else return 1;
9339  */
9340 static int
9341 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
9342                              struct mlx5_flow_tbl_resource *tbl)
9343 {
9344         struct mlx5_flow_tbl_data_entry *tbl_data =
9345                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9346
9347         if (!tbl)
9348                 return 0;
9349         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
9350 }
9351
9352 int
9353 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
9354                          struct mlx5_cache_entry *entry, void *cb_ctx)
9355 {
9356         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9357         struct mlx5_flow_dv_matcher *ref = ctx->data;
9358         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
9359                                                         entry);
9360
9361         return cur->crc != ref->crc ||
9362                cur->priority != ref->priority ||
9363                memcmp((const void *)cur->mask.buf,
9364                       (const void *)ref->mask.buf, ref->mask.size);
9365 }
9366
/**
 * Cache-list creation callback for flow matchers.
 *
 * Allocates a matcher copy from the reference and creates the underlying
 * HW matcher object on the reference's table.
 *
 * @param[in] list
 *   Cache list; its ctx is the shared device context.
 * @param[in] entry
 *   Unused.
 * @param[in] cb_ctx
 *   Pointer to struct mlx5_flow_cb_ctx whose data is the reference matcher.
 *
 * @return
 *   The new cache entry on success, NULL otherwise (error is set).
 */
struct mlx5_cache_entry *
flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
			  struct mlx5_cache_entry *entry __rte_unused,
			  void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_matcher *ref = ctx->data;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&ref->mask,
	};
	struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
							    typeof(*tbl), tbl);
	int ret;

	cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
	if (!cache) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	/* Copy the reference, then attach the HW object to the copy. */
	*cache = *ref;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache->mask.buf);
	dv_attr.priority = ref->priority;
	if (tbl->is_egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
					       &cache->matcher_object);
	if (ret) {
		mlx5_free(cache);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	return &cache->entry;
}
9408
9409 /**
9410  * Register the flow matcher.
9411  *
9412  * @param[in, out] dev
9413  *   Pointer to rte_eth_dev structure.
9414  * @param[in, out] matcher
9415  *   Pointer to flow matcher.
9416  * @param[in, out] key
9417  *   Pointer to flow table key.
9418  * @parm[in, out] dev_flow
9419  *   Pointer to the dev_flow.
9420  * @param[out] error
9421  *   pointer to error structure.
9422  *
9423  * @return
9424  *   0 on success otherwise -errno and errno is set.
9425  */
9426 static int
9427 flow_dv_matcher_register(struct rte_eth_dev *dev,
9428                          struct mlx5_flow_dv_matcher *ref,
9429                          union mlx5_flow_tbl_key *key,
9430                          struct mlx5_flow *dev_flow,
9431                          const struct mlx5_flow_tunnel *tunnel,
9432                          uint32_t group_id,
9433                          struct rte_flow_error *error)
9434 {
9435         struct mlx5_cache_entry *entry;
9436         struct mlx5_flow_dv_matcher *cache;
9437         struct mlx5_flow_tbl_resource *tbl;
9438         struct mlx5_flow_tbl_data_entry *tbl_data;
9439         struct mlx5_flow_cb_ctx ctx = {
9440                 .error = error,
9441                 .data = ref,
9442         };
9443
9444         /**
9445          * tunnel offload API requires this registration for cases when
9446          * tunnel match rule was inserted before tunnel set rule.
9447          */
9448         tbl = flow_dv_tbl_resource_get(dev, key->table_id,
9449                                        key->direction, key->domain,
9450                                        dev_flow->external, tunnel,
9451                                        group_id, 0, error);
9452         if (!tbl)
9453                 return -rte_errno;      /* No need to refill the error info */
9454         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9455         ref->tbl = tbl;
9456         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
9457         if (!entry) {
9458                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9459                 return rte_flow_error_set(error, ENOMEM,
9460                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9461                                           "cannot allocate ref memory");
9462         }
9463         cache = container_of(entry, typeof(*cache), entry);
9464         dev_flow->handle->dvh.matcher = cache;
9465         return 0;
9466 }
9467
9468 struct mlx5_hlist_entry *
9469 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
9470 {
9471         struct mlx5_dev_ctx_shared *sh = list->ctx;
9472         struct rte_flow_error *error = ctx;
9473         struct mlx5_flow_dv_tag_resource *entry;
9474         uint32_t idx = 0;
9475         int ret;
9476
9477         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
9478         if (!entry) {
9479                 rte_flow_error_set(error, ENOMEM,
9480                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9481                                    "cannot allocate resource memory");
9482                 return NULL;
9483         }
9484         entry->idx = idx;
9485         entry->tag_id = key;
9486         ret = mlx5_flow_os_create_flow_action_tag(key,
9487                                                   &entry->action);
9488         if (ret) {
9489                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
9490                 rte_flow_error_set(error, ENOMEM,
9491                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9492                                    NULL, "cannot create action");
9493                 return NULL;
9494         }
9495         return &entry->entry;
9496 }
9497
9498 int
9499 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
9500                      struct mlx5_hlist_entry *entry, uint64_t key,
9501                      void *cb_ctx __rte_unused)
9502 {
9503         struct mlx5_flow_dv_tag_resource *tag =
9504                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9505
9506         return key != tag->tag_id;
9507 }
9508
9509 /**
9510  * Find existing tag resource or create and register a new one.
9511  *
9512  * @param dev[in, out]
9513  *   Pointer to rte_eth_dev structure.
9514  * @param[in, out] tag_be24
9515  *   Tag value in big endian then R-shift 8.
9516  * @parm[in, out] dev_flow
9517  *   Pointer to the dev_flow.
9518  * @param[out] error
9519  *   pointer to error structure.
9520  *
9521  * @return
9522  *   0 on success otherwise -errno and errno is set.
9523  */
9524 static int
9525 flow_dv_tag_resource_register
9526                         (struct rte_eth_dev *dev,
9527                          uint32_t tag_be24,
9528                          struct mlx5_flow *dev_flow,
9529                          struct rte_flow_error *error)
9530 {
9531         struct mlx5_priv *priv = dev->data->dev_private;
9532         struct mlx5_flow_dv_tag_resource *cache_resource;
9533         struct mlx5_hlist_entry *entry;
9534
9535         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
9536         if (entry) {
9537                 cache_resource = container_of
9538                         (entry, struct mlx5_flow_dv_tag_resource, entry);
9539                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
9540                 dev_flow->dv.tag_resource = cache_resource;
9541                 return 0;
9542         }
9543         return -rte_errno;
9544 }
9545
9546 void
9547 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
9548                       struct mlx5_hlist_entry *entry)
9549 {
9550         struct mlx5_dev_ctx_shared *sh = list->ctx;
9551         struct mlx5_flow_dv_tag_resource *tag =
9552                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9553
9554         MLX5_ASSERT(tag && sh && tag->action);
9555         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
9556         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
9557         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
9558 }
9559
9560 /**
9561  * Release the tag.
9562  *
9563  * @param dev
9564  *   Pointer to Ethernet device.
9565  * @param tag_idx
9566  *   Tag index.
9567  *
9568  * @return
9569  *   1 while a reference on it exists, 0 when freed.
9570  */
9571 static int
9572 flow_dv_tag_release(struct rte_eth_dev *dev,
9573                     uint32_t tag_idx)
9574 {
9575         struct mlx5_priv *priv = dev->data->dev_private;
9576         struct mlx5_flow_dv_tag_resource *tag;
9577
9578         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
9579         if (!tag)
9580                 return 0;
9581         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
9582                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
9583         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
9584 }
9585
9586 /**
9587  * Translate port ID action to vport.
9588  *
9589  * @param[in] dev
9590  *   Pointer to rte_eth_dev structure.
9591  * @param[in] action
9592  *   Pointer to the port ID action.
9593  * @param[out] dst_port_id
9594  *   The target port ID.
9595  * @param[out] error
9596  *   Pointer to the error structure.
9597  *
9598  * @return
9599  *   0 on success, a negative errno value otherwise and rte_errno is set.
9600  */
9601 static int
9602 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
9603                                  const struct rte_flow_action *action,
9604                                  uint32_t *dst_port_id,
9605                                  struct rte_flow_error *error)
9606 {
9607         uint32_t port;
9608         struct mlx5_priv *priv;
9609         const struct rte_flow_action_port_id *conf =
9610                         (const struct rte_flow_action_port_id *)action->conf;
9611
9612         port = conf->original ? dev->data->port_id : conf->id;
9613         priv = mlx5_port_to_eswitch_info(port, false);
9614         if (!priv)
9615                 return rte_flow_error_set(error, -rte_errno,
9616                                           RTE_FLOW_ERROR_TYPE_ACTION,
9617                                           NULL,
9618                                           "No eswitch info was found for port");
9619 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
9620         /*
9621          * This parameter is transferred to
9622          * mlx5dv_dr_action_create_dest_ib_port().
9623          */
9624         *dst_port_id = priv->dev_port;
9625 #else
9626         /*
9627          * Legacy mode, no LAG configurations is supported.
9628          * This parameter is transferred to
9629          * mlx5dv_dr_action_create_dest_vport().
9630          */
9631         *dst_port_id = priv->vport_id;
9632 #endif
9633         return 0;
9634 }
9635
9636 /**
9637  * Create a counter with aging configuration.
9638  *
9639  * @param[in] dev
9640  *   Pointer to rte_eth_dev structure.
9641  * @param[out] count
9642  *   Pointer to the counter action configuration.
9643  * @param[in] age
9644  *   Pointer to the aging action configuration.
9645  *
9646  * @return
9647  *   Index to flow counter on success, 0 otherwise.
9648  */
9649 static uint32_t
9650 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
9651                                 struct mlx5_flow *dev_flow,
9652                                 const struct rte_flow_action_count *count,
9653                                 const struct rte_flow_action_age *age)
9654 {
9655         uint32_t counter;
9656         struct mlx5_age_param *age_param;
9657
9658         if (count && count->shared)
9659                 counter = flow_dv_counter_get_shared(dev, count->id);
9660         else
9661                 counter = flow_dv_counter_alloc(dev, !!age);
9662         if (!counter || age == NULL)
9663                 return counter;
9664         age_param  = flow_dv_counter_idx_get_age(dev, counter);
9665         age_param->context = age->context ? age->context :
9666                 (void *)(uintptr_t)(dev_flow->flow_idx);
9667         age_param->timeout = age->timeout;
9668         age_param->port_id = dev->data->port_id;
9669         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
9670         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
9671         return counter;
9672 }
9673
9674 /**
9675  * Add Tx queue matcher
9676  *
9677  * @param[in] dev
9678  *   Pointer to the dev struct.
9679  * @param[in, out] matcher
9680  *   Flow matcher.
9681  * @param[in, out] key
9682  *   Flow matcher value.
9683  * @param[in] item
9684  *   Flow pattern to translate.
9685  * @param[in] inner
9686  *   Item is inner pattern.
9687  */
9688 static void
9689 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
9690                                 void *matcher, void *key,
9691                                 const struct rte_flow_item *item)
9692 {
9693         const struct mlx5_rte_flow_item_tx_queue *queue_m;
9694         const struct mlx5_rte_flow_item_tx_queue *queue_v;
9695         void *misc_m =
9696                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9697         void *misc_v =
9698                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9699         struct mlx5_txq_ctrl *txq;
9700         uint32_t queue;
9701
9702
9703         queue_m = (const void *)item->mask;
9704         if (!queue_m)
9705                 return;
9706         queue_v = (const void *)item->spec;
9707         if (!queue_v)
9708                 return;
9709         txq = mlx5_txq_get(dev, queue_v->queue);
9710         if (!txq)
9711                 return;
9712         queue = txq->obj->sq->id;
9713         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
9714         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
9715                  queue & queue_m->queue);
9716         mlx5_txq_release(dev, queue_v->queue);
9717 }
9718
9719 /**
9720  * Set the hash fields according to the @p flow information.
9721  *
9722  * @param[in] dev_flow
9723  *   Pointer to the mlx5_flow.
9724  * @param[in] rss_desc
9725  *   Pointer to the mlx5_flow_rss_desc.
9726  */
9727 static void
9728 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
9729                        struct mlx5_flow_rss_desc *rss_desc)
9730 {
9731         uint64_t items = dev_flow->handle->layers;
9732         int rss_inner = 0;
9733         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
9734
9735         dev_flow->hash_fields = 0;
9736 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
9737         if (rss_desc->level >= 2) {
9738                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
9739                 rss_inner = 1;
9740         }
9741 #endif
9742         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
9743             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
9744                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
9745                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
9746                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
9747                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
9748                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
9749                         else
9750                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
9751                 }
9752         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
9753                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
9754                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
9755                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
9756                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
9757                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
9758                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
9759                         else
9760                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
9761                 }
9762         }
9763         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
9764             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
9765                 if (rss_types & ETH_RSS_UDP) {
9766                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
9767                                 dev_flow->hash_fields |=
9768                                                 IBV_RX_HASH_SRC_PORT_UDP;
9769                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
9770                                 dev_flow->hash_fields |=
9771                                                 IBV_RX_HASH_DST_PORT_UDP;
9772                         else
9773                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
9774                 }
9775         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
9776                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
9777                 if (rss_types & ETH_RSS_TCP) {
9778                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
9779                                 dev_flow->hash_fields |=
9780                                                 IBV_RX_HASH_SRC_PORT_TCP;
9781                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
9782                                 dev_flow->hash_fields |=
9783                                                 IBV_RX_HASH_DST_PORT_TCP;
9784                         else
9785                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
9786                 }
9787         }
9788 }
9789
9790 /**
9791  * Prepare an Rx Hash queue.
9792  *
9793  * @param dev
9794  *   Pointer to Ethernet device.
9795  * @param[in] dev_flow
9796  *   Pointer to the mlx5_flow.
9797  * @param[in] rss_desc
9798  *   Pointer to the mlx5_flow_rss_desc.
9799  * @param[out] hrxq_idx
9800  *   Hash Rx queue index.
9801  *
9802  * @return
9803  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
9804  */
9805 static struct mlx5_hrxq *
9806 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
9807                      struct mlx5_flow *dev_flow,
9808                      struct mlx5_flow_rss_desc *rss_desc,
9809                      uint32_t *hrxq_idx)
9810 {
9811         struct mlx5_priv *priv = dev->data->dev_private;
9812         struct mlx5_flow_handle *dh = dev_flow->handle;
9813         struct mlx5_hrxq *hrxq;
9814
9815         MLX5_ASSERT(rss_desc->queue_num);
9816         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
9817         rss_desc->hash_fields = dev_flow->hash_fields;
9818         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
9819         rss_desc->shared_rss = 0;
9820         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
9821         if (!*hrxq_idx)
9822                 return NULL;
9823         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
9824                               *hrxq_idx);
9825         return hrxq;
9826 }
9827
9828 /**
9829  * Release sample sub action resource.
9830  *
9831  * @param[in, out] dev
9832  *   Pointer to rte_eth_dev structure.
9833  * @param[in] act_res
9834  *   Pointer to sample sub action resource.
9835  */
9836 static void
9837 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
9838                                    struct mlx5_flow_sub_actions_idx *act_res)
9839 {
9840         if (act_res->rix_hrxq) {
9841                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
9842                 act_res->rix_hrxq = 0;
9843         }
9844         if (act_res->rix_encap_decap) {
9845                 flow_dv_encap_decap_resource_release(dev,
9846                                                      act_res->rix_encap_decap);
9847                 act_res->rix_encap_decap = 0;
9848         }
9849         if (act_res->rix_port_id_action) {
9850                 flow_dv_port_id_action_resource_release(dev,
9851                                                 act_res->rix_port_id_action);
9852                 act_res->rix_port_id_action = 0;
9853         }
9854         if (act_res->rix_tag) {
9855                 flow_dv_tag_release(dev, act_res->rix_tag);
9856                 act_res->rix_tag = 0;
9857         }
9858         if (act_res->rix_jump) {
9859                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
9860                 act_res->rix_jump = 0;
9861         }
9862 }
9863
9864 int
9865 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
9866                         struct mlx5_cache_entry *entry, void *cb_ctx)
9867 {
9868         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9869         struct rte_eth_dev *dev = ctx->dev;
9870         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9871         struct mlx5_flow_dv_sample_resource *cache_resource =
9872                         container_of(entry, typeof(*cache_resource), entry);
9873
9874         if (resource->ratio == cache_resource->ratio &&
9875             resource->ft_type == cache_resource->ft_type &&
9876             resource->ft_id == cache_resource->ft_id &&
9877             resource->set_action == cache_resource->set_action &&
9878             !memcmp((void *)&resource->sample_act,
9879                     (void *)&cache_resource->sample_act,
9880                     sizeof(struct mlx5_flow_sub_actions_list))) {
9881                 /*
9882                  * Existing sample action should release the prepared
9883                  * sub-actions reference counter.
9884                  */
9885                 flow_dv_sample_sub_actions_release(dev,
9886                                                 &resource->sample_idx);
9887                 return 0;
9888         }
9889         return 1;
9890 }
9891
/*
 * Cache-list create callback for sample resources.
 *
 * Allocates a sample resource from the indexed pool, creates the
 * "normal path" flow table on the next table level, and creates the DR
 * sampler action. For FDB tables the shared default-miss action is
 * appended to the sample action list. Returns the embedded cache entry,
 * or NULL with @p ctx error filled in.
 */
struct mlx5_cache_entry *
flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
	void **sample_dv_actions = resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *cache_resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	/* The normal (non-sampled) path continues on the next table level. */
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new sample resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	/* Copy the reference; sub-action ownership moves to the cache entry. */
	*cache_resource = *resource;
	/* Create normal path table level */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	cache_resource->normal_path_tbl = tbl;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		/* FDB sampling needs the shared default-miss action. */
		if (!sh->default_miss_action) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"default miss action was not "
						"created");
			goto error;
		}
		sample_dv_actions[resource->sample_act.actions_num++] =
						sh->default_miss_action;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = cache_resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = cache_resource->set_action;
	if (mlx5_os_flow_dr_create_flow_action_sampler
			(&sampler_attr, &cache_resource->verbs_action)) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	cache_resource->idx = idx;
	cache_resource->dev = dev;
	return &cache_resource->entry;
error:
	/*
	 * NOTE(review): sub-actions are only released here for non-FDB
	 * tables — presumably the FDB sub-actions are owned/released by
	 * the caller's path; confirm against the callers.
	 */
	if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
		flow_dv_sample_sub_actions_release(dev,
						   &cache_resource->sample_idx);
	if (cache_resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				cache_resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
9980
9981 /**
9982  * Find existing sample resource or create and register a new one.
9983  *
9984  * @param[in, out] dev
9985  *   Pointer to rte_eth_dev structure.
9986  * @param[in] resource
9987  *   Pointer to sample resource.
9988  * @parm[in, out] dev_flow
9989  *   Pointer to the dev_flow.
9990  * @param[out] error
9991  *   pointer to error structure.
9992  *
9993  * @return
9994  *   0 on success otherwise -errno and errno is set.
9995  */
9996 static int
9997 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
9998                          struct mlx5_flow_dv_sample_resource *resource,
9999                          struct mlx5_flow *dev_flow,
10000                          struct rte_flow_error *error)
10001 {
10002         struct mlx5_flow_dv_sample_resource *cache_resource;
10003         struct mlx5_cache_entry *entry;
10004         struct mlx5_priv *priv = dev->data->dev_private;
10005         struct mlx5_flow_cb_ctx ctx = {
10006                 .dev = dev,
10007                 .error = error,
10008                 .data = resource,
10009         };
10010
10011         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10012         if (!entry)
10013                 return -rte_errno;
10014         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10015         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10016         dev_flow->dv.sample_res = cache_resource;
10017         return 0;
10018 }
10019
10020 int
10021 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10022                             struct mlx5_cache_entry *entry, void *cb_ctx)
10023 {
10024         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10025         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10026         struct rte_eth_dev *dev = ctx->dev;
10027         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10028                         container_of(entry, typeof(*cache_resource), entry);
10029         uint32_t idx = 0;
10030
10031         if (resource->num_of_dest == cache_resource->num_of_dest &&
10032             resource->ft_type == cache_resource->ft_type &&
10033             !memcmp((void *)cache_resource->sample_act,
10034                     (void *)resource->sample_act,
10035                    (resource->num_of_dest *
10036                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10037                 /*
10038                  * Existing sample action should release the prepared
10039                  * sub-actions reference counter.
10040                  */
10041                 for (idx = 0; idx < resource->num_of_dest; idx++)
10042                         flow_dv_sample_sub_actions_release(dev,
10043                                         &resource->sample_idx[idx]);
10044                 return 0;
10045         }
10046         return 1;
10047 }
10048
10049 struct mlx5_cache_entry *
10050 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10051                          struct mlx5_cache_entry *entry __rte_unused,
10052                          void *cb_ctx)
10053 {
10054         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10055         struct rte_eth_dev *dev = ctx->dev;
10056         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10057         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10058         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10059         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10060         struct mlx5_priv *priv = dev->data->dev_private;
10061         struct mlx5_dev_ctx_shared *sh = priv->sh;
10062         struct mlx5_flow_sub_actions_list *sample_act;
10063         struct mlx5dv_dr_domain *domain;
10064         uint32_t idx = 0, res_idx = 0;
10065         struct rte_flow_error *error = ctx->error;
10066         uint64_t action_flags;
10067         int ret;
10068
10069         /* Register new destination array resource. */
10070         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10071                                             &res_idx);
10072         if (!cache_resource) {
10073                 rte_flow_error_set(error, ENOMEM,
10074                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10075                                           NULL,
10076                                           "cannot allocate resource memory");
10077                 return NULL;
10078         }
10079         *cache_resource = *resource;
10080         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10081                 domain = sh->fdb_domain;
10082         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10083                 domain = sh->rx_domain;
10084         else
10085                 domain = sh->tx_domain;
10086         for (idx = 0; idx < resource->num_of_dest; idx++) {
10087                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10088                                  mlx5_malloc(MLX5_MEM_ZERO,
10089                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10090                                  0, SOCKET_ID_ANY);
10091                 if (!dest_attr[idx]) {
10092                         rte_flow_error_set(error, ENOMEM,
10093                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10094                                            NULL,
10095                                            "cannot allocate resource memory");
10096                         goto error;
10097                 }
10098                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10099                 sample_act = &resource->sample_act[idx];
10100                 action_flags = sample_act->action_flags;
10101                 switch (action_flags) {
10102                 case MLX5_FLOW_ACTION_QUEUE:
10103                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10104                         break;
10105                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10106                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10107                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10108                         dest_attr[idx]->dest_reformat->reformat =
10109                                         sample_act->dr_encap_action;
10110                         dest_attr[idx]->dest_reformat->dest =
10111                                         sample_act->dr_port_id_action;
10112                         break;
10113                 case MLX5_FLOW_ACTION_PORT_ID:
10114                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10115                         break;
10116                 case MLX5_FLOW_ACTION_JUMP:
10117                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10118                         break;
10119                 default:
10120                         rte_flow_error_set(error, EINVAL,
10121                                            RTE_FLOW_ERROR_TYPE_ACTION,
10122                                            NULL,
10123                                            "unsupported actions type");
10124                         goto error;
10125                 }
10126         }
10127         /* create a dest array actioin */
10128         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10129                                                 (domain,
10130                                                  cache_resource->num_of_dest,
10131                                                  dest_attr,
10132                                                  &cache_resource->action);
10133         if (ret) {
10134                 rte_flow_error_set(error, ENOMEM,
10135                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10136                                    NULL,
10137                                    "cannot create destination array action");
10138                 goto error;
10139         }
10140         cache_resource->idx = res_idx;
10141         cache_resource->dev = dev;
10142         for (idx = 0; idx < resource->num_of_dest; idx++)
10143                 mlx5_free(dest_attr[idx]);
10144         return &cache_resource->entry;
10145 error:
10146         for (idx = 0; idx < resource->num_of_dest; idx++) {
10147                 struct mlx5_flow_sub_actions_idx *act_res =
10148                                         &cache_resource->sample_idx[idx];
10149                 if (act_res->rix_hrxq &&
10150                     !mlx5_hrxq_release(dev,
10151                                 act_res->rix_hrxq))
10152                         act_res->rix_hrxq = 0;
10153                 if (act_res->rix_encap_decap &&
10154                         !flow_dv_encap_decap_resource_release(dev,
10155                                 act_res->rix_encap_decap))
10156                         act_res->rix_encap_decap = 0;
10157                 if (act_res->rix_port_id_action &&
10158                         !flow_dv_port_id_action_resource_release(dev,
10159                                 act_res->rix_port_id_action))
10160                         act_res->rix_port_id_action = 0;
10161                 if (act_res->rix_jump &&
10162                         !flow_dv_jump_tbl_resource_release(dev,
10163                                 act_res->rix_jump))
10164                         act_res->rix_jump = 0;
10165                 if (dest_attr[idx])
10166                         mlx5_free(dest_attr[idx]);
10167         }
10168
10169         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10170         return NULL;
10171 }
10172
10173 /**
10174  * Find existing destination array resource or create and register a new one.
10175  *
10176  * @param[in, out] dev
10177  *   Pointer to rte_eth_dev structure.
10178  * @param[in] resource
10179  *   Pointer to destination array resource.
10180  * @parm[in, out] dev_flow
10181  *   Pointer to the dev_flow.
10182  * @param[out] error
10183  *   pointer to error structure.
10184  *
10185  * @return
10186  *   0 on success otherwise -errno and errno is set.
10187  */
10188 static int
10189 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10190                          struct mlx5_flow_dv_dest_array_resource *resource,
10191                          struct mlx5_flow *dev_flow,
10192                          struct rte_flow_error *error)
10193 {
10194         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10195         struct mlx5_priv *priv = dev->data->dev_private;
10196         struct mlx5_cache_entry *entry;
10197         struct mlx5_flow_cb_ctx ctx = {
10198                 .dev = dev,
10199                 .error = error,
10200                 .data = resource,
10201         };
10202
10203         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10204         if (!entry)
10205                 return -rte_errno;
10206         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10207         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10208         dev_flow->dv.dest_array_res = cache_resource;
10209         return 0;
10210 }
10211
10212 /**
10213  * Convert Sample action to DV specification.
10214  *
10215  * @param[in] dev
10216  *   Pointer to rte_eth_dev structure.
10217  * @param[in] action
10218  *   Pointer to sample action structure.
10219  * @param[in, out] dev_flow
10220  *   Pointer to the mlx5_flow.
10221  * @param[in] attr
10222  *   Pointer to the flow attributes.
10223  * @param[in, out] num_of_dest
10224  *   Pointer to the num of destination.
10225  * @param[in, out] sample_actions
10226  *   Pointer to sample actions list.
10227  * @param[in, out] res
10228  *   Pointer to sample resource.
10229  * @param[out] error
10230  *   Pointer to the error structure.
10231  *
10232  * @return
10233  *   0 on success, a negative errno value otherwise and rte_errno is set.
10234  */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
				const struct rte_flow_action_sample *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				uint32_t *num_of_dest,
				void **sample_actions,
				struct mlx5_flow_dv_sample_resource *res,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action *sub_actions;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5_flow_sub_actions_idx *sample_idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5_flow_rss_desc *rss_desc;
	uint64_t action_flags = 0;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	/* DR actions and resource indexes are accumulated into @res. */
	sample_act = &res->sample_act;
	sample_idx = &res->sample_idx;
	res->ratio = action->ratio;
	sub_actions = action->actions;
	/*
	 * Translate each sample sub-action up to the END terminator.
	 * Fate sub-actions (QUEUE/RSS/PORT_ID) also bump *num_of_dest so
	 * the caller can decide between a plain sample and a dest array.
	 */
	for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
		int type = sub_actions->type;
		/*
		 * Scratch slots used to save the dev_flow's current
		 * tag/port-id/encap resource before a sub-action
		 * registration overwrites it, and restore it after.
		 */
		uint32_t pre_rix = 0;
		void *pre_r;
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		{
			const struct rte_flow_action_queue *queue;
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			queue = sub_actions->conf;
			/* Single queue: one-entry RSS descriptor. */
			rss_desc->queue_num = 1;
			rss_desc->queue[0] = queue->index;
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			/*
			 * NOTE(review): the hrxq is recorded on the handle
			 * only when a MARK sub-action preceded this one in
			 * the list — presumably so flow destroy releases
			 * it; confirm the ordering assumption.
			 */
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS:
		{
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;

			rss = sub_actions->conf;
			memcpy(rss_desc->queue, rss->queue,
			       rss->queue_num * sizeof(uint16_t));
			rss_desc->queue_num = rss->queue_num;
			/* NULL RSS key indicates default RSS key. */
			rss_key = !rss->key ? rss_hash_default_key : rss->key;
			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
			/*
			 * rss->level and rss.types should be set in advance
			 * when expanding items for RSS.
			 */
			flow_dv_hashfields_set(dev_flow, rss_desc);
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			/* Same MARK-ordering dependency as the QUEUE case. */
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK:
		{
			uint32_t tag_be = mlx5_flow_mark_set
				(((const struct rte_flow_action_mark *)
				(sub_actions->conf))->id);

			dev_flow->handle->mark = 1;
			pre_rix = dev_flow->handle->dvh.rix_tag;
			/* Save the mark resource before sample */
			pre_r = dev_flow->dv.tag_resource;
			if (flow_dv_tag_resource_register(dev, tag_be,
						  dev_flow, error))
				return -rte_errno;
			MLX5_ASSERT(dev_flow->dv.tag_resource);
			sample_act->dr_tag_action =
				dev_flow->dv.tag_resource->action;
			sample_idx->rix_tag =
				dev_flow->handle->dvh.rix_tag;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_tag_action;
			/* Recover the mark resource after sample */
			dev_flow->dv.tag_resource = pre_r;
			dev_flow->handle->dvh.rix_tag = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT:
		{
			/* One counter per rte_flow, shared with sampling. */
			if (!flow->counter) {
				flow->counter =
					flow_dv_translate_create_counter(dev,
						dev_flow, sub_actions->conf,
						0);
				if (!flow->counter)
					return rte_flow_error_set
						(error, rte_errno,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"cannot create counter"
						" object.");
			}
			sample_act->dr_cnt_action =
				  (flow_dv_counter_get_by_idx(dev,
				  flow->counter, NULL))->action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_cnt_action;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
		{
			struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
			uint32_t port_id = 0;

			memset(&port_id_resource, 0, sizeof(port_id_resource));
			/* Save the port id resource before sample */
			pre_rix = dev_flow->handle->rix_port_id_action;
			pre_r = dev_flow->dv.port_id_action;
			if (flow_dv_translate_action_port_id(dev, sub_actions,
							     &port_id, error))
				return -rte_errno;
			port_id_resource.port_id = port_id;
			if (flow_dv_port_id_action_resource_register
			    (dev, &port_id_resource, dev_flow, error))
				return -rte_errno;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			sample_idx->rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_port_id_action;
			/* Recover the port id resource after sample */
			dev_flow->dv.port_id_action = pre_r;
			dev_flow->handle->rix_port_id_action = pre_rix;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Save the encap resource before sample */
			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
			pre_r = dev_flow->dv.encap_decap;
			if (flow_dv_create_action_l2_encap(dev, sub_actions,
							   dev_flow,
							   attr->transfer,
							   error))
				return -rte_errno;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			sample_idx->rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_encap_action;
			/* Recover the encap resource after sample */
			dev_flow->dv.encap_decap = pre_r;
			dev_flow->handle->dvh.rix_encap_decap = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Not support for sampler action");
		}
	}
	sample_act->action_flags = action_flags;
	res->ft_id = dev_flow->dv.group;
	if (attr->transfer) {
		/* Build a SET_ACTION writing the vport metadata tag into
		 * REG_C_0 so the sampled packet keeps its source port id.
		 */
		union {
			uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
			uint64_t set_action;
		} action_ctx = { .set_action = 0 };

		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		MLX5_SET(set_action_in, action_ctx.action_in, action_type,
			 MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, action_ctx.action_in, field,
			 MLX5_MODI_META_REG_C_0);
		MLX5_SET(set_action_in, action_ctx.action_in, data,
			 priv->vport_meta_tag);
		res->set_action = action_ctx.set_action;
	} else if (attr->ingress) {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	} else {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
	}
	return 0;
}
10462
10463 /**
 * Create the sample action and, for mirroring, the destination array action.
10465  *
10466  * @param[in] dev
10467  *   Pointer to rte_eth_dev structure.
10468  * @param[in, out] dev_flow
10469  *   Pointer to the mlx5_flow.
10470  * @param[in] num_of_dest
10471  *   The num of destination.
10472  * @param[in, out] res
10473  *   Pointer to sample resource.
10474  * @param[in, out] mdest_res
10475  *   Pointer to destination array resource.
10476  * @param[in] sample_actions
10477  *   Pointer to sample path actions list.
10478  * @param[in] action_flags
10479  *   Holds the actions detected until now.
10480  * @param[out] error
10481  *   Pointer to the error structure.
10482  *
10483  * @return
10484  *   0 on success, a negative errno value otherwise and rte_errno is set.
10485  */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
			     struct mlx5_flow *dev_flow,
			     uint32_t num_of_dest,
			     struct mlx5_flow_dv_sample_resource *res,
			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
			     void **sample_actions,
			     uint64_t action_flags,
			     struct rte_flow_error *error)
{
	/* update normal path action resource into last index of array */
	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
	struct mlx5_flow_sub_actions_list *sample_act =
					&mdest_res->sample_act[dest_index];
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	uint32_t normal_idx = 0;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (num_of_dest > 1) {
		/*
		 * Mirroring: collect the normal-path fate resources into
		 * the last dest_array slot, the sample-path resources into
		 * slot 0, then register one destination array action.
		 * Each recorded resource index transfers ownership to the
		 * dest_array resource for release on destroy.
		 */
		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
			/* Handle QP action for mirroring */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
				     (error, rte_errno,
				      RTE_FLOW_ERROR_TYPE_ACTION,
				      NULL,
				      "cannot create rx queue");
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
			sample_act->dr_queue_action = hrxq->action;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_jump =
				dev_flow->handle->rix_jump;
			sample_act->dr_jump_action =
				dev_flow->dv.jump->action;
			/* Clear the handle's index: the dest_array owns it. */
			dev_flow->handle->rix_jump = 0;
		}
		sample_act->actions_num = normal_idx;
		/* update sample action resource into first index of array */
		mdest_res->ft_type = res->ft_type;
		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
				sizeof(struct mlx5_flow_sub_actions_idx));
		memcpy(&mdest_res->sample_act[0], &res->sample_act,
				sizeof(struct mlx5_flow_sub_actions_list));
		mdest_res->num_of_dest = num_of_dest;
		if (flow_dv_dest_array_resource_register(dev, mdest_res,
							 dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't create sample "
						  "action");
	} else {
		/* Single destination: a plain sample resource suffices. */
		res->sub_actions = sample_actions;
		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "can't create sample action");
	}
	return 0;
}
10572
10573 /**
10574  * Remove an ASO age action from age actions list.
10575  *
10576  * @param[in] dev
10577  *   Pointer to the Ethernet device structure.
10578  * @param[in] age
10579  *   Pointer to the aso age action handler.
10580  */
10581 static void
10582 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
10583                                 struct mlx5_aso_age_action *age)
10584 {
10585         struct mlx5_age_info *age_info;
10586         struct mlx5_age_param *age_param = &age->age_params;
10587         struct mlx5_priv *priv = dev->data->dev_private;
10588         uint16_t expected = AGE_CANDIDATE;
10589
10590         age_info = GET_PORT_AGE_INFO(priv);
10591         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
10592                                          AGE_FREE, false, __ATOMIC_RELAXED,
10593                                          __ATOMIC_RELAXED)) {
10594                 /**
10595                  * We need the lock even it is age timeout,
10596                  * since age action may still in process.
10597                  */
10598                 rte_spinlock_lock(&age_info->aged_sl);
10599                 LIST_REMOVE(age, next);
10600                 rte_spinlock_unlock(&age_info->aged_sl);
10601                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
10602         }
10603 }
10604
10605 /**
10606  * Release an ASO age action.
10607  *
10608  * @param[in] dev
10609  *   Pointer to the Ethernet device structure.
 * @param[in] age_idx
 *   Index of ASO age action to release.
10615  *
10616  * @return
10617  *   0 when age action was removed, otherwise the number of references.
10618  */
10619 static int
10620 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
10621 {
10622         struct mlx5_priv *priv = dev->data->dev_private;
10623         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10624         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
10625         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
10626
10627         if (!ret) {
10628                 flow_dv_aso_age_remove_from_age(dev, age);
10629                 rte_spinlock_lock(&mng->free_sl);
10630                 LIST_INSERT_HEAD(&mng->free, age, next);
10631                 rte_spinlock_unlock(&mng->free_sl);
10632         }
10633         return ret;
10634 }
10635
10636 /**
10637  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
10638  *
10639  * @param[in] dev
10640  *   Pointer to the Ethernet device structure.
10641  *
10642  * @return
10643  *   0 on success, otherwise negative errno value and rte_errno is set.
10644  */
10645 static int
10646 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
10647 {
10648         struct mlx5_priv *priv = dev->data->dev_private;
10649         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10650         void *old_pools = mng->pools;
10651         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
10652         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
10653         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
10654
10655         if (!pools) {
10656                 rte_errno = ENOMEM;
10657                 return -ENOMEM;
10658         }
10659         if (old_pools) {
10660                 memcpy(pools, old_pools,
10661                        mng->n * sizeof(struct mlx5_flow_counter_pool *));
10662                 mlx5_free(old_pools);
10663         } else {
10664                 /* First ASO flow hit allocation - starting ASO data-path. */
10665                 int ret = mlx5_aso_queue_start(priv->sh);
10666
10667                 if (ret) {
10668                         mlx5_free(pools);
10669                         return ret;
10670                 }
10671         }
10672         mng->n = resize;
10673         mng->pools = pools;
10674         return 0;
10675 }
10676
10677 /**
10678  * Create and initialize a new ASO aging pool.
10679  *
10680  * @param[in] dev
10681  *   Pointer to the Ethernet device structure.
10682  * @param[out] age_free
10683  *   Where to put the pointer of a new age action.
10684  *
10685  * @return
10686  *   The age actions pool pointer and @p age_free is set on success,
10687  *   NULL otherwise and rte_errno is set.
10688  */
static struct mlx5_aso_age_pool *
flow_dv_age_pool_create(struct rte_eth_dev *dev,
			struct mlx5_aso_age_action **age_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
	struct mlx5_aso_age_pool *pool = NULL;
	struct mlx5_devx_obj *obj = NULL;
	uint32_t i;

	/* The DevX flow-hit ASO object backs all actions of this pool. */
	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
						    priv->sh->pdn);
	if (!obj) {
		rte_errno = ENODATA;
		DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		claim_zero(mlx5_devx_cmd_destroy(obj));
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->flow_hit_aso_obj = obj;
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	rte_spinlock_lock(&mng->resize_sl);
	pool->index = mng->next;
	/* Resize pools array if there is no room for the new pool in it. */
	if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
		/* Roll back the DevX object and the pool on failure. */
		claim_zero(mlx5_devx_cmd_destroy(obj));
		mlx5_free(pool);
		rte_spinlock_unlock(&mng->resize_sl);
		return NULL;
	}
	mng->pools[pool->index] = pool;
	mng->next++;
	rte_spinlock_unlock(&mng->resize_sl);
	/* Assign the first action in the new pool, the rest go to free list. */
	*age_free = &pool->actions[0];
	/*
	 * NOTE(review): mng->free is updated without taking mng->free_sl;
	 * the visible caller (flow_dv_aso_age_alloc) holds that lock across
	 * this call, which is what makes these insertions safe.
	 */
	for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
		pool->actions[i].offset = i;
		LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
	}
	return pool;
}
10734
10735 /**
10736  * Allocate a ASO aging bit.
10737  *
10738  * @param[in] dev
10739  *   Pointer to the Ethernet device structure.
10740  * @param[out] error
10741  *   Pointer to the error structure.
10742  *
10743  * @return
10744  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
10745  */
static uint32_t
flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct mlx5_aso_age_pool *pool;
	struct mlx5_aso_age_action *age_free = NULL;
	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;

	MLX5_ASSERT(mng);
	/* Try to get the next free age action bit. */
	rte_spinlock_lock(&mng->free_sl);
	age_free = LIST_FIRST(&mng->free);
	if (age_free) {
		LIST_REMOVE(age_free, next);
	} else if (!flow_dv_age_pool_create(dev, &age_free)) {
		/*
		 * Free list was empty and a new pool could not be created.
		 * flow_dv_age_pool_create() has already set rte_errno.
		 */
		rte_spinlock_unlock(&mng->free_sl);
		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "failed to create ASO age pool");
		return 0; /* 0 is an error. */
	}
	rte_spinlock_unlock(&mng->free_sl);
	/*
	 * Recover the owning pool from the action pointer: actions are laid
	 * out contiguously in pool->actions[] and each action stores its own
	 * index in ->offset, so (age_free - offset) is the array base, and
	 * container_of() on that array yields the pool.
	 */
	pool = container_of
	  ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
		  (age_free - age_free->offset), const struct mlx5_aso_age_pool,
								       actions);
	if (!age_free->dr_action) {
		/* Lazily create the DR action on first use of this slot. */
		int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
						 error);

		if (reg_c < 0) {
			/* NOTE: the freshly removed action is not returned to
			 * the free list on this path; its slot stays parked
			 * until the pool is destroyed.
			 */
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL, "failed to get reg_c "
					   "for ASO flow hit");
			return 0; /* 0 is an error. */
		}
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
		age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
				(priv->sh->rx_domain,
				 pool->flow_hit_aso_obj->obj, age_free->offset,
				 MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
				 (reg_c - REG_C_0));
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
		/*
		 * Without HAVE_MLX5_DR_CREATE_ACTION_ASO dr_action stays NULL
		 * and this branch reports failure (errno may then be stale).
		 */
		if (!age_free->dr_action) {
			rte_errno = errno;
			/* Give the unusable slot back to the free list. */
			rte_spinlock_lock(&mng->free_sl);
			LIST_INSERT_HEAD(&mng->free, age_free, next);
			rte_spinlock_unlock(&mng->free_sl);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL, "failed to create ASO "
					   "flow hit action");
			return 0; /* 0 is an error. */
		}
	}
	__atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
	/*
	 * Encode pool index in the low 16 bits and (offset + 1) in the high
	 * bits; the +1 keeps a valid index from ever being 0 (the error value).
	 */
	return pool->index | ((age_free->offset + 1) << 16);
}
10804
10805 /**
10806  * Create a age action using ASO mechanism.
10807  *
10808  * @param[in] dev
10809  *   Pointer to rte_eth_dev structure.
10810  * @param[in] age
10811  *   Pointer to the aging action configuration.
10812  * @param[out] error
10813  *   Pointer to the error structure.
10814  *
10815  * @return
 *   Index to the ASO age action on success, 0 otherwise.
10817  */
10818 static uint32_t
10819 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
10820                                  const struct rte_flow_action_age *age,
10821                                  struct rte_flow_error *error)
10822 {
10823         uint32_t age_idx = 0;
10824         struct mlx5_aso_age_action *aso_age;
10825
10826         age_idx = flow_dv_aso_age_alloc(dev, error);
10827         if (!age_idx)
10828                 return 0;
10829         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
10830         aso_age->age_params.context = age->context;
10831         aso_age->age_params.timeout = age->timeout;
10832         aso_age->age_params.port_id = dev->data->port_id;
10833         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
10834                          __ATOMIC_RELAXED);
10835         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
10836                          __ATOMIC_RELAXED);
10837         return age_idx;
10838 }
10839
10840 /**
10841  * Fill the flow with DV spec, lock free
10842  * (mutex should be acquired by caller).
10843  *
10844  * @param[in] dev
10845  *   Pointer to rte_eth_dev structure.
10846  * @param[in, out] dev_flow
10847  *   Pointer to the sub flow.
10848  * @param[in] attr
10849  *   Pointer to the flow attributes.
10850  * @param[in] items
10851  *   Pointer to the list of items.
10852  * @param[in] actions
10853  *   Pointer to the list of actions.
10854  * @param[out] error
10855  *   Pointer to the error structure.
10856  *
10857  * @return
10858  *   0 on success, a negative errno value otherwise and rte_errno is set.
10859  */
10860 static int
10861 flow_dv_translate(struct rte_eth_dev *dev,
10862                   struct mlx5_flow *dev_flow,
10863                   const struct rte_flow_attr *attr,
10864                   const struct rte_flow_item items[],
10865                   const struct rte_flow_action actions[],
10866                   struct rte_flow_error *error)
10867 {
10868         struct mlx5_priv *priv = dev->data->dev_private;
10869         struct mlx5_dev_config *dev_conf = &priv->config;
10870         struct rte_flow *flow = dev_flow->flow;
10871         struct mlx5_flow_handle *handle = dev_flow->handle;
10872         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10873         struct mlx5_flow_rss_desc *rss_desc;
10874         uint64_t item_flags = 0;
10875         uint64_t last_item = 0;
10876         uint64_t action_flags = 0;
10877         struct mlx5_flow_dv_matcher matcher = {
10878                 .mask = {
10879                         .size = sizeof(matcher.mask.buf) -
10880                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
10881                 },
10882         };
10883         int actions_n = 0;
10884         bool actions_end = false;
10885         union {
10886                 struct mlx5_flow_dv_modify_hdr_resource res;
10887                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
10888                             sizeof(struct mlx5_modification_cmd) *
10889                             (MLX5_MAX_MODIFY_NUM + 1)];
10890         } mhdr_dummy;
10891         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
10892         const struct rte_flow_action_count *count = NULL;
10893         const struct rte_flow_action_age *age = NULL;
10894         union flow_dv_attr flow_attr = { .attr = 0 };
10895         uint32_t tag_be;
10896         union mlx5_flow_tbl_key tbl_key;
10897         uint32_t modify_action_position = UINT32_MAX;
10898         void *match_mask = matcher.mask.buf;
10899         void *match_value = dev_flow->dv.value.buf;
10900         uint8_t next_protocol = 0xff;
10901         struct rte_vlan_hdr vlan = { 0 };
10902         struct mlx5_flow_dv_dest_array_resource mdest_res;
10903         struct mlx5_flow_dv_sample_resource sample_res;
10904         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10905         const struct rte_flow_action_sample *sample = NULL;
10906         struct mlx5_flow_sub_actions_list *sample_act;
10907         uint32_t sample_act_pos = UINT32_MAX;
10908         uint32_t num_of_dest = 0;
10909         int tmp_actions_n = 0;
10910         uint32_t table;
10911         int ret = 0;
10912         const struct mlx5_flow_tunnel *tunnel;
10913         struct flow_grp_info grp_info = {
10914                 .external = !!dev_flow->external,
10915                 .transfer = !!attr->transfer,
10916                 .fdb_def_rule = !!priv->fdb_def_rule,
10917                 .skip_scale = dev_flow->skip_scale &
10918                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
10919         };
10920
10921         if (!wks)
10922                 return rte_flow_error_set(error, ENOMEM,
10923                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10924                                           NULL,
10925                                           "failed to push flow workspace");
10926         rss_desc = &wks->rss_desc;
10927         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
10928         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
10929         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
10930                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10931         /* update normal path action resource into last index of array */
10932         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
10933         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
10934                  flow_items_to_tunnel(items) :
10935                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
10936                  flow_actions_to_tunnel(actions) :
10937                  dev_flow->tunnel ? dev_flow->tunnel : NULL;
10938         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
10939                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10940         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
10941                                 (dev, tunnel, attr, items, actions);
10942         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
10943                                        &grp_info, error);
10944         if (ret)
10945                 return ret;
10946         dev_flow->dv.group = table;
10947         if (attr->transfer)
10948                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10949         /* number of actions must be set to 0 in case of dirty stack. */
10950         mhdr_res->actions_num = 0;
10951         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
10952                 /*
10953                  * do not add decap action if match rule drops packet
10954                  * HW rejects rules with decap & drop
10955                  *
10956                  * if tunnel match rule was inserted before matching tunnel set
10957                  * rule flow table used in the match rule must be registered.
10958                  * current implementation handles that in the
10959                  * flow_dv_match_register() at the function end.
10960                  */
10961                 bool add_decap = true;
10962                 const struct rte_flow_action *ptr = actions;
10963
10964                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
10965                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
10966                                 add_decap = false;
10967                                 break;
10968                         }
10969                 }
10970                 if (add_decap) {
10971                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
10972                                                            attr->transfer,
10973                                                            error))
10974                                 return -rte_errno;
10975                         dev_flow->dv.actions[actions_n++] =
10976                                         dev_flow->dv.encap_decap->action;
10977                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10978                 }
10979         }
10980         for (; !actions_end ; actions++) {
10981                 const struct rte_flow_action_queue *queue;
10982                 const struct rte_flow_action_rss *rss;
10983                 const struct rte_flow_action *action = actions;
10984                 const uint8_t *rss_key;
10985                 const struct rte_flow_action_meter *mtr;
10986                 struct mlx5_flow_tbl_resource *tbl;
10987                 struct mlx5_aso_age_action *age_act;
10988                 uint32_t port_id = 0;
10989                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
10990                 int action_type = actions->type;
10991                 const struct rte_flow_action *found_action = NULL;
10992                 struct mlx5_flow_meter *fm = NULL;
10993                 uint32_t jump_group = 0;
10994
10995                 if (!mlx5_flow_os_action_supported(action_type))
10996                         return rte_flow_error_set(error, ENOTSUP,
10997                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10998                                                   actions,
10999                                                   "action not supported");
11000                 switch (action_type) {
11001                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
11002                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
11003                         break;
11004                 case RTE_FLOW_ACTION_TYPE_VOID:
11005                         break;
11006                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11007                         if (flow_dv_translate_action_port_id(dev, action,
11008                                                              &port_id, error))
11009                                 return -rte_errno;
11010                         port_id_resource.port_id = port_id;
11011                         MLX5_ASSERT(!handle->rix_port_id_action);
11012                         if (flow_dv_port_id_action_resource_register
11013                             (dev, &port_id_resource, dev_flow, error))
11014                                 return -rte_errno;
11015                         dev_flow->dv.actions[actions_n++] =
11016                                         dev_flow->dv.port_id_action->action;
11017                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11018                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
11019                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11020                         num_of_dest++;
11021                         break;
11022                 case RTE_FLOW_ACTION_TYPE_FLAG:
11023                         action_flags |= MLX5_FLOW_ACTION_FLAG;
11024                         dev_flow->handle->mark = 1;
11025                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11026                                 struct rte_flow_action_mark mark = {
11027                                         .id = MLX5_FLOW_MARK_DEFAULT,
11028                                 };
11029
11030                                 if (flow_dv_convert_action_mark(dev, &mark,
11031                                                                 mhdr_res,
11032                                                                 error))
11033                                         return -rte_errno;
11034                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11035                                 break;
11036                         }
11037                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
11038                         /*
11039                          * Only one FLAG or MARK is supported per device flow
11040                          * right now. So the pointer to the tag resource must be
11041                          * zero before the register process.
11042                          */
11043                         MLX5_ASSERT(!handle->dvh.rix_tag);
11044                         if (flow_dv_tag_resource_register(dev, tag_be,
11045                                                           dev_flow, error))
11046                                 return -rte_errno;
11047                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11048                         dev_flow->dv.actions[actions_n++] =
11049                                         dev_flow->dv.tag_resource->action;
11050                         break;
11051                 case RTE_FLOW_ACTION_TYPE_MARK:
11052                         action_flags |= MLX5_FLOW_ACTION_MARK;
11053                         dev_flow->handle->mark = 1;
11054                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11055                                 const struct rte_flow_action_mark *mark =
11056                                         (const struct rte_flow_action_mark *)
11057                                                 actions->conf;
11058
11059                                 if (flow_dv_convert_action_mark(dev, mark,
11060                                                                 mhdr_res,
11061                                                                 error))
11062                                         return -rte_errno;
11063                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11064                                 break;
11065                         }
11066                         /* Fall-through */
11067                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
11068                         /* Legacy (non-extensive) MARK action. */
11069                         tag_be = mlx5_flow_mark_set
11070                               (((const struct rte_flow_action_mark *)
11071                                (actions->conf))->id);
11072                         MLX5_ASSERT(!handle->dvh.rix_tag);
11073                         if (flow_dv_tag_resource_register(dev, tag_be,
11074                                                           dev_flow, error))
11075                                 return -rte_errno;
11076                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11077                         dev_flow->dv.actions[actions_n++] =
11078                                         dev_flow->dv.tag_resource->action;
11079                         break;
11080                 case RTE_FLOW_ACTION_TYPE_SET_META:
11081                         if (flow_dv_convert_action_set_meta
11082                                 (dev, mhdr_res, attr,
11083                                  (const struct rte_flow_action_set_meta *)
11084                                   actions->conf, error))
11085                                 return -rte_errno;
11086                         action_flags |= MLX5_FLOW_ACTION_SET_META;
11087                         break;
11088                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
11089                         if (flow_dv_convert_action_set_tag
11090                                 (dev, mhdr_res,
11091                                  (const struct rte_flow_action_set_tag *)
11092                                   actions->conf, error))
11093                                 return -rte_errno;
11094                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11095                         break;
11096                 case RTE_FLOW_ACTION_TYPE_DROP:
11097                         action_flags |= MLX5_FLOW_ACTION_DROP;
11098                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
11099                         break;
11100                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11101                         queue = actions->conf;
11102                         rss_desc->queue_num = 1;
11103                         rss_desc->queue[0] = queue->index;
11104                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11105                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11106                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
11107                         num_of_dest++;
11108                         break;
11109                 case RTE_FLOW_ACTION_TYPE_RSS:
11110                         rss = actions->conf;
11111                         memcpy(rss_desc->queue, rss->queue,
11112                                rss->queue_num * sizeof(uint16_t));
11113                         rss_desc->queue_num = rss->queue_num;
11114                         /* NULL RSS key indicates default RSS key. */
11115                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11116                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11117                         /*
11118                          * rss->level and rss.types should be set in advance
11119                          * when expanding items for RSS.
11120                          */
11121                         action_flags |= MLX5_FLOW_ACTION_RSS;
11122                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
11123                                 MLX5_FLOW_FATE_SHARED_RSS :
11124                                 MLX5_FLOW_FATE_QUEUE;
11125                         break;
11126                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
11127                         flow->age = (uint32_t)(uintptr_t)(action->conf);
11128                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
11129                         __atomic_fetch_add(&age_act->refcnt, 1,
11130                                            __ATOMIC_RELAXED);
11131                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
11132                         action_flags |= MLX5_FLOW_ACTION_AGE;
11133                         break;
11134                 case RTE_FLOW_ACTION_TYPE_AGE:
11135                         if (priv->sh->flow_hit_aso_en && attr->group) {
11136                                 /*
11137                                  * Create one shared age action, to be used
11138                                  * by all sub-flows.
11139                                  */
11140                                 if (!flow->age) {
11141                                         flow->age =
11142                                                 flow_dv_translate_create_aso_age
11143                                                         (dev, action->conf,
11144                                                          error);
11145                                         if (!flow->age)
11146                                                 return rte_flow_error_set
11147                                                 (error, rte_errno,
11148                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11149                                                  NULL,
11150                                                  "can't create ASO age action");
11151                                 }
11152                                 dev_flow->dv.actions[actions_n++] =
11153                                           (flow_aso_age_get_by_idx
11154                                                 (dev, flow->age))->dr_action;
11155                                 action_flags |= MLX5_FLOW_ACTION_AGE;
11156                                 break;
11157                         }
11158                         /* Fall-through */
11159                 case RTE_FLOW_ACTION_TYPE_COUNT:
11160                         if (!dev_conf->devx) {
11161                                 return rte_flow_error_set
11162                                               (error, ENOTSUP,
11163                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11164                                                NULL,
11165                                                "count action not supported");
11166                         }
11167                         /* Save information first, will apply later. */
11168                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
11169                                 count = action->conf;
11170                         else
11171                                 age = action->conf;
11172                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11173                         break;
11174                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
11175                         dev_flow->dv.actions[actions_n++] =
11176                                                 priv->sh->pop_vlan_action;
11177                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
11178                         break;
11179                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
11180                         if (!(action_flags &
11181                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
11182                                 flow_dev_get_vlan_info_from_items(items, &vlan);
11183                         vlan.eth_proto = rte_be_to_cpu_16
11184                              ((((const struct rte_flow_action_of_push_vlan *)
11185                                                    actions->conf)->ethertype));
11186                         found_action = mlx5_flow_find_action
11187                                         (actions + 1,
11188                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
11189                         if (found_action)
11190                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11191                         found_action = mlx5_flow_find_action
11192                                         (actions + 1,
11193                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
11194                         if (found_action)
11195                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11196                         if (flow_dv_create_action_push_vlan
11197                                             (dev, attr, &vlan, dev_flow, error))
11198                                 return -rte_errno;
11199                         dev_flow->dv.actions[actions_n++] =
11200                                         dev_flow->dv.push_vlan_res->action;
11201                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
11202                         break;
11203                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
11204                         /* of_vlan_push action handled this action */
11205                         MLX5_ASSERT(action_flags &
11206                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
11207                         break;
11208                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
11209                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
11210                                 break;
11211                         flow_dev_get_vlan_info_from_items(items, &vlan);
11212                         mlx5_update_vlan_vid_pcp(actions, &vlan);
11213                         /* If no VLAN push - this is a modify header action */
11214                         if (flow_dv_convert_action_modify_vlan_vid
11215                                                 (mhdr_res, actions, error))
11216                                 return -rte_errno;
11217                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
11218                         break;
11219                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11220                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11221                         if (flow_dv_create_action_l2_encap(dev, actions,
11222                                                            dev_flow,
11223                                                            attr->transfer,
11224                                                            error))
11225                                 return -rte_errno;
11226                         dev_flow->dv.actions[actions_n++] =
11227                                         dev_flow->dv.encap_decap->action;
11228                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11229                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11230                                 sample_act->action_flags |=
11231                                                         MLX5_FLOW_ACTION_ENCAP;
11232                         break;
11233                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
11234                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
11235                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11236                                                            attr->transfer,
11237                                                            error))
11238                                 return -rte_errno;
11239                         dev_flow->dv.actions[actions_n++] =
11240                                         dev_flow->dv.encap_decap->action;
11241                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11242                         break;
11243                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11244                         /* Handle encap with preceding decap. */
11245                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
11246                                 if (flow_dv_create_action_raw_encap
11247                                         (dev, actions, dev_flow, attr, error))
11248                                         return -rte_errno;
11249                                 dev_flow->dv.actions[actions_n++] =
11250                                         dev_flow->dv.encap_decap->action;
11251                         } else {
11252                                 /* Handle encap without preceding decap. */
11253                                 if (flow_dv_create_action_l2_encap
11254                                     (dev, actions, dev_flow, attr->transfer,
11255                                      error))
11256                                         return -rte_errno;
11257                                 dev_flow->dv.actions[actions_n++] =
11258                                         dev_flow->dv.encap_decap->action;
11259                         }
11260                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11261                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11262                                 sample_act->action_flags |=
11263                                                         MLX5_FLOW_ACTION_ENCAP;
11264                         break;
11265                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11266                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
11267                                 ;
11268                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
11269                                 if (flow_dv_create_action_l2_decap
11270                                     (dev, dev_flow, attr->transfer, error))
11271                                         return -rte_errno;
11272                                 dev_flow->dv.actions[actions_n++] =
11273                                         dev_flow->dv.encap_decap->action;
11274                         }
11275                         /* If decap is followed by encap, handle it at encap. */
11276                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11277                         break;
11278                 case RTE_FLOW_ACTION_TYPE_JUMP:
11279                         jump_group = ((const struct rte_flow_action_jump *)
11280                                                         action->conf)->group;
11281                         grp_info.std_tbl_fix = 0;
11282                         if (dev_flow->skip_scale &
11283                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
11284                                 grp_info.skip_scale = 1;
11285                         else
11286                                 grp_info.skip_scale = 0;
11287                         ret = mlx5_flow_group_to_table(dev, tunnel,
11288                                                        jump_group,
11289                                                        &table,
11290                                                        &grp_info, error);
11291                         if (ret)
11292                                 return ret;
11293                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
11294                                                        attr->transfer,
11295                                                        !!dev_flow->external,
11296                                                        tunnel, jump_group, 0,
11297                                                        error);
11298                         if (!tbl)
11299                                 return rte_flow_error_set
11300                                                 (error, errno,
11301                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11302                                                  NULL,
11303                                                  "cannot create jump action.");
11304                         if (flow_dv_jump_tbl_resource_register
11305                             (dev, tbl, dev_flow, error)) {
11306                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11307                                 return rte_flow_error_set
11308                                                 (error, errno,
11309                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11310                                                  NULL,
11311                                                  "cannot create jump action.");
11312                         }
11313                         dev_flow->dv.actions[actions_n++] =
11314                                         dev_flow->dv.jump->action;
11315                         action_flags |= MLX5_FLOW_ACTION_JUMP;
11316                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
11317                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
11318                         num_of_dest++;
11319                         break;
11320                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
11321                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
11322                         if (flow_dv_convert_action_modify_mac
11323                                         (mhdr_res, actions, error))
11324                                 return -rte_errno;
11325                         action_flags |= actions->type ==
11326                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
11327                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
11328                                         MLX5_FLOW_ACTION_SET_MAC_DST;
11329                         break;
11330                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
11331                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
11332                         if (flow_dv_convert_action_modify_ipv4
11333                                         (mhdr_res, actions, error))
11334                                 return -rte_errno;
11335                         action_flags |= actions->type ==
11336                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
11337                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
11338                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
11339                         break;
11340                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
11341                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
11342                         if (flow_dv_convert_action_modify_ipv6
11343                                         (mhdr_res, actions, error))
11344                                 return -rte_errno;
11345                         action_flags |= actions->type ==
11346                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
11347                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
11348                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
11349                         break;
11350                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
11351                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
11352                         if (flow_dv_convert_action_modify_tp
11353                                         (mhdr_res, actions, items,
11354                                          &flow_attr, dev_flow, !!(action_flags &
11355                                          MLX5_FLOW_ACTION_DECAP), error))
11356                                 return -rte_errno;
11357                         action_flags |= actions->type ==
11358                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
11359                                         MLX5_FLOW_ACTION_SET_TP_SRC :
11360                                         MLX5_FLOW_ACTION_SET_TP_DST;
11361                         break;
11362                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
11363                         if (flow_dv_convert_action_modify_dec_ttl
11364                                         (mhdr_res, items, &flow_attr, dev_flow,
11365                                          !!(action_flags &
11366                                          MLX5_FLOW_ACTION_DECAP), error))
11367                                 return -rte_errno;
11368                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
11369                         break;
11370                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
11371                         if (flow_dv_convert_action_modify_ttl
11372                                         (mhdr_res, actions, items, &flow_attr,
11373                                          dev_flow, !!(action_flags &
11374                                          MLX5_FLOW_ACTION_DECAP), error))
11375                                 return -rte_errno;
11376                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
11377                         break;
11378                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
11379                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
11380                         if (flow_dv_convert_action_modify_tcp_seq
11381                                         (mhdr_res, actions, error))
11382                                 return -rte_errno;
11383                         action_flags |= actions->type ==
11384                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
11385                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
11386                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
11387                         break;
11388
11389                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
11390                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
11391                         if (flow_dv_convert_action_modify_tcp_ack
11392                                         (mhdr_res, actions, error))
11393                                 return -rte_errno;
11394                         action_flags |= actions->type ==
11395                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
11396                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
11397                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
11398                         break;
11399                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
11400                         if (flow_dv_convert_action_set_reg
11401                                         (mhdr_res, actions, error))
11402                                 return -rte_errno;
11403                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11404                         break;
11405                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
11406                         if (flow_dv_convert_action_copy_mreg
11407                                         (dev, mhdr_res, actions, error))
11408                                 return -rte_errno;
11409                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11410                         break;
11411                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
11412                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
11413                         dev_flow->handle->fate_action =
11414                                         MLX5_FLOW_FATE_DEFAULT_MISS;
11415                         break;
11416                 case RTE_FLOW_ACTION_TYPE_METER:
11417                         mtr = actions->conf;
11418                         if (!flow->meter) {
11419                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
11420                                                             attr, error);
11421                                 if (!fm)
11422                                         return rte_flow_error_set(error,
11423                                                 rte_errno,
11424                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11425                                                 NULL,
11426                                                 "meter not found "
11427                                                 "or invalid parameters");
11428                                 flow->meter = fm->idx;
11429                         }
11430                         /* Set the meter action. */
11431                         if (!fm) {
11432                                 fm = mlx5_ipool_get(priv->sh->ipool
11433                                                 [MLX5_IPOOL_MTR], flow->meter);
11434                                 if (!fm)
11435                                         return rte_flow_error_set(error,
11436                                                 rte_errno,
11437                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11438                                                 NULL,
11439                                                 "meter not found "
11440                                                 "or invalid parameters");
11441                         }
11442                         dev_flow->dv.actions[actions_n++] =
11443                                 fm->mfts->meter_action;
11444                         action_flags |= MLX5_FLOW_ACTION_METER;
11445                         break;
11446                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
11447                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
11448                                                               actions, error))
11449                                 return -rte_errno;
11450                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
11451                         break;
11452                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
11453                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
11454                                                               actions, error))
11455                                 return -rte_errno;
11456                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
11457                         break;
11458                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
11459                         sample_act_pos = actions_n;
11460                         sample = (const struct rte_flow_action_sample *)
11461                                  action->conf;
11462                         actions_n++;
11463                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
11464                         /* Put the encap action into the group when combined with port id. */
11465                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
11466                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
11467                                 sample_act->action_flags |=
11468                                                         MLX5_FLOW_ACTION_ENCAP;
11469                         break;
11470                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
11471                         if (flow_dv_convert_action_modify_field
11472                                         (dev, mhdr_res, actions, attr, error))
11473                                 return -rte_errno;
11474                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
11475                         break;
11476                 case RTE_FLOW_ACTION_TYPE_END:
11477                         actions_end = true;
11478                         if (mhdr_res->actions_num) {
11479                                 /* create modify action if needed. */
11480                                 if (flow_dv_modify_hdr_resource_register
11481                                         (dev, mhdr_res, dev_flow, error))
11482                                         return -rte_errno;
11483                                 dev_flow->dv.actions[modify_action_position] =
11484                                         handle->dvh.modify_hdr->action;
11485                         }
11486                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
11487                                 /*
11488                                  * Create one count action, to be used
11489                                  * by all sub-flows.
11490                                  */
11491                                 if (!flow->counter) {
11492                                         flow->counter =
11493                                                 flow_dv_translate_create_counter
11494                                                         (dev, dev_flow, count,
11495                                                          age);
11496                                         if (!flow->counter)
11497                                                 return rte_flow_error_set
11498                                                 (error, rte_errno,
11499                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11500                                                  NULL, "cannot create counter"
11501                                                  " object.");
11502                                 }
11503                                 dev_flow->dv.actions[actions_n] =
11504                                           (flow_dv_counter_get_by_idx(dev,
11505                                           flow->counter, NULL))->action;
11506                                 actions_n++;
11507                         }
11508                 default:
11509                         break;
11510                 }
11511                 if (mhdr_res->actions_num &&
11512                     modify_action_position == UINT32_MAX)
11513                         modify_action_position = actions_n++;
11514         }
11515         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
11516                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
11517                 int item_type = items->type;
11518
11519                 if (!mlx5_flow_os_item_supported(item_type))
11520                         return rte_flow_error_set(error, ENOTSUP,
11521                                                   RTE_FLOW_ERROR_TYPE_ITEM,
11522                                                   NULL, "item not supported");
11523                 switch (item_type) {
11524                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
11525                         flow_dv_translate_item_port_id
11526                                 (dev, match_mask, match_value, items, attr);
11527                         last_item = MLX5_FLOW_ITEM_PORT_ID;
11528                         break;
11529                 case RTE_FLOW_ITEM_TYPE_ETH:
11530                         flow_dv_translate_item_eth(match_mask, match_value,
11531                                                    items, tunnel,
11532                                                    dev_flow->dv.group);
11533                         matcher.priority = action_flags &
11534                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
11535                                         !dev_flow->external ?
11536                                         MLX5_PRIORITY_MAP_L3 :
11537                                         MLX5_PRIORITY_MAP_L2;
11538                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
11539                                              MLX5_FLOW_LAYER_OUTER_L2;
11540                         break;
11541                 case RTE_FLOW_ITEM_TYPE_VLAN:
11542                         flow_dv_translate_item_vlan(dev_flow,
11543                                                     match_mask, match_value,
11544                                                     items, tunnel,
11545                                                     dev_flow->dv.group);
11546                         matcher.priority = MLX5_PRIORITY_MAP_L2;
11547                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
11548                                               MLX5_FLOW_LAYER_INNER_VLAN) :
11549                                              (MLX5_FLOW_LAYER_OUTER_L2 |
11550                                               MLX5_FLOW_LAYER_OUTER_VLAN);
11551                         break;
11552                 case RTE_FLOW_ITEM_TYPE_IPV4:
11553                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11554                                                   &item_flags, &tunnel);
11555                         flow_dv_translate_item_ipv4(match_mask, match_value,
11556                                                     items, tunnel,
11557                                                     dev_flow->dv.group);
11558                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11559                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
11560                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
11561                         if (items->mask != NULL &&
11562                             ((const struct rte_flow_item_ipv4 *)
11563                              items->mask)->hdr.next_proto_id) {
11564                                 next_protocol =
11565                                         ((const struct rte_flow_item_ipv4 *)
11566                                          (items->spec))->hdr.next_proto_id;
11567                                 next_protocol &=
11568                                         ((const struct rte_flow_item_ipv4 *)
11569                                          (items->mask))->hdr.next_proto_id;
11570                         } else {
11571                                 /* Reset for inner layer. */
11572                                 next_protocol = 0xff;
11573                         }
11574                         break;
11575                 case RTE_FLOW_ITEM_TYPE_IPV6:
11576                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11577                                                   &item_flags, &tunnel);
11578                         flow_dv_translate_item_ipv6(match_mask, match_value,
11579                                                     items, tunnel,
11580                                                     dev_flow->dv.group);
11581                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11582                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
11583                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
11584                         if (items->mask != NULL &&
11585                             ((const struct rte_flow_item_ipv6 *)
11586                              items->mask)->hdr.proto) {
11587                                 next_protocol =
11588                                         ((const struct rte_flow_item_ipv6 *)
11589                                          items->spec)->hdr.proto;
11590                                 next_protocol &=
11591                                         ((const struct rte_flow_item_ipv6 *)
11592                                          items->mask)->hdr.proto;
11593                         } else {
11594                                 /* Reset for inner layer. */
11595                                 next_protocol = 0xff;
11596                         }
11597                         break;
11598                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
11599                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
11600                                                              match_value,
11601                                                              items, tunnel);
11602                         last_item = tunnel ?
11603                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
11604                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
11605                         if (items->mask != NULL &&
11606                             ((const struct rte_flow_item_ipv6_frag_ext *)
11607                              items->mask)->hdr.next_header) {
11608                                 next_protocol =
11609                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11610                                  items->spec)->hdr.next_header;
11611                                 next_protocol &=
11612                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11613                                  items->mask)->hdr.next_header;
11614                         } else {
11615                                 /* Reset for inner layer. */
11616                                 next_protocol = 0xff;
11617                         }
11618                         break;
11619                 case RTE_FLOW_ITEM_TYPE_TCP:
11620                         flow_dv_translate_item_tcp(match_mask, match_value,
11621                                                    items, tunnel);
11622                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11623                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
11624                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
11625                         break;
11626                 case RTE_FLOW_ITEM_TYPE_UDP:
11627                         flow_dv_translate_item_udp(match_mask, match_value,
11628                                                    items, tunnel);
11629                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11630                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
11631                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
11632                         break;
11633                 case RTE_FLOW_ITEM_TYPE_GRE:
11634                         flow_dv_translate_item_gre(match_mask, match_value,
11635                                                    items, tunnel);
11636                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11637                         last_item = MLX5_FLOW_LAYER_GRE;
11638                         break;
11639                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
11640                         flow_dv_translate_item_gre_key(match_mask,
11641                                                        match_value, items);
11642                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
11643                         break;
11644                 case RTE_FLOW_ITEM_TYPE_NVGRE:
11645                         flow_dv_translate_item_nvgre(match_mask, match_value,
11646                                                      items, tunnel);
11647                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11648                         last_item = MLX5_FLOW_LAYER_GRE;
11649                         break;
11650                 case RTE_FLOW_ITEM_TYPE_VXLAN:
11651                         flow_dv_translate_item_vxlan(match_mask, match_value,
11652                                                      items, tunnel);
11653                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11654                         last_item = MLX5_FLOW_LAYER_VXLAN;
11655                         break;
11656                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
11657                         flow_dv_translate_item_vxlan_gpe(match_mask,
11658                                                          match_value, items,
11659                                                          tunnel);
11660                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11661                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
11662                         break;
11663                 case RTE_FLOW_ITEM_TYPE_GENEVE:
11664                         flow_dv_translate_item_geneve(match_mask, match_value,
11665                                                       items, tunnel);
11666                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11667                         last_item = MLX5_FLOW_LAYER_GENEVE;
11668                         break;
11669                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
11670                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
11671                                                           match_value,
11672                                                           items, error);
11673                         if (ret)
11674                                 return rte_flow_error_set(error, -ret,
11675                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11676                                         "cannot create GENEVE TLV option");
11677                         flow->geneve_tlv_option = 1;
11678                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
11679                         break;
11680                 case RTE_FLOW_ITEM_TYPE_MPLS:
11681                         flow_dv_translate_item_mpls(match_mask, match_value,
11682                                                     items, last_item, tunnel);
11683                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11684                         last_item = MLX5_FLOW_LAYER_MPLS;
11685                         break;
11686                 case RTE_FLOW_ITEM_TYPE_MARK:
11687                         flow_dv_translate_item_mark(dev, match_mask,
11688                                                     match_value, items);
11689                         last_item = MLX5_FLOW_ITEM_MARK;
11690                         break;
11691                 case RTE_FLOW_ITEM_TYPE_META:
11692                         flow_dv_translate_item_meta(dev, match_mask,
11693                                                     match_value, attr, items);
11694                         last_item = MLX5_FLOW_ITEM_METADATA;
11695                         break;
11696                 case RTE_FLOW_ITEM_TYPE_ICMP:
11697                         flow_dv_translate_item_icmp(match_mask, match_value,
11698                                                     items, tunnel);
11699                         last_item = MLX5_FLOW_LAYER_ICMP;
11700                         break;
11701                 case RTE_FLOW_ITEM_TYPE_ICMP6:
11702                         flow_dv_translate_item_icmp6(match_mask, match_value,
11703                                                       items, tunnel);
11704                         last_item = MLX5_FLOW_LAYER_ICMP6;
11705                         break;
11706                 case RTE_FLOW_ITEM_TYPE_TAG:
11707                         flow_dv_translate_item_tag(dev, match_mask,
11708                                                    match_value, items);
11709                         last_item = MLX5_FLOW_ITEM_TAG;
11710                         break;
11711                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
11712                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
11713                                                         match_value, items);
11714                         last_item = MLX5_FLOW_ITEM_TAG;
11715                         break;
11716                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
11717                         flow_dv_translate_item_tx_queue(dev, match_mask,
11718                                                         match_value,
11719                                                         items);
11720                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
11721                         break;
11722                 case RTE_FLOW_ITEM_TYPE_GTP:
11723                         flow_dv_translate_item_gtp(match_mask, match_value,
11724                                                    items, tunnel);
11725                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11726                         last_item = MLX5_FLOW_LAYER_GTP;
11727                         break;
11728                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
11729                         ret = flow_dv_translate_item_gtp_psc(match_mask,
11730                                                           match_value,
11731                                                           items);
11732                         if (ret)
11733                                 return rte_flow_error_set(error, -ret,
11734                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11735                                         "cannot create GTP PSC item");
11736                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
11737                         break;
11738                 case RTE_FLOW_ITEM_TYPE_ECPRI:
11739                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
11740                                 /* Create it only the first time to be used. */
11741                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
11742                                 if (ret)
11743                                         return rte_flow_error_set
11744                                                 (error, -ret,
11745                                                 RTE_FLOW_ERROR_TYPE_ITEM,
11746                                                 NULL,
11747                                                 "cannot create eCPRI parser");
11748                         }
11749                         /* Adjust the length matcher and device flow value. */
11750                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
11751                         dev_flow->dv.value.size =
11752                                         MLX5_ST_SZ_BYTES(fte_match_param);
11753                         flow_dv_translate_item_ecpri(dev, match_mask,
11754                                                      match_value, items);
11755                         /* No other protocol should follow eCPRI layer. */
11756                         last_item = MLX5_FLOW_LAYER_ECPRI;
11757                         break;
11758                 default:
11759                         break;
11760                 }
11761                 item_flags |= last_item;
11762         }
11763         /*
11764          * When E-Switch mode is enabled, we have two cases where we need
11765          * to set the source port manually.
11766          * The first is the case of a NIC steering rule, and the second is
11767          * an E-Switch rule where no port_id item was found. In both cases
11768          * the source port is set according to the current port in use.
11769          */
11770         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
11771             (priv->representor || priv->master)) {
11772                 if (flow_dv_translate_item_port_id(dev, match_mask,
11773                                                    match_value, NULL, attr))
11774                         return -rte_errno;
11775         }
11776 #ifdef RTE_LIBRTE_MLX5_DEBUG
11777         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
11778                                               dev_flow->dv.value.buf));
11779 #endif
11780         /*
11781          * Layers may be already initialized from prefix flow if this dev_flow
11782          * is the suffix flow.
11783          */
11784         handle->layers |= item_flags;
11785         if (action_flags & MLX5_FLOW_ACTION_RSS)
11786                 flow_dv_hashfields_set(dev_flow, rss_desc);
11787         /* If there is an RSS action in the sample action, the Sample/Mirror
11788          * resource should be registered after the hash fields are updated.
11789          */
11790         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
11791                 ret = flow_dv_translate_action_sample(dev,
11792                                                       sample,
11793                                                       dev_flow, attr,
11794                                                       &num_of_dest,
11795                                                       sample_actions,
11796                                                       &sample_res,
11797                                                       error);
11798                 if (ret < 0)
11799                         return ret;
11800                 ret = flow_dv_create_action_sample(dev,
11801                                                    dev_flow,
11802                                                    num_of_dest,
11803                                                    &sample_res,
11804                                                    &mdest_res,
11805                                                    sample_actions,
11806                                                    action_flags,
11807                                                    error);
11808                 if (ret < 0)
11809                         return rte_flow_error_set
11810                                                 (error, rte_errno,
11811                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11812                                                 NULL,
11813                                                 "cannot create sample action");
11814                 if (num_of_dest > 1) {
11815                         dev_flow->dv.actions[sample_act_pos] =
11816                         dev_flow->dv.dest_array_res->action;
11817                 } else {
11818                         dev_flow->dv.actions[sample_act_pos] =
11819                         dev_flow->dv.sample_res->verbs_action;
11820                 }
11821         }
11822         /*
11823          * For multiple destination (sample action with ratio=1), the encap
11824          * action and port id action will be combined into group action.
11825          * So need remove the original these actions in the flow and only
11826          * use the sample action instead of.
11827          */
11828         if (num_of_dest > 1 &&
11829             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
11830                 int i;
11831                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11832
11833                 for (i = 0; i < actions_n; i++) {
11834                         if ((sample_act->dr_encap_action &&
11835                                 sample_act->dr_encap_action ==
11836                                 dev_flow->dv.actions[i]) ||
11837                                 (sample_act->dr_port_id_action &&
11838                                 sample_act->dr_port_id_action ==
11839                                 dev_flow->dv.actions[i]) ||
11840                                 (sample_act->dr_jump_action &&
11841                                 sample_act->dr_jump_action ==
11842                                 dev_flow->dv.actions[i]))
11843                                 continue;
11844                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
11845                 }
11846                 memcpy((void *)dev_flow->dv.actions,
11847                                 (void *)temp_actions,
11848                                 tmp_actions_n * sizeof(void *));
11849                 actions_n = tmp_actions_n;
11850         }
11851         dev_flow->dv.actions_n = actions_n;
11852         dev_flow->act_flags = action_flags;
11853         /* Register matcher. */
11854         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
11855                                     matcher.mask.size);
11856         matcher.priority = mlx5_get_matcher_priority(dev, attr,
11857                                         matcher.priority);
11858         /* reserved field no needs to be set to 0 here. */
11859         tbl_key.domain = attr->transfer;
11860         tbl_key.direction = attr->egress;
11861         tbl_key.table_id = dev_flow->dv.group;
11862         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
11863                                      tunnel, attr->group, error))
11864                 return -rte_errno;
11865         return 0;
11866 }
11867
11868 /**
11869  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11870  * and tunnel.
11871  *
11872  * @param[in, out] action
11873  *   Shred RSS action holding hash RX queue objects.
11874  * @param[in] hash_fields
11875  *   Defines combination of packet fields to participate in RX hash.
11876  * @param[in] tunnel
11877  *   Tunnel type
11878  * @param[in] hrxq_idx
11879  *   Hash RX queue index to set.
11880  *
11881  * @return
11882  *   0 on success, otherwise negative errno value.
11883  */
11884 static int
11885 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
11886                               const uint64_t hash_fields,
11887                               const int tunnel,
11888                               uint32_t hrxq_idx)
11889 {
11890         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
11891
11892         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11893         case MLX5_RSS_HASH_IPV4:
11894                 hrxqs[0] = hrxq_idx;
11895                 return 0;
11896         case MLX5_RSS_HASH_IPV4_TCP:
11897                 hrxqs[1] = hrxq_idx;
11898                 return 0;
11899         case MLX5_RSS_HASH_IPV4_UDP:
11900                 hrxqs[2] = hrxq_idx;
11901                 return 0;
11902         case MLX5_RSS_HASH_IPV6:
11903                 hrxqs[3] = hrxq_idx;
11904                 return 0;
11905         case MLX5_RSS_HASH_IPV6_TCP:
11906                 hrxqs[4] = hrxq_idx;
11907                 return 0;
11908         case MLX5_RSS_HASH_IPV6_UDP:
11909                 hrxqs[5] = hrxq_idx;
11910                 return 0;
11911         case MLX5_RSS_HASH_NONE:
11912                 hrxqs[6] = hrxq_idx;
11913                 return 0;
11914         default:
11915                 return -1;
11916         }
11917 }
11918
11919 /**
11920  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11921  * and tunnel.
11922  *
11923  * @param[in] dev
11924  *   Pointer to the Ethernet device structure.
11925  * @param[in] idx
11926  *   Shared RSS action ID holding hash RX queue objects.
11927  * @param[in] hash_fields
11928  *   Defines combination of packet fields to participate in RX hash.
11929  * @param[in] tunnel
11930  *   Tunnel type
11931  *
11932  * @return
11933  *   Valid hash RX queue index, otherwise 0.
11934  */
11935 static uint32_t
11936 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
11937                                  const uint64_t hash_fields,
11938                                  const int tunnel)
11939 {
11940         struct mlx5_priv *priv = dev->data->dev_private;
11941         struct mlx5_shared_action_rss *shared_rss =
11942             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11943         const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
11944                                                         shared_rss->hrxq_tunnel;
11945
11946         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11947         case MLX5_RSS_HASH_IPV4:
11948                 return hrxqs[0];
11949         case MLX5_RSS_HASH_IPV4_TCP:
11950                 return hrxqs[1];
11951         case MLX5_RSS_HASH_IPV4_UDP:
11952                 return hrxqs[2];
11953         case MLX5_RSS_HASH_IPV6:
11954                 return hrxqs[3];
11955         case MLX5_RSS_HASH_IPV6_TCP:
11956                 return hrxqs[4];
11957         case MLX5_RSS_HASH_IPV6_UDP:
11958                 return hrxqs[5];
11959         case MLX5_RSS_HASH_NONE:
11960                 return hrxqs[6];
11961         default:
11962                 return 0;
11963         }
11964 }
11965
11966 /**
11967  * Apply the flow to the NIC, lock free,
11968  * (mutex should be acquired by caller).
11969  *
11970  * @param[in] dev
11971  *   Pointer to the Ethernet device structure.
11972  * @param[in, out] flow
11973  *   Pointer to flow structure.
11974  * @param[out] error
11975  *   Pointer to error structure.
11976  *
11977  * @return
11978  *   0 on success, a negative errno value otherwise and rte_errno is set.
11979  */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv_workspace *dv;
	struct mlx5_flow_handle *dh;
	struct mlx5_flow_handle_dv *dv_h;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	int n;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

	MLX5_ASSERT(wks);
	/*
	 * Walk this thread's pending dev_flows in reverse creation order,
	 * append the fate action to each action list, and create the HW
	 * flow rule for each.
	 */
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		dv = &dev_flow->dv;
		dh = dev_flow->handle;
		dv_h = &dh->dvh;
		n = dv->actions_n;
		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
			/* Drop: e-Switch drop on transfer, drop HRXQ else. */
			if (dv->transfer) {
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				MLX5_ASSERT(priv->drop_queue.hrxq);
				dv->actions[n++] =
						priv->drop_queue.hrxq->action;
			}
		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
			   !dv_h->rix_sample && !dv_h->rix_dest_array)) {
			/*
			 * Plain queue fate (sample/dest-array carry their
			 * own destinations): acquire a hash RX queue and
			 * record its index for the release path.
			 */
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
						    &hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_hrxq = hrxq_idx;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
			/*
			 * Shared RSS fate: look up the pre-created HRXQ by
			 * hash fields and tunnel bit; it is owned by the
			 * shared action, so only the index is recorded.
			 */
			struct mlx5_hrxq *hrxq = NULL;
			uint32_t hrxq_idx;

			hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
						rss_desc->shared_rss,
						dev_flow->hash_fields,
						!!(dh->layers &
						MLX5_FLOW_LAYER_TUNNEL));
			if (hrxq_idx)
				hrxq = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					 hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_srss = rss_desc->shared_rss;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
			if (!priv->sh->default_miss_action) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "default miss action not be created.");
				goto error;
			}
			dv->actions[n++] = priv->sh->default_miss_action;
		}
		/* Create the HW rule from matcher + value + action list. */
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,
					       dv->actions, &dh->drv_flow);
		if (err) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
		}
	}
	return 0;
error:
	/*
	 * Unwind every handle of the whole flow (not just the ones applied
	 * above), releasing fate resources acquired in this function.
	 */
	err = rte_errno; /* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dh, next) {
		/* hrxq is union, don't clear it if the flag is not set. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
			mlx5_hrxq_release(dev, dh->rix_hrxq);
			dh->rix_hrxq = 0;
		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
			dh->rix_srss = 0;
		}
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
12098
12099 void
12100 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
12101                           struct mlx5_cache_entry *entry)
12102 {
12103         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
12104                                                           entry);
12105
12106         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
12107         mlx5_free(cache);
12108 }
12109
12110 /**
12111  * Release the flow matcher.
12112  *
12113  * @param dev
12114  *   Pointer to Ethernet device.
12115  * @param port_id
12116  *   Index to port ID action resource.
12117  *
12118  * @return
12119  *   1 while a reference on it exists, 0 when freed.
12120  */
12121 static int
12122 flow_dv_matcher_release(struct rte_eth_dev *dev,
12123                         struct mlx5_flow_handle *handle)
12124 {
12125         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
12126         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
12127                                                             typeof(*tbl), tbl);
12128         int ret;
12129
12130         MLX5_ASSERT(matcher->matcher_object);
12131         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
12132         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
12133         return ret;
12134 }
12135
12136 /**
12137  * Release encap_decap resource.
12138  *
12139  * @param list
12140  *   Pointer to the hash list.
12141  * @param entry
12142  *   Pointer to exist resource entry object.
12143  */
12144 void
12145 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
12146                               struct mlx5_hlist_entry *entry)
12147 {
12148         struct mlx5_dev_ctx_shared *sh = list->ctx;
12149         struct mlx5_flow_dv_encap_decap_resource *res =
12150                 container_of(entry, typeof(*res), entry);
12151
12152         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12153         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
12154 }
12155
12156 /**
12157  * Release an encap/decap resource.
12158  *
12159  * @param dev
12160  *   Pointer to Ethernet device.
12161  * @param encap_decap_idx
12162  *   Index of encap decap resource.
12163  *
12164  * @return
12165  *   1 while a reference on it exists, 0 when freed.
12166  */
12167 static int
12168 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
12169                                      uint32_t encap_decap_idx)
12170 {
12171         struct mlx5_priv *priv = dev->data->dev_private;
12172         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
12173
12174         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
12175                                         encap_decap_idx);
12176         if (!cache_resource)
12177                 return 0;
12178         MLX5_ASSERT(cache_resource->action);
12179         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
12180                                      &cache_resource->entry);
12181 }
12182
12183 /**
12184  * Release an jump to table action resource.
12185  *
12186  * @param dev
12187  *   Pointer to Ethernet device.
12188  * @param rix_jump
12189  *   Index to the jump action resource.
12190  *
12191  * @return
12192  *   1 while a reference on it exists, 0 when freed.
12193  */
12194 static int
12195 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
12196                                   uint32_t rix_jump)
12197 {
12198         struct mlx5_priv *priv = dev->data->dev_private;
12199         struct mlx5_flow_tbl_data_entry *tbl_data;
12200
12201         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
12202                                   rix_jump);
12203         if (!tbl_data)
12204                 return 0;
12205         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
12206 }
12207
12208 void
12209 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
12210                          struct mlx5_hlist_entry *entry)
12211 {
12212         struct mlx5_flow_dv_modify_hdr_resource *res =
12213                 container_of(entry, typeof(*res), entry);
12214
12215         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12216         mlx5_free(entry);
12217 }
12218
12219 /**
12220  * Release a modify-header resource.
12221  *
12222  * @param dev
12223  *   Pointer to Ethernet device.
12224  * @param handle
12225  *   Pointer to mlx5_flow_handle.
12226  *
12227  * @return
12228  *   1 while a reference on it exists, 0 when freed.
12229  */
12230 static int
12231 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
12232                                     struct mlx5_flow_handle *handle)
12233 {
12234         struct mlx5_priv *priv = dev->data->dev_private;
12235         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
12236
12237         MLX5_ASSERT(entry->action);
12238         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
12239 }
12240
12241 void
12242 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
12243                           struct mlx5_cache_entry *entry)
12244 {
12245         struct mlx5_dev_ctx_shared *sh = list->ctx;
12246         struct mlx5_flow_dv_port_id_action_resource *cache =
12247                         container_of(entry, typeof(*cache), entry);
12248
12249         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12250         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
12251 }
12252
12253 /**
12254  * Release port ID action resource.
12255  *
12256  * @param dev
12257  *   Pointer to Ethernet device.
12258  * @param handle
12259  *   Pointer to mlx5_flow_handle.
12260  *
12261  * @return
12262  *   1 while a reference on it exists, 0 when freed.
12263  */
12264 static int
12265 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
12266                                         uint32_t port_id)
12267 {
12268         struct mlx5_priv *priv = dev->data->dev_private;
12269         struct mlx5_flow_dv_port_id_action_resource *cache;
12270
12271         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
12272         if (!cache)
12273                 return 0;
12274         MLX5_ASSERT(cache->action);
12275         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
12276                                      &cache->entry);
12277 }
12278
12279 /**
12280  * Release shared RSS action resource.
12281  *
12282  * @param dev
12283  *   Pointer to Ethernet device.
12284  * @param srss
12285  *   Shared RSS action index.
12286  */
12287 static void
12288 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
12289 {
12290         struct mlx5_priv *priv = dev->data->dev_private;
12291         struct mlx5_shared_action_rss *shared_rss;
12292
12293         shared_rss = mlx5_ipool_get
12294                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
12295         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
12296 }
12297
12298 void
12299 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
12300                             struct mlx5_cache_entry *entry)
12301 {
12302         struct mlx5_dev_ctx_shared *sh = list->ctx;
12303         struct mlx5_flow_dv_push_vlan_action_resource *cache =
12304                         container_of(entry, typeof(*cache), entry);
12305
12306         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12307         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
12308 }
12309
12310 /**
12311  * Release push vlan action resource.
12312  *
12313  * @param dev
12314  *   Pointer to Ethernet device.
12315  * @param handle
12316  *   Pointer to mlx5_flow_handle.
12317  *
12318  * @return
12319  *   1 while a reference on it exists, 0 when freed.
12320  */
12321 static int
12322 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
12323                                           struct mlx5_flow_handle *handle)
12324 {
12325         struct mlx5_priv *priv = dev->data->dev_private;
12326         struct mlx5_flow_dv_push_vlan_action_resource *cache;
12327         uint32_t idx = handle->dvh.rix_push_vlan;
12328
12329         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
12330         if (!cache)
12331                 return 0;
12332         MLX5_ASSERT(cache->action);
12333         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
12334                                      &cache->entry);
12335 }
12336
12337 /**
12338  * Release the fate resource.
12339  *
12340  * @param dev
12341  *   Pointer to Ethernet device.
12342  * @param handle
12343  *   Pointer to mlx5_flow_handle.
12344  */
12345 static void
12346 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
12347                                struct mlx5_flow_handle *handle)
12348 {
12349         if (!handle->rix_fate)
12350                 return;
12351         switch (handle->fate_action) {
12352         case MLX5_FLOW_FATE_QUEUE:
12353                 mlx5_hrxq_release(dev, handle->rix_hrxq);
12354                 break;
12355         case MLX5_FLOW_FATE_JUMP:
12356                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
12357                 break;
12358         case MLX5_FLOW_FATE_PORT_ID:
12359                 flow_dv_port_id_action_resource_release(dev,
12360                                 handle->rix_port_id_action);
12361                 break;
12362         default:
12363                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
12364                 break;
12365         }
12366         handle->rix_fate = 0;
12367 }
12368
12369 void
12370 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
12371                          struct mlx5_cache_entry *entry)
12372 {
12373         struct mlx5_flow_dv_sample_resource *cache_resource =
12374                         container_of(entry, typeof(*cache_resource), entry);
12375         struct rte_eth_dev *dev = cache_resource->dev;
12376         struct mlx5_priv *priv = dev->data->dev_private;
12377
12378         if (cache_resource->verbs_action)
12379                 claim_zero(mlx5_flow_os_destroy_flow_action
12380                                 (cache_resource->verbs_action));
12381         if (cache_resource->normal_path_tbl)
12382                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12383                         cache_resource->normal_path_tbl);
12384         flow_dv_sample_sub_actions_release(dev,
12385                                 &cache_resource->sample_idx);
12386         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12387                         cache_resource->idx);
12388         DRV_LOG(DEBUG, "sample resource %p: removed",
12389                 (void *)cache_resource);
12390 }
12391
12392 /**
12393  * Release an sample resource.
12394  *
12395  * @param dev
12396  *   Pointer to Ethernet device.
12397  * @param handle
12398  *   Pointer to mlx5_flow_handle.
12399  *
12400  * @return
12401  *   1 while a reference on it exists, 0 when freed.
12402  */
12403 static int
12404 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
12405                                      struct mlx5_flow_handle *handle)
12406 {
12407         struct mlx5_priv *priv = dev->data->dev_private;
12408         struct mlx5_flow_dv_sample_resource *cache_resource;
12409
12410         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12411                          handle->dvh.rix_sample);
12412         if (!cache_resource)
12413                 return 0;
12414         MLX5_ASSERT(cache_resource->verbs_action);
12415         return mlx5_cache_unregister(&priv->sh->sample_action_list,
12416                                      &cache_resource->entry);
12417 }
12418
12419 void
12420 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
12421                              struct mlx5_cache_entry *entry)
12422 {
12423         struct mlx5_flow_dv_dest_array_resource *cache_resource =
12424                         container_of(entry, typeof(*cache_resource), entry);
12425         struct rte_eth_dev *dev = cache_resource->dev;
12426         struct mlx5_priv *priv = dev->data->dev_private;
12427         uint32_t i = 0;
12428
12429         MLX5_ASSERT(cache_resource->action);
12430         if (cache_resource->action)
12431                 claim_zero(mlx5_flow_os_destroy_flow_action
12432                                         (cache_resource->action));
12433         for (; i < cache_resource->num_of_dest; i++)
12434                 flow_dv_sample_sub_actions_release(dev,
12435                                 &cache_resource->sample_idx[i]);
12436         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12437                         cache_resource->idx);
12438         DRV_LOG(DEBUG, "destination array resource %p: removed",
12439                 (void *)cache_resource);
12440 }
12441
12442 /**
12443  * Release an destination array resource.
12444  *
12445  * @param dev
12446  *   Pointer to Ethernet device.
12447  * @param handle
12448  *   Pointer to mlx5_flow_handle.
12449  *
12450  * @return
12451  *   1 while a reference on it exists, 0 when freed.
12452  */
12453 static int
12454 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
12455                                     struct mlx5_flow_handle *handle)
12456 {
12457         struct mlx5_priv *priv = dev->data->dev_private;
12458         struct mlx5_flow_dv_dest_array_resource *cache;
12459
12460         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12461                                handle->dvh.rix_dest_array);
12462         if (!cache)
12463                 return 0;
12464         MLX5_ASSERT(cache->action);
12465         return mlx5_cache_unregister(&priv->sh->dest_array_list,
12466                                      &cache->entry);
12467 }
12468
12469 static void
12470 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
12471 {
12472         struct mlx5_priv *priv = dev->data->dev_private;
12473         struct mlx5_dev_ctx_shared *sh = priv->sh;
12474         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
12475                                 sh->geneve_tlv_option_resource;
12476         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
12477         if (geneve_opt_resource) {
12478                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
12479                                          __ATOMIC_RELAXED))) {
12480                         claim_zero(mlx5_devx_cmd_destroy
12481                                         (geneve_opt_resource->obj));
12482                         mlx5_free(sh->geneve_tlv_option_resource);
12483                         sh->geneve_tlv_option_resource = NULL;
12484                 }
12485         }
12486         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
12487 }
12488
12489 /**
12490  * Remove the flow from the NIC but keeps it in memory.
12491  * Lock free, (mutex should be acquired by caller).
12492  *
12493  * @param[in] dev
12494  *   Pointer to Ethernet device.
12495  * @param[in, out] flow
12496  *   Pointer to flow structure.
12497  */
12498 static void
12499 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12500 {
12501         struct mlx5_flow_handle *dh;
12502         uint32_t handle_idx;
12503         struct mlx5_priv *priv = dev->data->dev_private;
12504
12505         if (!flow)
12506                 return;
12507         handle_idx = flow->dev_handles;
12508         while (handle_idx) {
12509                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12510                                     handle_idx);
12511                 if (!dh)
12512                         return;
12513                 if (dh->drv_flow) {
12514                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
12515                         dh->drv_flow = NULL;
12516                 }
12517                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
12518                         flow_dv_fate_resource_release(dev, dh);
12519                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12520                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12521                 handle_idx = dh->next.next;
12522         }
12523 }
12524
/**
 * Remove the flow from the NIC and the memory.
 * Lock free, (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t srss = 0;

	if (!flow)
		return;
	/* Detach the flow from hardware first, then release resources. */
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_free(dev, flow->counter);
		flow->counter = 0;
	}
	if (flow->meter) {
		struct mlx5_flow_meter *fm;

		fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
				    flow->meter);
		if (fm)
			mlx5_flow_meter_detach(fm);
		flow->meter = 0;
	}
	if (flow->age)
		flow_dv_aso_age_release(dev, flow->age);
	if (flow->geneve_tlv_option) {
		flow_dv_geneve_tlv_option_resource_release(dev);
		flow->geneve_tlv_option = 0;
	}
	/* Walk the device-handle list, releasing each handle's resources. */
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		dev_handle = mlx5_ipool_get(priv->sh->ipool
					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
		if (!dev_handle)
			return;
		flow->dev_handles = dev_handle->next.next;
		if (dev_handle->dvh.matcher)
			flow_dv_matcher_release(dev, dev_handle);
		if (dev_handle->dvh.rix_sample)
			flow_dv_sample_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_dest_array)
			flow_dv_dest_array_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_encap_decap)
			flow_dv_encap_decap_resource_release(dev,
				dev_handle->dvh.rix_encap_decap);
		if (dev_handle->dvh.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_push_vlan)
			flow_dv_push_vlan_action_resource_release(dev,
								  dev_handle);
		if (dev_handle->dvh.rix_tag)
			flow_dv_tag_release(dev,
					    dev_handle->dvh.rix_tag);
		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
			flow_dv_fate_resource_release(dev, dev_handle);
		else if (!srss)
			/*
			 * Defer shared RSS release until all handles are
			 * freed; only the first handle's index is recorded.
			 */
			srss = dev_handle->rix_srss;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
	if (srss)
		flow_dv_shared_rss_action_release(dev, srss);
}
12598
12599 /**
12600  * Release array of hash RX queue objects.
12601  * Helper function.
12602  *
12603  * @param[in] dev
12604  *   Pointer to the Ethernet device structure.
12605  * @param[in, out] hrxqs
12606  *   Array of hash RX queue objects.
12607  *
12608  * @return
12609  *   Total number of references to hash RX queue objects in *hrxqs* array
12610  *   after this operation.
12611  */
12612 static int
12613 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
12614                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
12615 {
12616         size_t i;
12617         int remaining = 0;
12618
12619         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
12620                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
12621
12622                 if (!ret)
12623                         (*hrxqs)[i] = 0;
12624                 remaining += ret;
12625         }
12626         return remaining;
12627 }
12628
12629 /**
12630  * Release all hash RX queue objects representing shared RSS action.
12631  *
12632  * @param[in] dev
12633  *   Pointer to the Ethernet device structure.
12634  * @param[in, out] action
12635  *   Shared RSS action to remove hash RX queue objects from.
12636  *
12637  * @return
12638  *   Total number of references to hash RX queue objects stored in *action*
12639  *   after this operation.
12640  *   Expected to be 0 if no external references held.
12641  */
12642 static int
12643 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
12644                                  struct mlx5_shared_action_rss *shared_rss)
12645 {
12646         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq) +
12647                 __flow_dv_hrxqs_release(dev, &shared_rss->hrxq_tunnel);
12648 }
12649
/**
 * Setup shared RSS action.
 * Prepare set of hash RX queue objects sufficient to handle all valid
 * hash_fields combinations (see enum ibv_rx_hash_fields).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action_idx
 *   Shared RSS action ipool index.
 * @param[in, out] shared_rss
 *   Partially initialized shared RSS action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
			   uint32_t action_idx,
			   struct mlx5_shared_action_rss *shared_rss,
			   struct rte_flow_error *error)
{
	struct mlx5_flow_rss_desc rss_desc = { 0 };
	size_t i;
	int err;

	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot setup indirection table");
	}
	memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc.const_q = shared_rss->origin.queue;
	rss_desc.queue_num = shared_rss->origin.queue_num;
	/* Set non-zero value to indicate a shared RSS. */
	rss_desc.shared_rss = action_idx;
	rss_desc.ind_tbl = shared_rss->ind_tbl;
	/* One hash RX queue per hash-fields value, tunnel and non-tunnel. */
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel;

		for (tunnel = 0; tunnel < 2; tunnel++) {
			rss_desc.tunnel = tunnel;
			rss_desc.hash_fields = hash_fields;
			hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
			if (!hrxq_idx) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error_hrxq_new;
			}
			err = __flow_dv_action_rss_hrxq_set
				(shared_rss, hash_fields, tunnel, hrxq_idx);
			MLX5_ASSERT(!err);
		}
	}
	return 0;
error_hrxq_new:
	/*
	 * Roll back: release every hrxq created so far and drop the
	 * indirection table. rte_errno is preserved across the cleanup
	 * calls so the caller sees the original failure cause.
	 */
	err = rte_errno;
	__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
		shared_rss->ind_tbl = NULL;
	rte_errno = err;
	return -rte_errno;
}
12720
/**
 * Create shared RSS action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Shared action configuration.
 * @param[in] rss
 *   RSS action specification used to create shared action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   A valid shared action ID in case of success, 0 otherwise and
 *   rte_errno is set.
 */
static uint32_t
__flow_dv_action_rss_create(struct rte_eth_dev *dev,
			    const struct rte_flow_shared_action_conf *conf,
			    const struct rte_flow_action_rss *rss,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss = NULL;
	void *queue = NULL;
	struct rte_flow_action_rss *origin;
	const uint8_t *rss_key;
	uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
	uint32_t idx;

	RTE_SET_USED(conf);
	queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	shared_rss = mlx5_ipool_zmalloc
			 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
	if (!shared_rss || !queue) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		goto error_rss_init;
	}
	/* The ipool index must fit in the bits below the type field. */
	if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "rss action number out of range");
		goto error_rss_init;
	}
	shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
					  sizeof(*shared_rss->ind_tbl),
					  0, SOCKET_ID_ANY);
	if (!shared_rss->ind_tbl) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		goto error_rss_init;
	}
	/* Ownership of the queue array passes to the indirection table. */
	memcpy(queue, rss->queue, queue_size);
	shared_rss->ind_tbl->queues = queue;
	shared_rss->ind_tbl->queues_n = rss->queue_num;
	origin = &shared_rss->origin;
	origin->func = rss->func;
	origin->level = rss->level;
	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	origin->key = &shared_rss->key[0];
	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
	origin->queue = queue;
	origin->queue_num = rss->queue_num;
	if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
		goto error_rss_init;
	rte_spinlock_init(&shared_rss->action_rss_sl);
	__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
	rte_spinlock_lock(&priv->shared_act_sl);
	/* Publish the action on the per-port shared-actions list. */
	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_rss, next);
	rte_spinlock_unlock(&priv->shared_act_sl);
	return idx;
error_rss_init:
	/*
	 * ind_tbl may already be NULL here if setup failed and released it;
	 * the guard below keeps the cleanup safe in both cases.
	 */
	if (shared_rss) {
		if (shared_rss->ind_tbl)
			mlx5_free(shared_rss->ind_tbl);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
				idx);
	}
	if (queue)
		mlx5_free(queue);
	return 0;
}
12813
/**
 * Destroy the shared RSS action.
 * Release related hash RX queue objects.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   The shared RSS action object ID to be removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	uint32_t old_refcnt = 1;
	int remaining;
	uint16_t *queue = NULL;

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action");
	remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss hrxq has references");
	/* Only the creation reference may remain; atomically drop it to 0. */
	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
					 0, 0, __ATOMIC_ACQUIRE,
					 __ATOMIC_RELAXED))
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss has references");
	/* Save the queue array pointer before the indirection table is gone. */
	queue = shared_rss->ind_tbl->queues;
	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
	/*
	 * NOTE(review): refcnt is already zeroed at this point; if the
	 * indirection table still has references the action is left with
	 * refcnt 0 but not freed - confirm callers cannot hit this state.
	 */
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss indirection table has"
					  " references");
	mlx5_free(queue);
	rte_spinlock_lock(&priv->shared_act_sl);
	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_rss, next);
	rte_spinlock_unlock(&priv->shared_act_sl);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			idx);
	return 0;
}
12874
12875 /**
12876  * Create shared action, lock free,
12877  * (mutex should be acquired by caller).
12878  * Dispatcher for action type specific call.
12879  *
12880  * @param[in] dev
12881  *   Pointer to the Ethernet device structure.
12882  * @param[in] conf
12883  *   Shared action configuration.
12884  * @param[in] action
12885  *   Action specification used to create shared action.
12886  * @param[out] error
12887  *   Perform verbose error reporting if not NULL. Initialized in case of
12888  *   error only.
12889  *
12890  * @return
12891  *   A valid shared action handle in case of success, NULL otherwise and
12892  *   rte_errno is set.
12893  */
12894 static struct rte_flow_shared_action *
12895 flow_dv_action_create(struct rte_eth_dev *dev,
12896                       const struct rte_flow_shared_action_conf *conf,
12897                       const struct rte_flow_action *action,
12898                       struct rte_flow_error *err)
12899 {
12900         uint32_t idx = 0;
12901         uint32_t ret = 0;
12902
12903         switch (action->type) {
12904         case RTE_FLOW_ACTION_TYPE_RSS:
12905                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
12906                 idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
12907                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12908                 break;
12909         case RTE_FLOW_ACTION_TYPE_AGE:
12910                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
12911                 idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
12912                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12913                 if (ret) {
12914                         struct mlx5_aso_age_action *aso_age =
12915                                               flow_aso_age_get_by_idx(dev, ret);
12916
12917                         if (!aso_age->age_params.context)
12918                                 aso_age->age_params.context =
12919                                                          (void *)(uintptr_t)idx;
12920                 }
12921                 break;
12922         default:
12923                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12924                                    NULL, "action type not supported");
12925                 break;
12926         }
12927         return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
12928 }
12929
12930 /**
12931  * Destroy the shared action.
12932  * Release action related resources on the NIC and the memory.
12933  * Lock free, (mutex should be acquired by caller).
12934  * Dispatcher for action type specific call.
12935  *
12936  * @param[in] dev
12937  *   Pointer to the Ethernet device structure.
12938  * @param[in] action
12939  *   The shared action object to be removed.
12940  * @param[out] error
12941  *   Perform verbose error reporting if not NULL. Initialized in case of
12942  *   error only.
12943  *
12944  * @return
12945  *   0 on success, otherwise negative errno value.
12946  */
12947 static int
12948 flow_dv_action_destroy(struct rte_eth_dev *dev,
12949                        struct rte_flow_shared_action *action,
12950                        struct rte_flow_error *error)
12951 {
12952         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12953         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12954         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12955         int ret;
12956
12957         switch (type) {
12958         case MLX5_SHARED_ACTION_TYPE_RSS:
12959                 return __flow_dv_action_rss_release(dev, idx, error);
12960         case MLX5_SHARED_ACTION_TYPE_AGE:
12961                 ret = flow_dv_aso_age_release(dev, idx);
12962                 if (ret)
12963                         /*
12964                          * In this case, the last flow has a reference will
12965                          * actually release the age action.
12966                          */
12967                         DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
12968                                 " released with references %d.", idx, ret);
12969                 return 0;
12970         default:
12971                 return rte_flow_error_set(error, ENOTSUP,
12972                                           RTE_FLOW_ERROR_TYPE_ACTION,
12973                                           NULL,
12974                                           "action type not supported");
12975         }
12976 }
12977
/**
 * Updates in place shared RSS action configuration.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   The shared RSS action object ID to be updated.
 * @param[in] action_conf
 *   RSS action specification used to modify *shared_rss*.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 * @note: currently only support update of RSS queues.
 */
static int
__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
			    const struct rte_flow_action_rss *action_conf,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	int ret = 0;
	void *queue = NULL;
	uint16_t *queue_old = NULL;
	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action to update");
	if (priv->obj_ops.ind_table_modify == NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot modify indirection table");
	/* Build the replacement queue array before taking the lock. */
	queue = mlx5_malloc(MLX5_MEM_ZERO,
			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	if (!queue)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	memcpy(queue, action_conf->queue, queue_size);
	MLX5_ASSERT(shared_rss->ind_tbl);
	/* Serialize against concurrent updates of the same action. */
	rte_spinlock_lock(&shared_rss->action_rss_sl);
	queue_old = shared_rss->ind_tbl->queues;
	ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
					queue, action_conf->queue_num, true);
	if (ret) {
		/* Modify failed: discard the new array, keep the old one. */
		mlx5_free(queue);
		ret = rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot update indirection table");
	} else {
		/* Swap succeeded: free the old array, record the new one. */
		mlx5_free(queue_old);
		shared_rss->origin.queue = queue;
		shared_rss->origin.queue_num = action_conf->queue_num;
	}
	rte_spinlock_unlock(&shared_rss->action_rss_sl);
	return ret;
}
13043
13044 /**
13045  * Updates in place shared action configuration, lock free,
13046  * (mutex should be acquired by caller).
13047  *
13048  * @param[in] dev
13049  *   Pointer to the Ethernet device structure.
13050  * @param[in] action
13051  *   The shared action object to be updated.
13052  * @param[in] action_conf
13053  *   Action specification used to modify *action*.
13054  *   *action_conf* should be of type correlating with type of the *action*,
13055  *   otherwise considered as invalid.
13056  * @param[out] error
13057  *   Perform verbose error reporting if not NULL. Initialized in case of
13058  *   error only.
13059  *
13060  * @return
13061  *   0 on success, otherwise negative errno value.
13062  */
13063 static int
13064 flow_dv_action_update(struct rte_eth_dev *dev,
13065                         struct rte_flow_shared_action *action,
13066                         const void *action_conf,
13067                         struct rte_flow_error *err)
13068 {
13069         uint32_t act_idx = (uint32_t)(uintptr_t)action;
13070         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
13071         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
13072
13073         switch (type) {
13074         case MLX5_SHARED_ACTION_TYPE_RSS:
13075                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
13076         default:
13077                 return rte_flow_error_set(err, ENOTSUP,
13078                                           RTE_FLOW_ERROR_TYPE_ACTION,
13079                                           NULL,
13080                                           "action type update not supported");
13081         }
13082 }
13083
13084 static int
13085 flow_dv_action_query(struct rte_eth_dev *dev,
13086                      const struct rte_flow_shared_action *action, void *data,
13087                      struct rte_flow_error *error)
13088 {
13089         struct mlx5_age_param *age_param;
13090         struct rte_flow_query_age *resp;
13091         uint32_t act_idx = (uint32_t)(uintptr_t)action;
13092         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
13093         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
13094
13095         switch (type) {
13096         case MLX5_SHARED_ACTION_TYPE_AGE:
13097                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
13098                 resp = data;
13099                 resp->aged = __atomic_load_n(&age_param->state,
13100                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
13101                                                                           1 : 0;
13102                 resp->sec_since_last_hit_valid = !resp->aged;
13103                 if (resp->sec_since_last_hit_valid)
13104                         resp->sec_since_last_hit = __atomic_load_n
13105                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
13106                 return 0;
13107         default:
13108                 return rte_flow_error_set(error, ENOTSUP,
13109                                           RTE_FLOW_ERROR_TYPE_ACTION,
13110                                           NULL,
13111                                           "action type query not supported");
13112         }
13113 }
13114
/**
 * Query a dv flow  rule for its statistics via devx.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
		    void *data, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_query_count *qc = data;

	if (!priv->config.devx)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "counters are not supported");
	if (flow->counter) {
		uint64_t pkts, bytes;
		struct mlx5_flow_counter *cnt;

		cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
						 NULL);
		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
					       &bytes);

		if (err)
			return rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot read counters");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		/* Report deltas relative to the last reset baseline. */
		qc->hits = pkts - cnt->hits;
		qc->bytes = bytes - cnt->bytes;
		/* On reset, the current raw values become the new baseline. */
		if (qc->reset) {
			cnt->hits = pkts;
			cnt->bytes = bytes;
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
}
13170
/**
 * Query a flow rule AGE action for aging information.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
		  void *data, struct rte_flow_error *error)
{
	struct rte_flow_query_age *resp = data;
	struct mlx5_age_param *age_param;

	/* Age parameters come either from an ASO action or from a counter. */
	if (flow->age) {
		struct mlx5_aso_age_action *act =
				     flow_aso_age_get_by_idx(dev, flow->age);

		age_param = &act->age_params;
	} else if (flow->counter) {
		age_param = flow_dv_counter_idx_get_age(dev, flow->counter);

		if (!age_param || !age_param->timeout)
			return rte_flow_error_set
					(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					 NULL, "cannot read age data");
	} else {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "age data not available");
	}
	resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
				     AGE_TMOUT ? 1 : 0;
	/* The hit timestamp is only meaningful while the flow is not aged. */
	resp->sec_since_last_hit_valid = !resp->aged;
	if (resp->sec_since_last_hit_valid)
		resp->sec_since_last_hit = __atomic_load_n
			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
	return 0;
}
13219
13220 /**
13221  * Query a flow.
13222  *
13223  * @see rte_flow_query()
13224  * @see rte_flow_ops
13225  */
13226 static int
13227 flow_dv_query(struct rte_eth_dev *dev,
13228               struct rte_flow *flow __rte_unused,
13229               const struct rte_flow_action *actions __rte_unused,
13230               void *data __rte_unused,
13231               struct rte_flow_error *error __rte_unused)
13232 {
13233         int ret = -EINVAL;
13234
13235         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
13236                 switch (actions->type) {
13237                 case RTE_FLOW_ACTION_TYPE_VOID:
13238                         break;
13239                 case RTE_FLOW_ACTION_TYPE_COUNT:
13240                         ret = flow_dv_query_count(dev, flow, data, error);
13241                         break;
13242                 case RTE_FLOW_ACTION_TYPE_AGE:
13243                         ret = flow_dv_query_age(dev, flow, data, error);
13244                         break;
13245                 default:
13246                         return rte_flow_error_set(error, ENOTSUP,
13247                                                   RTE_FLOW_ERROR_TYPE_ACTION,
13248                                                   actions,
13249                                                   "action not supported");
13250                 }
13251         }
13252         return ret;
13253 }
13254
13255 /**
13256  * Destroy the meter table set.
13257  * Lock free, (mutex should be acquired by caller).
13258  *
13259  * @param[in] dev
13260  *   Pointer to Ethernet device.
13261  * @param[in] tbl
13262  *   Pointer to the meter table set.
13263  *
13264  * @return
13265  *   Always 0.
13266  */
13267 static int
13268 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
13269                         struct mlx5_meter_domains_infos *tbl)
13270 {
13271         struct mlx5_priv *priv = dev->data->dev_private;
13272         struct mlx5_meter_domains_infos *mtd =
13273                                 (struct mlx5_meter_domains_infos *)tbl;
13274
13275         if (!mtd || !priv->config.dv_flow_en)
13276                 return 0;
13277         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
13278                 claim_zero(mlx5_flow_os_destroy_flow
13279                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
13280         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
13281                 claim_zero(mlx5_flow_os_destroy_flow
13282                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
13283         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
13284                 claim_zero(mlx5_flow_os_destroy_flow
13285                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
13286         if (mtd->egress.color_matcher)
13287                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13288                            (mtd->egress.color_matcher));
13289         if (mtd->egress.any_matcher)
13290                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13291                            (mtd->egress.any_matcher));
13292         if (mtd->egress.tbl)
13293                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
13294         if (mtd->egress.sfx_tbl)
13295                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
13296         if (mtd->ingress.color_matcher)
13297                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13298                            (mtd->ingress.color_matcher));
13299         if (mtd->ingress.any_matcher)
13300                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13301                            (mtd->ingress.any_matcher));
13302         if (mtd->ingress.tbl)
13303                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
13304         if (mtd->ingress.sfx_tbl)
13305                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13306                                              mtd->ingress.sfx_tbl);
13307         if (mtd->transfer.color_matcher)
13308                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13309                            (mtd->transfer.color_matcher));
13310         if (mtd->transfer.any_matcher)
13311                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13312                            (mtd->transfer.any_matcher));
13313         if (mtd->transfer.tbl)
13314                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
13315         if (mtd->transfer.sfx_tbl)
13316                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13317                                              mtd->transfer.sfx_tbl);
13318         if (mtd->drop_actn)
13319                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
13320         mlx5_free(mtd);
13321         return 0;
13322 }
13323
13324 /* Number of meter flow actions, count and jump or count and drop. */
13325 #define METER_ACTIONS 2
13326
13327 /**
13328  * Create specify domain meter table and suffix table.
13329  *
13330  * @param[in] dev
13331  *   Pointer to Ethernet device.
13332  * @param[in,out] mtb
13333  *   Pointer to DV meter table set.
13334  * @param[in] egress
13335  *   Table attribute.
13336  * @param[in] transfer
13337  *   Table attribute.
13338  * @param[in] color_reg_c_idx
13339  *   Reg C index for color match.
13340  *
13341  * @return
13342  *   0 on success, -1 otherwise and rte_errno is set.
13343  */
13344 static int
13345 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
13346                            struct mlx5_meter_domains_infos *mtb,
13347                            uint8_t egress, uint8_t transfer,
13348                            uint32_t color_reg_c_idx)
13349 {
13350         struct mlx5_priv *priv = dev->data->dev_private;
13351         struct mlx5_dev_ctx_shared *sh = priv->sh;
13352         struct mlx5_flow_dv_match_params mask = {
13353                 .size = sizeof(mask.buf),
13354         };
13355         struct mlx5_flow_dv_match_params value = {
13356                 .size = sizeof(value.buf),
13357         };
13358         struct mlx5dv_flow_matcher_attr dv_attr = {
13359                 .type = IBV_FLOW_ATTR_NORMAL,
13360                 .priority = 0,
13361                 .match_criteria_enable = 0,
13362                 .match_mask = (void *)&mask,
13363         };
13364         void *actions[METER_ACTIONS];
13365         struct mlx5_meter_domain_info *dtb;
13366         struct rte_flow_error error;
13367         int i = 0;
13368         int ret;
13369
13370         if (transfer)
13371                 dtb = &mtb->transfer;
13372         else if (egress)
13373                 dtb = &mtb->egress;
13374         else
13375                 dtb = &mtb->ingress;
13376         /* Create the meter table with METER level. */
13377         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
13378                                             egress, transfer, false, NULL, 0,
13379                                             0, &error);
13380         if (!dtb->tbl) {
13381                 DRV_LOG(ERR, "Failed to create meter policer table.");
13382                 return -1;
13383         }
13384         /* Create the meter suffix table with SUFFIX level. */
13385         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
13386                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
13387                                             egress, transfer, false, NULL, 0,
13388                                             0, &error);
13389         if (!dtb->sfx_tbl) {
13390                 DRV_LOG(ERR, "Failed to create meter suffix table.");
13391                 return -1;
13392         }
13393         /* Create matchers, Any and Color. */
13394         dv_attr.priority = 3;
13395         dv_attr.match_criteria_enable = 0;
13396         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
13397                                                &dtb->any_matcher);
13398         if (ret) {
13399                 DRV_LOG(ERR, "Failed to create meter"
13400                              " policer default matcher.");
13401                 goto error_exit;
13402         }
13403         dv_attr.priority = 0;
13404         dv_attr.match_criteria_enable =
13405                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
13406         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
13407                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
13408         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
13409                                                &dtb->color_matcher);
13410         if (ret) {
13411                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
13412                 goto error_exit;
13413         }
13414         if (mtb->count_actns[RTE_MTR_DROPPED])
13415                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
13416         actions[i++] = mtb->drop_actn;
13417         /* Default rule: lowest priority, match any, actions: drop. */
13418         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
13419                                        actions,
13420                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
13421         if (ret) {
13422                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
13423                 goto error_exit;
13424         }
13425         return 0;
13426 error_exit:
13427         return -1;
13428 }
13429
13430 /**
13431  * Create the needed meter and suffix tables.
13432  * Lock free, (mutex should be acquired by caller).
13433  *
13434  * @param[in] dev
13435  *   Pointer to Ethernet device.
13436  * @param[in] fm
13437  *   Pointer to the flow meter.
13438  *
13439  * @return
13440  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
13441  */
13442 static struct mlx5_meter_domains_infos *
13443 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
13444                        const struct mlx5_flow_meter *fm)
13445 {
13446         struct mlx5_priv *priv = dev->data->dev_private;
13447         struct mlx5_meter_domains_infos *mtb;
13448         int ret;
13449         int i;
13450
13451         if (!priv->mtr_en) {
13452                 rte_errno = ENOTSUP;
13453                 return NULL;
13454         }
13455         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
13456         if (!mtb) {
13457                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
13458                 return NULL;
13459         }
13460         /* Create meter count actions */
13461         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
13462                 struct mlx5_flow_counter *cnt;
13463                 if (!fm->policer_stats.cnt[i])
13464                         continue;
13465                 cnt = flow_dv_counter_get_by_idx(dev,
13466                       fm->policer_stats.cnt[i], NULL);
13467                 mtb->count_actns[i] = cnt->action;
13468         }
13469         /* Create drop action. */
13470         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
13471         if (ret) {
13472                 DRV_LOG(ERR, "Failed to create drop action.");
13473                 goto error_exit;
13474         }
13475         /* Egress meter table. */
13476         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
13477         if (ret) {
13478                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
13479                 goto error_exit;
13480         }
13481         /* Ingress meter table. */
13482         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
13483         if (ret) {
13484                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
13485                 goto error_exit;
13486         }
13487         /* FDB meter table. */
13488         if (priv->config.dv_esw_en) {
13489                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
13490                                                  priv->mtr_color_reg);
13491                 if (ret) {
13492                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
13493                         goto error_exit;
13494                 }
13495         }
13496         return mtb;
13497 error_exit:
13498         flow_dv_destroy_mtr_tbl(dev, mtb);
13499         return NULL;
13500 }
13501
13502 /**
13503  * Destroy domain policer rule.
13504  *
13505  * @param[in] dt
13506  *   Pointer to domain table.
13507  */
13508 static void
13509 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
13510 {
13511         int i;
13512
13513         for (i = 0; i < RTE_MTR_DROPPED; i++) {
13514                 if (dt->policer_rules[i]) {
13515                         claim_zero(mlx5_flow_os_destroy_flow
13516                                    (dt->policer_rules[i]));
13517                         dt->policer_rules[i] = NULL;
13518                 }
13519         }
13520         if (dt->jump_actn) {
13521                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
13522                 dt->jump_actn = NULL;
13523         }
13524 }
13525
13526 /**
13527  * Destroy policer rules.
13528  *
13529  * @param[in] dev
13530  *   Pointer to Ethernet device.
13531  * @param[in] fm
13532  *   Pointer to flow meter structure.
13533  * @param[in] attr
13534  *   Pointer to flow attributes.
13535  *
13536  * @return
13537  *   Always 0.
13538  */
13539 static int
13540 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
13541                               const struct mlx5_flow_meter *fm,
13542                               const struct rte_flow_attr *attr)
13543 {
13544         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
13545
13546         if (!mtb)
13547                 return 0;
13548         if (attr->egress)
13549                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
13550         if (attr->ingress)
13551                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
13552         if (attr->transfer)
13553                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
13554         return 0;
13555 }
13556
13557 /**
13558  * Create specify domain meter policer rule.
13559  *
13560  * @param[in] fm
13561  *   Pointer to flow meter structure.
13562  * @param[in] mtb
13563  *   Pointer to DV meter table set.
13564  * @param[in] mtr_reg_c
13565  *   Color match REG_C.
13566  *
13567  * @return
13568  *   0 on success, -1 otherwise.
13569  */
13570 static int
13571 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
13572                                     struct mlx5_meter_domain_info *dtb,
13573                                     uint8_t mtr_reg_c)
13574 {
13575         struct mlx5_flow_dv_match_params matcher = {
13576                 .size = sizeof(matcher.buf),
13577         };
13578         struct mlx5_flow_dv_match_params value = {
13579                 .size = sizeof(value.buf),
13580         };
13581         struct mlx5_meter_domains_infos *mtb = fm->mfts;
13582         void *actions[METER_ACTIONS];
13583         int i;
13584         int ret = 0;
13585
13586         /* Create jump action. */
13587         if (!dtb->jump_actn)
13588                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
13589                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
13590         if (ret) {
13591                 DRV_LOG(ERR, "Failed to create policer jump action.");
13592                 goto error;
13593         }
13594         for (i = 0; i < RTE_MTR_DROPPED; i++) {
13595                 int j = 0;
13596
13597                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
13598                                        rte_col_2_mlx5_col(i), UINT8_MAX);
13599                 if (mtb->count_actns[i])
13600                         actions[j++] = mtb->count_actns[i];
13601                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
13602                         actions[j++] = mtb->drop_actn;
13603                 else
13604                         actions[j++] = dtb->jump_actn;
13605                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
13606                                                (void *)&value, j, actions,
13607                                                &dtb->policer_rules[i]);
13608                 if (ret) {
13609                         DRV_LOG(ERR, "Failed to create policer rule.");
13610                         goto error;
13611                 }
13612         }
13613         return 0;
13614 error:
13615         rte_errno = errno;
13616         return -1;
13617 }
13618
13619 /**
13620  * Create policer rules.
13621  *
13622  * @param[in] dev
13623  *   Pointer to Ethernet device.
13624  * @param[in] fm
13625  *   Pointer to flow meter structure.
13626  * @param[in] attr
13627  *   Pointer to flow attributes.
13628  *
13629  * @return
13630  *   0 on success, -1 otherwise.
13631  */
13632 static int
13633 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
13634                              struct mlx5_flow_meter *fm,
13635                              const struct rte_flow_attr *attr)
13636 {
13637         struct mlx5_priv *priv = dev->data->dev_private;
13638         struct mlx5_meter_domains_infos *mtb = fm->mfts;
13639         int ret;
13640
13641         if (attr->egress) {
13642                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
13643                                                 priv->mtr_color_reg);
13644                 if (ret) {
13645                         DRV_LOG(ERR, "Failed to create egress policer.");
13646                         goto error;
13647                 }
13648         }
13649         if (attr->ingress) {
13650                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
13651                                                 priv->mtr_color_reg);
13652                 if (ret) {
13653                         DRV_LOG(ERR, "Failed to create ingress policer.");
13654                         goto error;
13655                 }
13656         }
13657         if (attr->transfer) {
13658                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
13659                                                 priv->mtr_color_reg);
13660                 if (ret) {
13661                         DRV_LOG(ERR, "Failed to create transfer policer.");
13662                         goto error;
13663                 }
13664         }
13665         return 0;
13666 error:
13667         flow_dv_destroy_policer_rules(dev, fm, attr);
13668         return -1;
13669 }
13670
13671 /**
13672  * Validate the batch counter support in root table.
13673  *
13674  * Create a simple flow with invalid counter and drop action on root table to
13675  * validate if batch counter with offset on root table is supported or not.
13676  *
13677  * @param[in] dev
13678  *   Pointer to rte_eth_dev structure.
13679  *
13680  * @return
13681  *   0 on success, a negative errno value otherwise and rte_errno is set.
13682  */
13683 int
13684 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
13685 {
13686         struct mlx5_priv *priv = dev->data->dev_private;
13687         struct mlx5_dev_ctx_shared *sh = priv->sh;
13688         struct mlx5_flow_dv_match_params mask = {
13689                 .size = sizeof(mask.buf),
13690         };
13691         struct mlx5_flow_dv_match_params value = {
13692                 .size = sizeof(value.buf),
13693         };
13694         struct mlx5dv_flow_matcher_attr dv_attr = {
13695                 .type = IBV_FLOW_ATTR_NORMAL,
13696                 .priority = 0,
13697                 .match_criteria_enable = 0,
13698                 .match_mask = (void *)&mask,
13699         };
13700         void *actions[2] = { 0 };
13701         struct mlx5_flow_tbl_resource *tbl = NULL;
13702         struct mlx5_devx_obj *dcs = NULL;
13703         void *matcher = NULL;
13704         void *flow = NULL;
13705         int ret = -1;
13706
13707         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
13708         if (!tbl)
13709                 goto err;
13710         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
13711         if (!dcs)
13712                 goto err;
13713         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
13714                                                     &actions[0]);
13715         if (ret)
13716                 goto err;
13717         actions[1] = priv->drop_queue.hrxq->action;
13718         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
13719         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
13720                                                &matcher);
13721         if (ret)
13722                 goto err;
13723         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
13724                                        actions, &flow);
13725 err:
13726         /*
13727          * If batch counter with offset is not supported, the driver will not
13728          * validate the invalid offset value, flow create should success.
13729          * In this case, it means batch counter is not supported in root table.
13730          *
13731          * Otherwise, if flow create is failed, counter offset is supported.
13732          */
13733         if (flow) {
13734                 DRV_LOG(INFO, "Batch counter is not supported in root "
13735                               "table. Switch to fallback mode.");
13736                 rte_errno = ENOTSUP;
13737                 ret = -rte_errno;
13738                 claim_zero(mlx5_flow_os_destroy_flow(flow));
13739         } else {
13740                 /* Check matcher to make sure validate fail at flow create. */
13741                 if (!matcher || (matcher && errno != EINVAL))
13742                         DRV_LOG(ERR, "Unexpected error in counter offset "
13743                                      "support detection");
13744                 ret = 0;
13745         }
13746         if (actions[0])
13747                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
13748         if (matcher)
13749                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
13750         if (tbl)
13751                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13752         if (dcs)
13753                 claim_zero(mlx5_devx_cmd_destroy(dcs));
13754         return ret;
13755 }
13756
13757 /**
13758  * Query a devx counter.
13759  *
13760  * @param[in] dev
13761  *   Pointer to the Ethernet device structure.
13762  * @param[in] cnt
13763  *   Index to the flow counter.
13764  * @param[in] clear
13765  *   Set to clear the counter statistics.
13766  * @param[out] pkts
13767  *   The statistics value of packets.
13768  * @param[out] bytes
13769  *   The statistics value of bytes.
13770  *
13771  * @return
13772  *   0 on success, otherwise return -1.
13773  */
13774 static int
13775 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
13776                       uint64_t *pkts, uint64_t *bytes)
13777 {
13778         struct mlx5_priv *priv = dev->data->dev_private;
13779         struct mlx5_flow_counter *cnt;
13780         uint64_t inn_pkts, inn_bytes;
13781         int ret;
13782
13783         if (!priv->config.devx)
13784                 return -1;
13785
13786         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
13787         if (ret)
13788                 return -1;
13789         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
13790         *pkts = inn_pkts - cnt->hits;
13791         *bytes = inn_bytes - cnt->bytes;
13792         if (clear) {
13793                 cnt->hits = inn_pkts;
13794                 cnt->bytes = inn_bytes;
13795         }
13796         return 0;
13797 }
13798
13799 /**
13800  * Get aged-out flows.
13801  *
13802  * @param[in] dev
13803  *   Pointer to the Ethernet device structure.
13804  * @param[in] context
13805  *   The address of an array of pointers to the aged-out flows contexts.
13806  * @param[in] nb_contexts
13807  *   The length of context array pointers.
13808  * @param[out] error
13809  *   Perform verbose error reporting if not NULL. Initialized in case of
13810  *   error only.
13811  *
13812  * @return
13813  *   how many contexts get in success, otherwise negative errno value.
13814  *   if nb_contexts is 0, return the amount of all aged contexts.
13815  *   if nb_contexts is not 0 , return the amount of aged flows reported
13816  *   in the context array.
13817  * @note: only stub for now
13818  */
13819 static int
13820 flow_get_aged_flows(struct rte_eth_dev *dev,
13821                     void **context,
13822                     uint32_t nb_contexts,
13823                     struct rte_flow_error *error)
13824 {
13825         struct mlx5_priv *priv = dev->data->dev_private;
13826         struct mlx5_age_info *age_info;
13827         struct mlx5_age_param *age_param;
13828         struct mlx5_flow_counter *counter;
13829         struct mlx5_aso_age_action *act;
13830         int nb_flows = 0;
13831
13832         if (nb_contexts && !context)
13833                 return rte_flow_error_set(error, EINVAL,
13834                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13835                                           NULL, "empty context");
13836         age_info = GET_PORT_AGE_INFO(priv);
13837         rte_spinlock_lock(&age_info->aged_sl);
13838         LIST_FOREACH(act, &age_info->aged_aso, next) {
13839                 nb_flows++;
13840                 if (nb_contexts) {
13841                         context[nb_flows - 1] =
13842                                                 act->age_params.context;
13843                         if (!(--nb_contexts))
13844                                 break;
13845                 }
13846         }
13847         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
13848                 nb_flows++;
13849                 if (nb_contexts) {
13850                         age_param = MLX5_CNT_TO_AGE(counter);
13851                         context[nb_flows - 1] = age_param->context;
13852                         if (!(--nb_contexts))
13853                                 break;
13854                 }
13855         }
13856         rte_spinlock_unlock(&age_info->aged_sl);
13857         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13858         return nb_flows;
13859 }
13860
13861 /*
13862  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
13863  */
13864 static uint32_t
13865 flow_dv_counter_allocate(struct rte_eth_dev *dev)
13866 {
13867         return flow_dv_counter_alloc(dev, 0);
13868 }
13869
13870 /**
13871  * Validate shared action.
13872  * Dispatcher for action type specific validation.
13873  *
13874  * @param[in] dev
13875  *   Pointer to the Ethernet device structure.
13876  * @param[in] conf
13877  *   Shared action configuration.
13878  * @param[in] action
13879  *   The shared action object to validate.
13880  * @param[out] error
13881  *   Perform verbose error reporting if not NULL. Initialized in case of
13882  *   error only.
13883  *
13884  * @return
13885  *   0 on success, otherwise negative errno value.
13886  */
13887 static int
13888 flow_dv_action_validate(struct rte_eth_dev *dev,
13889                         const struct rte_flow_shared_action_conf *conf,
13890                         const struct rte_flow_action *action,
13891                         struct rte_flow_error *err)
13892 {
13893         struct mlx5_priv *priv = dev->data->dev_private;
13894
13895         RTE_SET_USED(conf);
13896         switch (action->type) {
13897         case RTE_FLOW_ACTION_TYPE_RSS:
13898                 /*
13899                  * priv->obj_ops is set according to driver capabilities.
13900                  * When DevX capabilities are
13901                  * sufficient, it is set to devx_obj_ops.
13902                  * Otherwise, it is set to ibv_obj_ops.
13903                  * ibv_obj_ops doesn't support ind_table_modify operation.
13904                  * In this case the shared RSS action can't be used.
13905                  */
13906                 if (priv->obj_ops.ind_table_modify == NULL)
13907                         return rte_flow_error_set
13908                                         (err, ENOTSUP,
13909                                          RTE_FLOW_ERROR_TYPE_ACTION,
13910                                          NULL,
13911                                          "shared RSS action not supported");
13912                 return mlx5_validate_action_rss(dev, action, err);
13913         case RTE_FLOW_ACTION_TYPE_AGE:
13914                 if (!priv->sh->aso_age_mng)
13915                         return rte_flow_error_set(err, ENOTSUP,
13916                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13917                                                 NULL,
13918                                              "shared age action not supported");
13919                 return flow_dv_validate_action_age(0, action, dev, err);
13920         default:
13921                 return rte_flow_error_set(err, ENOTSUP,
13922                                           RTE_FLOW_ERROR_TYPE_ACTION,
13923                                           NULL,
13924                                           "action type not supported");
13925         }
13926 }
13927
13928 static int
13929 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
13930 {
13931         struct mlx5_priv *priv = dev->data->dev_private;
13932         int ret = 0;
13933
13934         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
13935                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
13936                                                 flags);
13937                 if (ret != 0)
13938                         return ret;
13939         }
13940         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
13941                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
13942                 if (ret != 0)
13943                         return ret;
13944         }
13945         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
13946                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
13947                 if (ret != 0)
13948                         return ret;
13949         }
13950         return 0;
13951 }
13952
/* DV (Direct Verbs/Rules) implementation of the mlx5 flow driver ops. */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        /* Flow lifecycle. */
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
        .query = flow_dv_query,
        /* Meter tables and policer rules. */
        .create_mtr_tbls = flow_dv_create_mtr_tbl,
        .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
        .create_policer_rules = flow_dv_create_policer_rules,
        .destroy_policer_rules = flow_dv_destroy_policer_rules,
        /* Counters and flow aging. */
        .counter_alloc = flow_dv_counter_allocate,
        .counter_free = flow_dv_counter_free,
        .counter_query = flow_dv_counter_query,
        .get_aged_flows = flow_get_aged_flows,
        /* Shared actions. */
        .action_validate = flow_dv_action_validate,
        .action_create = flow_dv_action_create,
        .action_destroy = flow_dv_action_destroy,
        .action_update = flow_dv_action_update,
        .action_query = flow_dv_action_query,
        /* Steering domain synchronization. */
        .sync_domain = flow_dv_sync_domain,
};
13976
13977 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
13978