net/mlx5: fix counter and age flow action validation
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
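
/*
 * The 802.1Q TCI packs the PCP in the top 3 bits (hence the shift of 13),
 * the DEI flag in bit 12 and the VID in the low 12 bits; e.g. PCP 5 with
 * VID 100 gives a TCI of (5 << 13) | 100 == 0xa064.
 */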

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers except in tunnel mode.
 * For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. The prefix
         * flow's layer flags must be reused because the suffix flow may
         * not contain the user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
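
/*
 * For example, with no prefix layer flags an item list of
 * ETH / IPV4 / UDP / END yields attr->ipv4 = 1, attr->udp = 1 and
 * attr->valid = 1.
 */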

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
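
/*
 * For example, OF_SET_VLAN_PCP with vlan_pcp = 5 clears the PCP bits
 * and ORs in 5 << 13, turning a TCI of 0x0064 into 0xa064; the VID
 * bits are left untouched.
 */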

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
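
/*
 * For example, data = {0x12, 0x34, 0x56} with size = 3 returns
 * 0x123456: the first two bytes are converted from big-endian and the
 * third byte is appended in the least significant position.
 */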

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask; the data bit offset and width are
 * determined by the item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field
         * width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
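
/*
 * For example, a 4-byte mask of 0x00fff000 yields off_b = 12 and
 * size_b = 32 - 12 - __builtin_clz(mask) = 12, i.e. a 12-bit
 * modification starting at bit offset 12 within the field.
 */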

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
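
/*
 * The decrement is implemented as MLX5_MODIFICATION_TYPE_ADD of 0xFF:
 * adding 0xFF to the 8-bit TTL/hop-limit field wraps modulo 256 and is
 * therefore equivalent to subtracting one.
 */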

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with an increment we
                 * add UINT32_MAX X times to Y: each addition of
                 * UINT32_MAX decrements Y by 1, since UINT32_MAX == -1
                 * in 32-bit two's complement arithmetic.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with an increment we
                 * add UINT32_MAX X times to Y: each addition of
                 * UINT32_MAX decrements Y by 1, since UINT32_MAX == -1
                 * in 32-bit two's complement arithmetic.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores the endianness because there
                         * is no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
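
/*
 * For example, on a little-endian host with dv_regc0_mask == 0xffff0000
 * and dst == REG_C_0: reg_dst.offset = rte_bsf32(0xffff0000) = 16 and
 * mask = 0xffff0000 << (32 - rte_fls_u32(0xffff0000)) = 0xffff0000, so
 * only the 16 bits covered by the mask are copied into reg_c[0].
 */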

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        /*
         * In the datapath code there are no endianness conversions for
         * performance reasons; all pattern conversions are done in
         * rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
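
/*
 * For example, on a little-endian host with msk_c0 == 0x00ffff00,
 * rte_fls_u32() returns 24, so shl_c0 = 32 - 24 = 8 and both data and
 * mask are shifted left by 8 bits to line up with the reg_c[0] window.
 */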

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bit offset in IPv6 is not byte-aligned,
         * rdma-core only accepts the DSCP bits byte-aligned, starting
         * from bit 0 to 5, to be compatible with IPv4. There is no need
         * to shift the bits in the IPv6 case since rdma-core requires a
         * byte-aligned value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

static void
mlx5_flow_field_id_to_modify_info
                (const struct rte_flow_action_modify_data *data,
                 struct field_modify_info *info,
                 uint32_t *mask, uint32_t *value, uint32_t width,
                 struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 struct rte_flow_error *error)
{
        uint32_t idx = 0;
        switch (data->field) {
        case RTE_FLOW_FIELD_START:
                /* not supported yet */
                MLX5_ASSERT(false);
                break;
        case RTE_FLOW_FIELD_MAC_DST:
                if (mask) {
                        if (data->offset < 32) {
                                info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DMAC_47_16};
                                mask[idx] = 0xffffffff;
                                if (width < 32) {
                                        mask[idx] = mask[idx] << (32 - width);
                                        width = 0;
                                } else {
                                        width -= 32;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        info[idx] = (struct field_modify_info){2, 4 * idx,
                                                MLX5_MODI_OUT_DMAC_15_0};
                        mask[idx] = (width) ? 0x0000ffff : 0x0;
                        if (width < 16)
                                mask[idx] = (mask[idx] << (16 - width)) &
                                                0x0000ffff;
                } else {
                        if (data->offset < 32)
                                info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_DMAC_47_16};
                        info[idx] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_DMAC_15_0};
                }
                break;
        case RTE_FLOW_FIELD_MAC_SRC:
                if (mask) {
                        if (data->offset < 32) {
                                info[idx] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SMAC_47_16};
                                mask[idx] = 0xffffffff;
                                if (width < 32) {
                                        mask[idx] = mask[idx] << (32 - width);
                                        width = 0;
                                } else {
                                        width -= 32;
                                }
                                if (!width)
                                        break;
                                ++idx;
                        }
                        info[idx] = (struct field_modify_info){2, 4 * idx,
                                                MLX5_MODI_OUT_SMAC_15_0};
                        mask[idx] = (width) ? 0x0000ffff : 0x0;
                        if (width < 16)
                                mask[idx] = (mask[idx] << (16 - width)) &
                                                0x0000ffff;
                } else {
                        if (data->offset < 32)
                                info[idx++] = (struct field_modify_info){4, 0,
                                                MLX5_MODI_OUT_SMAC_47_16};
                        info[idx] = (struct field_modify_info){2, 0,
                                                MLX5_MODI_OUT_SMAC_15_0};
                }
                break;
        case RTE_FLOW_FIELD_VLAN_TYPE:
                /* not supported yet */
                break;
        case RTE_FLOW_FIELD_VLAN_ID:
                info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_FIRST_VID};
                if (mask) {
                        mask[idx] = 0x00000fff;
                        if (width < 12)
                                mask[idx] = (mask[idx] << (12 - width)) &
                                                0x00000fff;
                }
                break;
        case RTE_FLOW_FIELD_MAC_TYPE:
                info[idx] = (struct field_modify_info){2, 0,
                                        MLX5_MODI_OUT_ETHERTYPE};
                if (mask) {
                        mask[idx] = 0x0000ffff;
                        if (width < 16)
                                mask[idx] = (mask[idx] << (16 - width)) &
                                                0x0000ffff;
                }
                break;
        case RTE_FLOW_FIELD_IPV4_DSCP:
                info[idx] = (struct field_modify_info){1, 0,
1428                                         MLX5_MODI_OUT_IP_DSCP};
1429                 if (mask) {
1430                         mask[idx] = 0x0000003f;
1431                         if (width < 6)
1432                                 mask[idx] = (mask[idx] << (6 - width)) &
1433                                                 0x0000003f;
1434                 }
1435                 break;
1436         case RTE_FLOW_FIELD_IPV4_TTL:
1437                 info[idx] = (struct field_modify_info){1, 0,
1438                                         MLX5_MODI_OUT_IPV4_TTL};
1439                 if (mask) {
1440                         mask[idx] = 0x000000ff;
1441                         if (width < 8)
1442                                 mask[idx] = (mask[idx] << (8 - width)) &
1443                                                 0x000000ff;
1444                 }
1445                 break;
1446         case RTE_FLOW_FIELD_IPV4_SRC:
1447                 info[idx] = (struct field_modify_info){4, 0,
1448                                         MLX5_MODI_OUT_SIPV4};
1449                 if (mask) {
1450                         mask[idx] = 0xffffffff;
1451                         if (width < 32)
1452                                 mask[idx] = mask[idx] << (32 - width);
1453                 }
1454                 break;
1455         case RTE_FLOW_FIELD_IPV4_DST:
1456                 info[idx] = (struct field_modify_info){4, 0,
1457                                         MLX5_MODI_OUT_DIPV4};
1458                 if (mask) {
1459                         mask[idx] = 0xffffffff;
1460                         if (width < 32)
1461                                 mask[idx] = mask[idx] << (32 - width);
1462                 }
1463                 break;
1464         case RTE_FLOW_FIELD_IPV6_DSCP:
1465                 info[idx] = (struct field_modify_info){1, 0,
1466                                         MLX5_MODI_OUT_IP_DSCP};
1467                 if (mask) {
1468                         mask[idx] = 0x0000003f;
1469                         if (width < 6)
1470                                 mask[idx] = (mask[idx] << (6 - width)) &
1471                                                 0x0000003f;
1472                 }
1473                 break;
1474         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1475                 info[idx] = (struct field_modify_info){1, 0,
1476                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1477                 if (mask) {
1478                         mask[idx] = 0x000000ff;
1479                         if (width < 8)
1480                                 mask[idx] = (mask[idx] << (8 - width)) &
1481                                                 0x000000ff;
1482                 }
1483                 break;
1484         case RTE_FLOW_FIELD_IPV6_SRC:
1485                 if (mask) {
1486                         if (data->offset < 32) {
1487                                 info[idx] = (struct field_modify_info){4, 0,
1488                                                 MLX5_MODI_OUT_SIPV6_127_96};
1489                                 mask[idx] = 0xffffffff;
1490                                 if (width < 32) {
1491                                         mask[idx] = mask[idx] << (32 - width);
1492                                         width = 0;
1493                                 } else {
1494                                         width -= 32;
1495                                 }
1496                                 if (!width)
1497                                         break;
1498                                 ++idx;
1499                         }
1500                         if (data->offset < 64) {
1501                                 info[idx] = (struct field_modify_info){4,
1502                                                 4 * idx,
1503                                                 MLX5_MODI_OUT_SIPV6_95_64};
1504                                 mask[idx] = 0xffffffff;
1505                                 if (width < 32) {
1506                                         mask[idx] = mask[idx] << (32 - width);
1507                                         width = 0;
1508                                 } else {
1509                                         width -= 32;
1510                                 }
1511                                 if (!width)
1512                                         break;
1513                                 ++idx;
1514                         }
1515                         if (data->offset < 96) {
1516                                 info[idx] = (struct field_modify_info){4,
1517                                                 8 * idx,
1518                                                 MLX5_MODI_OUT_SIPV6_63_32};
1519                                 mask[idx] = 0xffffffff;
1520                                 if (width < 32) {
1521                                         mask[idx] = mask[idx] << (32 - width);
1522                                         width = 0;
1523                                 } else {
1524                                         width -= 32;
1525                                 }
1526                                 if (!width)
1527                                         break;
1528                                 ++idx;
1529                         }
1530                         info[idx] = (struct field_modify_info){4, 12 * idx,
1531                                                 MLX5_MODI_OUT_SIPV6_31_0};
1532                         mask[idx] = 0xffffffff;
1533                         if (width < 32)
1534                                 mask[idx] = mask[idx] << (32 - width);
1535                 } else {
1536                         if (data->offset < 32)
1537                                 info[idx++] = (struct field_modify_info){4, 0,
1538                                                 MLX5_MODI_OUT_SIPV6_127_96};
1539                         if (data->offset < 64)
1540                                 info[idx++] = (struct field_modify_info){4, 0,
1541                                                 MLX5_MODI_OUT_SIPV6_95_64};
1542                         if (data->offset < 96)
1543                                 info[idx++] = (struct field_modify_info){4, 0,
1544                                                 MLX5_MODI_OUT_SIPV6_63_32};
1545                         if (data->offset < 128)
1546                                 info[idx++] = (struct field_modify_info){4, 0,
1547                                                 MLX5_MODI_OUT_SIPV6_31_0};
1548                 }
1549                 break;
1550         case RTE_FLOW_FIELD_IPV6_DST:
1551                 if (mask) {
1552                         if (data->offset < 32) {
1553                                 info[idx] = (struct field_modify_info){4, 0,
1554                                                 MLX5_MODI_OUT_DIPV6_127_96};
1555                                 mask[idx] = 0xffffffff;
1556                                 if (width < 32) {
1557                                         mask[idx] = mask[idx] << (32 - width);
1558                                         width = 0;
1559                                 } else {
1560                                         width -= 32;
1561                                 }
1562                                 if (!width)
1563                                         break;
1564                                 ++idx;
1565                         }
1566                         if (data->offset < 64) {
1567                                 info[idx] = (struct field_modify_info){4,
1568                                                 4 * idx,
1569                                                 MLX5_MODI_OUT_DIPV6_95_64};
1570                                 mask[idx] = 0xffffffff;
1571                                 if (width < 32) {
1572                                         mask[idx] = mask[idx] << (32 - width);
1573                                         width = 0;
1574                                 } else {
1575                                         width -= 32;
1576                                 }
1577                                 if (!width)
1578                                         break;
1579                                 ++idx;
1580                         }
1581                         if (data->offset < 96) {
1582                                 info[idx] = (struct field_modify_info){4,
1583                                                 8 * idx,
1584                                                 MLX5_MODI_OUT_DIPV6_63_32};
1585                                 mask[idx] = 0xffffffff;
1586                                 if (width < 32) {
1587                                         mask[idx] = mask[idx] << (32 - width);
1588                                         width = 0;
1589                                 } else {
1590                                         width -= 32;
1591                                 }
1592                                 if (!width)
1593                                         break;
1594                                 ++idx;
1595                         }
1596                         info[idx] = (struct field_modify_info){4, 12 * idx,
1597                                                 MLX5_MODI_OUT_DIPV6_31_0};
1598                         mask[idx] = 0xffffffff;
1599                         if (width < 32)
1600                                 mask[idx] = mask[idx] << (32 - width);
1601                 } else {
1602                         if (data->offset < 32)
1603                                 info[idx++] = (struct field_modify_info){4, 0,
1604                                                 MLX5_MODI_OUT_DIPV6_127_96};
1605                         if (data->offset < 64)
1606                                 info[idx++] = (struct field_modify_info){4, 0,
1607                                                 MLX5_MODI_OUT_DIPV6_95_64};
1608                         if (data->offset < 96)
1609                                 info[idx++] = (struct field_modify_info){4, 0,
1610                                                 MLX5_MODI_OUT_DIPV6_63_32};
1611                         if (data->offset < 128)
1612                                 info[idx++] = (struct field_modify_info){4, 0,
1613                                                 MLX5_MODI_OUT_DIPV6_31_0};
1614                 }
1615                 break;
1616         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1617                 info[idx] = (struct field_modify_info){2, 0,
1618                                         MLX5_MODI_OUT_TCP_SPORT};
1619                 if (mask) {
1620                         mask[idx] = 0x0000ffff;
1621                         if (width < 16)
1622                                 mask[idx] = (mask[idx] << (16 - width)) &
1623                                                 0x0000ffff;
1624                 }
1625                 break;
1626         case RTE_FLOW_FIELD_TCP_PORT_DST:
1627                 info[idx] = (struct field_modify_info){2, 0,
1628                                         MLX5_MODI_OUT_TCP_DPORT};
1629                 if (mask) {
1630                         mask[idx] = 0x0000ffff;
1631                         if (width < 16)
1632                                 mask[idx] = (mask[idx] << (16 - width)) &
1633                                                 0x0000ffff;
1634                 }
1635                 break;
1636         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1637                 info[idx] = (struct field_modify_info){4, 0,
1638                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1639                 if (mask) {
1640                         mask[idx] = 0xffffffff;
1641                         if (width < 32)
1642                                 mask[idx] = (mask[idx] << (32 - width));
1643                 }
1644                 break;
1645         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1646                 info[idx] = (struct field_modify_info){4, 0,
1647                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1648                 if (mask) {
1649                         mask[idx] = 0xffffffff;
1650                         if (width < 32)
1651                                 mask[idx] = (mask[idx] << (32 - width));
1652                 }
1653                 break;
1654         case RTE_FLOW_FIELD_TCP_FLAGS:
1655                 info[idx] = (struct field_modify_info){1, 0,
1656                                         MLX5_MODI_IN_TCP_FLAGS};
1657                 if (mask) {
1658                         mask[idx] = 0x0000003f;
1659                         if (width < 6)
1660                                 mask[idx] = (mask[idx] << (6 - width)) &
1661                                                 0x0000003f;
1662                 }
1663                 break;
1664         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1665                 info[idx] = (struct field_modify_info){2, 0,
1666                                         MLX5_MODI_OUT_UDP_SPORT};
1667                 if (mask) {
1668                         mask[idx] = 0x0000ffff;
1669                         if (width < 16)
1670                                 mask[idx] = (mask[idx] << (16 - width)) &
1671                                                 0x0000ffff;
1672                 }
1673                 break;
1674         case RTE_FLOW_FIELD_UDP_PORT_DST:
1675                 info[idx] = (struct field_modify_info){2, 0,
1676                                         MLX5_MODI_OUT_UDP_DPORT};
1677                 if (mask) {
1678                         mask[idx] = 0x0000ffff;
1679                         if (width < 16)
1680                                 mask[idx] = (mask[idx] << (16 - width)) &
1681                                                 0x0000ffff;
1682                 }
1683                 break;
1684         case RTE_FLOW_FIELD_VXLAN_VNI:
1685                 /* not supported yet */
1686                 break;
1687         case RTE_FLOW_FIELD_GENEVE_VNI:
1688                 /* not supported yet */
1689                 break;
1690         case RTE_FLOW_FIELD_GTP_TEID:
1691                 info[idx] = (struct field_modify_info){4, 0,
1692                                         MLX5_MODI_GTP_TEID};
1693                 if (mask) {
1694                         mask[idx] = 0xffffffff;
1695                         if (width < 32)
1696                                 mask[idx] = mask[idx] << (32 - width);
1697                 }
1698                 break;
1699         case RTE_FLOW_FIELD_TAG:
1700                 {
1701                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1702                                                    data->level, error);
1703                         if (reg < 0)
1704                                 return;
1705                         MLX5_ASSERT(reg != REG_NON);
1706                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1707                         info[idx] = (struct field_modify_info){4, 0,
1708                                                 reg_to_field[reg]};
1709                         if (mask) {
1710                                 mask[idx] = 0xffffffff;
1711                                 if (width < 32)
1712                                         mask[idx] = mask[idx] << (32 - width);
1713                         }
1714                 }
1715                 break;
1716         case RTE_FLOW_FIELD_MARK:
1717                 {
1718                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1719                                                        0, error);
1720                         if (reg < 0)
1721                                 return;
1722                         MLX5_ASSERT(reg != REG_NON);
1723                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1724                         info[idx] = (struct field_modify_info){4, 0,
1725                                                 reg_to_field[reg]};
1726                         if (mask) {
1727                                 mask[idx] = 0xffffffff;
1728                                 if (width < 32)
1729                                         mask[idx] = mask[idx] << (32 - width);
1730                         }
1731                 }
1732                 break;
1733         case RTE_FLOW_FIELD_META:
1734                 {
1735                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1736                         if (reg < 0)
1737                                 return;
1738                         MLX5_ASSERT(reg != REG_NON);
1739                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1740                         info[idx] = (struct field_modify_info){4, 0,
1741                                                 reg_to_field[reg]};
1742                         if (mask) {
1743                                 mask[idx] = 0xffffffff;
1744                                 if (width < 32)
1745                                         mask[idx] = mask[idx] << (32 - width);
1746                         }
1747                 }
1748                 break;
1749         case RTE_FLOW_FIELD_POINTER:
1750                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1751                         if (mask[idx]) {
1752                                 memcpy(&value[idx],
1753                                         (void *)(uintptr_t)data->value,
                                                sizeof(uint32_t));
1754                                 value[idx] = RTE_BE32(value[idx]);
1755                                 break;
1756                         }
1757                 }
1758                 break;
1759         case RTE_FLOW_FIELD_VALUE:
1760                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1761                         if (mask[idx]) {
1762                                 value[idx] = RTE_BE32((uint32_t)data->value);
1763                                 break;
1764                         }
1765                 }
1766                 break;
1767         default:
1768                 MLX5_ASSERT(false);
1769                 break;
1770         }
1771 }
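/*
 * Mask layout example (illustrative): modifying only the 16 most
 * significant bits of the destination MAC (width == 16, offset == 0)
 * produces a single entry whose mask is left-aligned within the 32-bit
 * modification word:
 *
 *	info[0] = (struct field_modify_info){4, 0, MLX5_MODI_OUT_DMAC_47_16};
 *	mask[0] = 0xffff0000;	(0xffffffff << (32 - 16))
 *
 * With width == 48 the first word consumes 32 bits and the remaining
 * 16 bits land in the MLX5_MODI_OUT_DMAC_15_0 entry.
 */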
1772
1773 /**
1774  * Convert modify_field action to DV specification.
1775  *
1776  * @param[in] dev
1777  *   Pointer to the rte_eth_dev structure.
1778  * @param[in,out] resource
1779  *   Pointer to the modify-header resource.
1780  * @param[in] action
1781  *   Pointer to action specification.
1782  * @param[in] attr
1783  *   Attributes of flow that includes this item.
1784  * @param[out] error
1785  *   Pointer to the error structure.
1786  *
1787  * @return
1788  *   0 on success, a negative errno value otherwise and rte_errno is set.
1789  */
1790 static int
1791 flow_dv_convert_action_modify_field
1792                         (struct rte_eth_dev *dev,
1793                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1794                          const struct rte_flow_action *action,
1795                          const struct rte_flow_attr *attr,
1796                          struct rte_flow_error *error)
1797 {
1798         const struct rte_flow_action_modify_field *conf =
1799                 (const struct rte_flow_action_modify_field *)(action->conf);
1800         struct rte_flow_item item;
1801         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1802                                                                 {0, 0, 0} };
1803         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1804                                                                 {0, 0, 0} };
1805         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1806         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1807         uint32_t type;
1808
1809         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1810                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1811                 type = MLX5_MODIFICATION_TYPE_SET;
1812                 /* For SET, fill the destination field (field) first. */
1813                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1814                                           value, conf->width, dev, attr, error);
1815                 /* Then copy the immediate value from source as per mask. */
1816                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1817                                           value, conf->width, dev, attr, error);
1818                 item.spec = &value;
1819         } else {
1820                 type = MLX5_MODIFICATION_TYPE_COPY;
1821                 /* For COPY, fill the destination field (dcopy) without mask. */
1822                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1823                                           value, conf->width, dev, attr, error);
1824                 /* Then construct the source field (field) with mask. */
1825                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1826                                           value, conf->width, dev, attr, error);
1827         }
1828         item.mask = &mask;
1829         return flow_dv_convert_modify_action(&item,
1830                         field, dcopy, resource, type, error);
1831 }
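/*
 * Usage sketch (illustrative): a field-to-field copy takes the COPY
 * branch above, while an immediate source takes the SET branch. The
 * fields and width here are example values only.
 *
 *	struct rte_flow_action_modify_field conf = {
 *		.operation = RTE_FLOW_MODIFY_SET,
 *		.dst = { .field = RTE_FLOW_FIELD_TAG, .level = 0 },
 *		.src = { .field = RTE_FLOW_FIELD_META },
 *		.width = 32,
 *	};
 */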
1832
1833 /**
1834  * Validate MARK item.
1835  *
1836  * @param[in] dev
1837  *   Pointer to the rte_eth_dev structure.
1838  * @param[in] item
1839  *   Item specification.
1840  * @param[in] attr
1841  *   Attributes of flow that includes this item.
1842  * @param[out] error
1843  *   Pointer to error structure.
1844  *
1845  * @return
1846  *   0 on success, a negative errno value otherwise and rte_errno is set.
1847  */
1848 static int
1849 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1850                            const struct rte_flow_item *item,
1851                            const struct rte_flow_attr *attr __rte_unused,
1852                            struct rte_flow_error *error)
1853 {
1854         struct mlx5_priv *priv = dev->data->dev_private;
1855         struct mlx5_dev_config *config = &priv->config;
1856         const struct rte_flow_item_mark *spec = item->spec;
1857         const struct rte_flow_item_mark *mask = item->mask;
1858         const struct rte_flow_item_mark nic_mask = {
1859                 .id = priv->sh->dv_mark_mask,
1860         };
1861         int ret;
1862
1863         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1864                 return rte_flow_error_set(error, ENOTSUP,
1865                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1866                                           "extended metadata feature"
1867                                           " isn't enabled");
1868         if (!mlx5_flow_ext_mreg_supported(dev))
1869                 return rte_flow_error_set(error, ENOTSUP,
1870                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1871                                           "extended metadata register"
1872                                           " isn't supported");
1873         if (!nic_mask.id)
1874                 return rte_flow_error_set(error, ENOTSUP,
1875                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1876                                           "extended metadata register"
1877                                           " isn't available");
1878         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1879         if (ret < 0)
1880                 return ret;
1881         if (!spec)
1882                 return rte_flow_error_set(error, EINVAL,
1883                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1884                                           item->spec,
1885                                           "data cannot be empty");
1886         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1887                 return rte_flow_error_set(error, EINVAL,
1888                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1889                                           &spec->id,
1890                                           "mark id exceeds the limit");
1891         if (!mask)
1892                 mask = &nic_mask;
1893         if (!mask->id)
1894                 return rte_flow_error_set(error, EINVAL,
1895                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1896                                         "mask cannot be zero");
1897
1898         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1899                                         (const uint8_t *)&nic_mask,
1900                                         sizeof(struct rte_flow_item_mark),
1901                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1902         if (ret < 0)
1903                 return ret;
1904         return 0;
1905 }
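/*
 * Example MARK item that passes this validation (illustrative; the id
 * must stay below MLX5_FLOW_MARK_MAX and within the device mark mask;
 * a missing mask defaults to the NIC mask above):
 *
 *	struct rte_flow_item_mark spec = { .id = 0x2a };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_MARK,
 *		.spec = &spec,
 *	};
 */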
1906
1907 /**
1908  * Validate META item.
1909  *
1910  * @param[in] dev
1911  *   Pointer to the rte_eth_dev structure.
1912  * @param[in] item
1913  *   Item specification.
1914  * @param[in] attr
1915  *   Attributes of flow that includes this item.
1916  * @param[out] error
1917  *   Pointer to error structure.
1918  *
1919  * @return
1920  *   0 on success, a negative errno value otherwise and rte_errno is set.
1921  */
1922 static int
1923 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1924                            const struct rte_flow_item *item,
1925                            const struct rte_flow_attr *attr,
1926                            struct rte_flow_error *error)
1927 {
1928         struct mlx5_priv *priv = dev->data->dev_private;
1929         struct mlx5_dev_config *config = &priv->config;
1930         const struct rte_flow_item_meta *spec = item->spec;
1931         const struct rte_flow_item_meta *mask = item->mask;
1932         struct rte_flow_item_meta nic_mask = {
1933                 .data = UINT32_MAX
1934         };
1935         int reg;
1936         int ret;
1937
1938         if (!spec)
1939                 return rte_flow_error_set(error, EINVAL,
1940                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1941                                           item->spec,
1942                                           "data cannot be empty");
1943         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1944                 if (!mlx5_flow_ext_mreg_supported(dev))
1945                         return rte_flow_error_set(error, ENOTSUP,
1946                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1947                                           "extended metadata register"
1948                                           " isn't supported");
1949                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1950                 if (reg < 0)
1951                         return reg;
1952                 if (reg == REG_NON)
1953                         return rte_flow_error_set(error, ENOTSUP,
1954                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1955                                         "unavailable extended metadata register");
1956                 if (reg == REG_B)
1957                         return rte_flow_error_set(error, ENOTSUP,
1958                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1959                                           "match on reg_b "
1960                                           "isn't supported");
1961                 if (reg != REG_A)
1962                         nic_mask.data = priv->sh->dv_meta_mask;
1963         } else if (attr->transfer) {
1964                 return rte_flow_error_set(error, ENOTSUP,
1965                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1966                                         "extended metadata feature "
1967                                         "should be enabled when "
1968                                         "meta item is requested "
1969                                         "with E-Switch mode");
1970         }
1971         if (!mask)
1972                 mask = &rte_flow_item_meta_mask;
1973         if (!mask->data)
1974                 return rte_flow_error_set(error, EINVAL,
1975                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1976                                         "mask cannot be zero");
1977
1978         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1979                                         (const uint8_t *)&nic_mask,
1980                                         sizeof(struct rte_flow_item_meta),
1981                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1982         return ret;
1983 }
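/*
 * Example META item (illustrative): in extended metadata mode the
 * usable bits depend on the register selected above, so the value is
 * an assumption for the sketch.
 *
 *	struct rte_flow_item_meta spec = { .data = 0x1234 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_META,
 *		.spec = &spec,
 *	};
 */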
1984
1985 /**
1986  * Validate TAG item.
1987  *
1988  * @param[in] dev
1989  *   Pointer to the rte_eth_dev structure.
1990  * @param[in] item
1991  *   Item specification.
1992  * @param[in] attr
1993  *   Attributes of flow that includes this item.
1994  * @param[out] error
1995  *   Pointer to error structure.
1996  *
1997  * @return
1998  *   0 on success, a negative errno value otherwise and rte_errno is set.
1999  */
2000 static int
2001 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2002                           const struct rte_flow_item *item,
2003                           const struct rte_flow_attr *attr __rte_unused,
2004                           struct rte_flow_error *error)
2005 {
2006         const struct rte_flow_item_tag *spec = item->spec;
2007         const struct rte_flow_item_tag *mask = item->mask;
2008         const struct rte_flow_item_tag nic_mask = {
2009                 .data = RTE_BE32(UINT32_MAX),
2010                 .index = 0xff,
2011         };
2012         int ret;
2013
2014         if (!mlx5_flow_ext_mreg_supported(dev))
2015                 return rte_flow_error_set(error, ENOTSUP,
2016                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2017                                           "extensive metadata register"
2018                                           " isn't supported");
2019         if (!spec)
2020                 return rte_flow_error_set(error, EINVAL,
2021                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2022                                           item->spec,
2023                                           "data cannot be empty");
2024         if (!mask)
2025                 mask = &rte_flow_item_tag_mask;
2026         if (!mask->data)
2027                 return rte_flow_error_set(error, EINVAL,
2028                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2029                                         "mask cannot be zero");
2030
2031         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2032                                         (const uint8_t *)&nic_mask,
2033                                         sizeof(struct rte_flow_item_tag),
2034                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2035         if (ret < 0)
2036                 return ret;
2037         if (mask->index != 0xff)
2038                 return rte_flow_error_set(error, EINVAL,
2039                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2040                                           "partial mask for tag index"
2041                                           " is not supported");
2042         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2043         if (ret < 0)
2044                 return ret;
2045         MLX5_ASSERT(ret != REG_NON);
2046         return 0;
2047 }
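/*
 * Example TAG item (illustrative): the index mask must be exact (0xff)
 * as enforced above; the index and data values are arbitrary here.
 *
 *	struct rte_flow_item_tag spec = { .data = 1, .index = 0 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_TAG,
 *		.spec = &spec,
 *	};
 */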
2048
2049 /**
2050  * Validate port_id item.
2051  *
2052  * @param[in] dev
2053  *   Pointer to the rte_eth_dev structure.
2054  * @param[in] item
2055  *   Item specification.
2056  * @param[in] attr
2057  *   Attributes of flow that includes this item.
2058  * @param[in] item_flags
2059  *   Bit-fields that holds the items detected until now.
2060  * @param[out] error
2061  *   Pointer to error structure.
2062  *
2063  * @return
2064  *   0 on success, a negative errno value otherwise and rte_errno is set.
2065  */
2066 static int
2067 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2068                               const struct rte_flow_item *item,
2069                               const struct rte_flow_attr *attr,
2070                               uint64_t item_flags,
2071                               struct rte_flow_error *error)
2072 {
2073         const struct rte_flow_item_port_id *spec = item->spec;
2074         const struct rte_flow_item_port_id *mask = item->mask;
2075         const struct rte_flow_item_port_id switch_mask = {
2076                         .id = 0xffffffff,
2077         };
2078         struct mlx5_priv *esw_priv;
2079         struct mlx5_priv *dev_priv;
2080         int ret;
2081
2082         if (!attr->transfer)
2083                 return rte_flow_error_set(error, EINVAL,
2084                                           RTE_FLOW_ERROR_TYPE_ITEM,
2085                                           NULL,
2086                                           "match on port id is valid only"
2087                                           " when transfer flag is enabled");
2088         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2089                 return rte_flow_error_set(error, ENOTSUP,
2090                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2091                                           "multiple source ports are not"
2092                                           " supported");
2093         if (!mask)
2094                 mask = &switch_mask;
2095         if (mask->id != 0xffffffff)
2096                 return rte_flow_error_set(error, ENOTSUP,
2097                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2098                                            mask,
2099                                            "no support for partial mask on"
2100                                            " \"id\" field");
2101         ret = mlx5_flow_item_acceptable
2102                                 (item, (const uint8_t *)mask,
2103                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2104                                  sizeof(struct rte_flow_item_port_id),
2105                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2106         if (ret)
2107                 return ret;
2108         if (!spec)
2109                 return 0;
2110         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2111         if (!esw_priv)
2112                 return rte_flow_error_set(error, rte_errno,
2113                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2114                                           "failed to obtain E-Switch info for"
2115                                           " port");
2116         dev_priv = mlx5_dev_to_eswitch_info(dev);
2117         if (!dev_priv)
2118                 return rte_flow_error_set(error, rte_errno,
2119                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2120                                           NULL,
2121                                           "failed to obtain E-Switch info");
2122         if (esw_priv->domain_id != dev_priv->domain_id)
2123                 return rte_flow_error_set(error, EINVAL,
2124                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2125                                           "cannot match on a port from a"
2126                                           " different E-Switch");
2127         return 0;
2128 }
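/*
 * Example PORT_ID item (illustrative): valid only on transfer flows and
 * only with a full "id" mask, as checked above. Port 0 is an assumption.
 *
 *	struct rte_flow_attr attr = { .transfer = 1, .group = 1 };
 *	struct rte_flow_item_port_id spec = { .id = 0 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &spec,
 *	};
 */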
2129
2130 /**
2131  * Validate VLAN item.
2132  *
2133  * @param[in] item
2134  *   Item specification.
2135  * @param[in] item_flags
2136  *   Bit-fields that holds the items detected until now.
2137  * @param[in] dev
2138  *   Ethernet device flow is being created on.
2139  * @param[out] error
2140  *   Pointer to error structure.
2141  *
2142  * @return
2143  *   0 on success, a negative errno value otherwise and rte_errno is set.
2144  */
2145 static int
2146 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2147                            uint64_t item_flags,
2148                            struct rte_eth_dev *dev,
2149                            struct rte_flow_error *error)
2150 {
2151         const struct rte_flow_item_vlan *mask = item->mask;
2152         const struct rte_flow_item_vlan nic_mask = {
2153                 .tci = RTE_BE16(UINT16_MAX),
2154                 .inner_type = RTE_BE16(UINT16_MAX),
2155                 .has_more_vlan = 1,
2156         };
2157         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2158         int ret;
2159         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2160                                         MLX5_FLOW_LAYER_INNER_L4) :
2161                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2162                                         MLX5_FLOW_LAYER_OUTER_L4);
2163         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2164                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2165
2166         if (item_flags & vlanm)
2167                 return rte_flow_error_set(error, EINVAL,
2168                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2169                                           "multiple VLAN layers not supported");
2170         else if ((item_flags & l34m) != 0)
2171                 return rte_flow_error_set(error, EINVAL,
2172                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2173                                           "VLAN cannot follow L3/L4 layer");
2174         if (!mask)
2175                 mask = &rte_flow_item_vlan_mask;
2176         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2177                                         (const uint8_t *)&nic_mask,
2178                                         sizeof(struct rte_flow_item_vlan),
2179                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2180         if (ret)
2181                 return ret;
2182         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2183                 struct mlx5_priv *priv = dev->data->dev_private;
2184
2185                 if (priv->vmwa_context) {
2186                         /*
2187                          * A non-NULL context means we run in a virtual
2188                          * machine with SR-IOV enabled; a VLAN interface must
2189                          * be created so the hypervisor sets up the E-Switch
2190                          * vport context correctly. We avoid creating multiple
2191                          * VLAN interfaces, so we cannot support VLAN tag mask.
2192                          */
2193                         return rte_flow_error_set(error, EINVAL,
2194                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2195                                                   item,
2196                                                   "VLAN tag mask is not"
2197                                                   " supported in virtual"
2198                                                   " environment");
2199                 }
2200         }
2201         return 0;
2202 }
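/*
 * Example VLAN item (illustrative): matching VID 100 with the 0x0fff
 * TCI mask, which is also the only TCI mask accepted when a VM
 * workaround context is active.
 *
 *	struct rte_flow_item_vlan spec = { .tci = RTE_BE16(100) };
 *	struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 */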
2203
2204 /*
2205  * GTP flags are contained in 1 byte of the format:
2206  * -------------------------------------------
2207  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2208  * |-----------------------------------------|
2209  * | value | Version | PT | Res | E | S | PN |
2210  * -------------------------------------------
2211  *
2212  * Matching is supported only for GTP flags E, S, PN.
2213  */
2214 #define MLX5_GTP_FLAGS_MASK     0x07
2215
2216 /**
2217  * Validate GTP item.
2218  *
2219  * @param[in] dev
2220  *   Pointer to the rte_eth_dev structure.
2221  * @param[in] item
2222  *   Item specification.
2223  * @param[in] item_flags
2224  *   Bit-fields that holds the items detected until now.
2225  * @param[out] error
2226  *   Pointer to error structure.
2227  *
2228  * @return
2229  *   0 on success, a negative errno value otherwise and rte_errno is set.
2230  */
2231 static int
2232 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2233                           const struct rte_flow_item *item,
2234                           uint64_t item_flags,
2235                           struct rte_flow_error *error)
2236 {
2237         struct mlx5_priv *priv = dev->data->dev_private;
2238         const struct rte_flow_item_gtp *spec = item->spec;
2239         const struct rte_flow_item_gtp *mask = item->mask;
2240         const struct rte_flow_item_gtp nic_mask = {
2241                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2242                 .msg_type = 0xff,
2243                 .teid = RTE_BE32(0xffffffff),
2244         };
2245
2246         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2247                 return rte_flow_error_set(error, ENOTSUP,
2248                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2249                                           "GTP support is not enabled");
2250         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2251                 return rte_flow_error_set(error, ENOTSUP,
2252                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2253                                           "multiple tunnel layers not"
2254                                           " supported");
2255         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2256                 return rte_flow_error_set(error, EINVAL,
2257                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2258                                           "no outer UDP layer found");
2259         if (!mask)
2260                 mask = &rte_flow_item_gtp_mask;
2261         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2262                 return rte_flow_error_set(error, ENOTSUP,
2263                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2264                                           "Match is supported for GTP"
2265                                           " flags only");
2266         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2267                                          (const uint8_t *)&nic_mask,
2268                                          sizeof(struct rte_flow_item_gtp),
2269                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2270 }
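/*
 * Example GTP item (illustrative): matching on TEID requires an outer
 * UDP layer earlier in the same pattern; the TEID value is arbitrary.
 *
 *	struct rte_flow_item_gtp spec = { .teid = RTE_BE32(1234) };
 *	struct rte_flow_item_gtp mask = { .teid = RTE_BE32(0xffffffff) };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GTP,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 */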
2271
2272 /**
2273  * Validate GTP PSC item.
2274  *
2275  * @param[in] item
2276  *   Item specification.
2277  * @param[in] last_item
2278  *   Previous validated item in the pattern items.
2279  * @param[in] gtp_item
2280  *   Previous GTP item specification.
2281  * @param[in] attr
2282  *   Pointer to flow attributes.
2283  * @param[out] error
2284  *   Pointer to error structure.
2285  *
2286  * @return
2287  *   0 on success, a negative errno value otherwise and rte_errno is set.
2288  */
2289 static int
2290 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2291                               uint64_t last_item,
2292                               const struct rte_flow_item *gtp_item,
2293                               const struct rte_flow_attr *attr,
2294                               struct rte_flow_error *error)
2295 {
2296         const struct rte_flow_item_gtp *gtp_spec;
2297         const struct rte_flow_item_gtp *gtp_mask;
2298         const struct rte_flow_item_gtp_psc *spec;
2299         const struct rte_flow_item_gtp_psc *mask;
2300         const struct rte_flow_item_gtp_psc nic_mask = {
2301                 .pdu_type = 0xFF,
2302                 .qfi = 0xFF,
2303         };
2304
2305         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2306                 return rte_flow_error_set
2307                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2308                          "GTP PSC item must be preceded with GTP item");
2309         gtp_spec = gtp_item->spec;
2310         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2311         /* The GTP spec must not request the E flag to match zero. */
2312         if (gtp_spec &&
2313                 (gtp_mask->v_pt_rsv_flags &
2314                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2315                 return rte_flow_error_set
2316                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2317                          "GTP E flag must be 1 to match GTP PSC");
2318         /* Check the flow is not created in group zero. */
2319         if (!attr->transfer && !attr->group)
2320                 return rte_flow_error_set
2321                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2322                          "GTP PSC is not supported for group 0");
2323         /* Nothing more to validate if the item has no spec. */
2324         if (!item->spec)
2325                 return 0;
2326         spec = item->spec;
2327         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2328         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2329                 return rte_flow_error_set
2330                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2331                          "PDU type should be smaller than 16");
2332         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2333                                          (const uint8_t *)&nic_mask,
2334                                          sizeof(struct rte_flow_item_gtp_psc),
2335                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2336 }
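/*
 * Example GTP PSC item (illustrative; field names as in this rte_flow
 * revision): it must follow a GTP item whose E flag is matched as set,
 * the flow must not be in group 0, and pdu_type must not exceed 15.
 *
 *	struct rte_flow_item_gtp_psc spec = {
 *		.pdu_type = 0,
 *		.qfi = 9,
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
 *		.spec = &spec,
 *	};
 */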
2337
2338 /**
2339  * Validate IPV4 item.
2340  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2341  * add specific validation of fragment_offset field,
2342  * add specific validation of the fragment_offset field.
2343  * @param[in] item
2344  *   Item specification.
2345  * @param[in] item_flags
2346  *   Bit-fields that holds the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
2347  * @param[out] error
2348  *   Pointer to error structure.
2349  *
2350  * @return
2351  *   0 on success, a negative errno value otherwise and rte_errno is set.
2352  */
2353 static int
2354 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2355                            uint64_t item_flags,
2356                            uint64_t last_item,
2357                            uint16_t ether_type,
2358                            struct rte_flow_error *error)
2359 {
2360         int ret;
2361         const struct rte_flow_item_ipv4 *spec = item->spec;
2362         const struct rte_flow_item_ipv4 *last = item->last;
2363         const struct rte_flow_item_ipv4 *mask = item->mask;
2364         rte_be16_t fragment_offset_spec = 0;
2365         rte_be16_t fragment_offset_last = 0;
2366         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2367                 .hdr = {
2368                         .src_addr = RTE_BE32(0xffffffff),
2369                         .dst_addr = RTE_BE32(0xffffffff),
2370                         .type_of_service = 0xff,
2371                         .fragment_offset = RTE_BE16(0xffff),
2372                         .next_proto_id = 0xff,
2373                         .time_to_live = 0xff,
2374                 },
2375         };
2376
2377         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2378                                            ether_type, &nic_ipv4_mask,
2379                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2380         if (ret < 0)
2381                 return ret;
2382         if (spec && mask)
2383                 fragment_offset_spec = spec->hdr.fragment_offset &
2384                                        mask->hdr.fragment_offset;
2385         if (!fragment_offset_spec)
2386                 return 0;
2387         /*
2388          * spec and mask are valid, enforce using full mask to make sure the
2389          * complete value is used correctly.
2390          */
2391         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2392                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2393                 return rte_flow_error_set(error, EINVAL,
2394                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2395                                           item, "must use full mask for"
2396                                           " fragment_offset");
2397         /*
2398          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2399          * indicating this is 1st fragment of fragmented packet.
2400          * This is not yet supported in MLX5, return appropriate error message.
2401          */
2402         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2403                 return rte_flow_error_set(error, ENOTSUP,
2404                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2405                                           "match on first fragment not "
2406                                           "supported");
2407         if (fragment_offset_spec && !last)
2408                 return rte_flow_error_set(error, ENOTSUP,
2409                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2410                                           "specified value not supported");
2411         /* spec and last are valid, validate the specified range. */
2412         fragment_offset_last = last->hdr.fragment_offset &
2413                                mask->hdr.fragment_offset;
2414         /*
2415          * Match on fragment_offset spec 0x2001 and last 0x3fff
2416          * means MF is 1 and frag-offset is > 0.
2417          * This packet is fragment 2nd and onward, excluding last.
2418          * This is not yet supported in MLX5, return appropriate
2419          * error message.
2420          */
2421         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2422             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2423                 return rte_flow_error_set(error, ENOTSUP,
2424                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2425                                           last, "match on following "
2426                                           "fragments not supported");
2427         /*
2428          * Match on fragment_offset spec 0x0001 and last 0x1fff
2429          * means MF is 0 and frag-offset is > 0.
2430          * This packet is last fragment of fragmented packet.
2431          * This is not yet supported in MLX5, return appropriate
2432          * error message.
2433          */
2434         if (fragment_offset_spec == RTE_BE16(1) &&
2435             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2436                 return rte_flow_error_set(error, ENOTSUP,
2437                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2438                                           last, "match on last "
2439                                           "fragment not supported");
2440         /*
2441          * Match on fragment_offset spec 0x0001 and last 0x3fff
2442          * means MF and/or frag-offset is not 0.
2443          * This is a fragmented packet.
2444          * Other range values are invalid and rejected.
2445          */
2446         if (!(fragment_offset_spec == RTE_BE16(1) &&
2447               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2448                 return rte_flow_error_set(error, ENOTSUP,
2449                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2450                                           "specified range not supported");
2451         return 0;
2452 }
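/*
 * Example of the one accepted fragment_offset range (illustrative):
 * spec 0x0001 with last 0x3fff matches any fragmented packet, per the
 * checks above; all other spec/last combinations are rejected.
 *
 *	struct rte_flow_item_ipv4 spec = {
 *		.hdr.fragment_offset = RTE_BE16(1),
 *	};
 *	struct rte_flow_item_ipv4 last = {
 *		.hdr.fragment_offset = RTE_BE16(0x3fff),
 *	};
 *	struct rte_flow_item_ipv4 mask = {
 *		.hdr.fragment_offset = RTE_BE16(0x3fff),
 *	};
 */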
2453
2454 /**
2455  * Validate IPV6 fragment extension item.
2456  *
2457  * @param[in] item
2458  *   Item specification.
2459  * @param[in] item_flags
2460  *   Bit-fields that holds the items detected until now.
2461  * @param[out] error
2462  *   Pointer to error structure.
2463  *
2464  * @return
2465  *   0 on success, a negative errno value otherwise and rte_errno is set.
2466  */
2467 static int
2468 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2469                                     uint64_t item_flags,
2470                                     struct rte_flow_error *error)
2471 {
2472         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2473         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2474         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2475         rte_be16_t frag_data_spec = 0;
2476         rte_be16_t frag_data_last = 0;
2477         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2478         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2479                                       MLX5_FLOW_LAYER_OUTER_L4;
2480         int ret = 0;
2481         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2482                 .hdr = {
2483                         .next_header = 0xff,
2484                         .frag_data = RTE_BE16(0xffff),
2485                 },
2486         };
2487
2488         if (item_flags & l4m)
2489                 return rte_flow_error_set(error, EINVAL,
2490                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2491                                           "ipv6 fragment extension item cannot "
2492                                           "follow L4 item.");
2493         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2494             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2495                 return rte_flow_error_set(error, EINVAL,
2496                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2497                                           "ipv6 fragment extension item must "
2498                                           "follow ipv6 item");
2499         if (spec && mask)
2500                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2501         if (!frag_data_spec)
2502                 return 0;
2503         /*
2504          * spec and mask are valid, enforce using full mask to make sure the
2505          * complete value is used correctly.
2506          */
2507         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2508                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2509                 return rte_flow_error_set(error, EINVAL,
2510                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2511                                           item, "must use full mask for"
2512                                           " frag_data");
2513         /*
2514          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2515          * This is 1st fragment of fragmented packet.
2516          */
2517         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2518                 return rte_flow_error_set(error, ENOTSUP,
2519                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2520                                           "match on first fragment not "
2521                                           "supported");
2522         if (frag_data_spec && !last)
2523                 return rte_flow_error_set(error, EINVAL,
2524                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2525                                           "specified value not supported");
2526         ret = mlx5_flow_item_acceptable
2527                                 (item, (const uint8_t *)mask,
2528                                  (const uint8_t *)&nic_mask,
2529                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2530                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2531         if (ret)
2532                 return ret;
2533         /* spec and last are valid, validate the specified range. */
2534         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2535         /*
2536          * Match on frag_data spec 0x0009 and last 0xfff9
2537          * means M is 1 and frag-offset is > 0.
2538          * This packet is fragment 2nd and onward, excluding last.
2539          * This is not yet supported in MLX5, return appropriate
2540          * error message.
2541          */
2542         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2543                                        RTE_IPV6_EHDR_MF_MASK) &&
2544             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2545                 return rte_flow_error_set(error, ENOTSUP,
2546                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2547                                           last, "match on following "
2548                                           "fragments not supported");
2549         /*
2550          * Match on frag_data spec 0x0008 and last 0xfff8
2551          * means M is 0 and frag-offset is > 0.
2552          * This packet is last fragment of fragmented packet.
2553          * This is not yet supported in MLX5, return appropriate
2554          * error message.
2555          */
2556         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2557             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2558                 return rte_flow_error_set(error, ENOTSUP,
2559                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2560                                           last, "match on last "
2561                                           "fragment not supported");
2562         /* Other range values are invalid and rejected. */
2563         return rte_flow_error_set(error, EINVAL,
2564                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2565                                   "specified range not supported");
2566 }
2567
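/*
 * Illustrative sketch only, not part of the driver: every non-zero
 * frag_data spec ends in an error above, so the supported way to match
 * any fragmented IPv6 packet is the mere presence of the fragment
 * extension item with frag_data left unmasked. A hypothetical pattern:
 */
static __rte_unused void
example_ipv6_fragmented_pattern(struct rte_flow_item pattern[4])
{
        /* ETH / IPV6 / IPV6_FRAG_EXT (no spec/mask) / END. */
        pattern[0] = (struct rte_flow_item)
                     { .type = RTE_FLOW_ITEM_TYPE_ETH };
        pattern[1] = (struct rte_flow_item)
                     { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        pattern[2] = (struct rte_flow_item)
                     { .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT };
        pattern[3] = (struct rte_flow_item)
                     { .type = RTE_FLOW_ITEM_TYPE_END };
}
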
2568 /**
2569  * Validate the pop VLAN action.
2570  *
2571  * @param[in] dev
2572  *   Pointer to the rte_eth_dev structure.
2573  * @param[in] action_flags
2574  *   Holds the actions detected until now.
2575  * @param[in] action
2576  *   Pointer to the pop vlan action.
2577  * @param[in] item_flags
2578  *   The items found in this flow rule.
2579  * @param[in] attr
2580  *   Pointer to flow attributes.
2581  * @param[out] error
2582  *   Pointer to error structure.
2583  *
2584  * @return
2585  *   0 on success, a negative errno value otherwise and rte_errno is set.
2586  */
2587 static int
2588 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2589                                  uint64_t action_flags,
2590                                  const struct rte_flow_action *action,
2591                                  uint64_t item_flags,
2592                                  const struct rte_flow_attr *attr,
2593                                  struct rte_flow_error *error)
2594 {
2595         const struct mlx5_priv *priv = dev->data->dev_private;
2596
2599         if (!priv->sh->pop_vlan_action)
2600                 return rte_flow_error_set(error, ENOTSUP,
2601                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2602                                           NULL,
2603                                           "pop vlan action is not supported");
2604         if (attr->egress)
2605                 return rte_flow_error_set(error, ENOTSUP,
2606                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2607                                           NULL,
2608                                           "pop vlan action not supported for "
2609                                           "egress");
2610         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2611                 return rte_flow_error_set(error, ENOTSUP,
2612                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2613                                           "no support for multiple VLAN "
2614                                           "actions");
2615         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2616         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2617             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2618                 return rte_flow_error_set(error, ENOTSUP,
2619                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2620                                           NULL,
2621                                           "cannot pop vlan after decap without "
2622                                           "match on inner vlan in the flow");
2623         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2624         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2625             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2626                 return rte_flow_error_set(error, ENOTSUP,
2627                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2628                                           NULL,
2629                                           "cannot pop vlan without a "
2630                                           "match on (outer) vlan in the flow");
2631         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2632                 return rte_flow_error_set(error, EINVAL,
2633                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2634                                           "wrong action order, port_id should "
2635                                           "be after pop VLAN action");
2636         if (!attr->transfer && priv->representor)
2637                 return rte_flow_error_set(error, ENOTSUP,
2638                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2639                                           "pop vlan action for VF representor "
2640                                           "not supported on NIC table");
2641         return 0;
2642 }
2643
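/*
 * Illustrative sketch only, not part of the driver: per the checks
 * above, a pop VLAN rule must match on the VLAN header (inner after a
 * decap, outer otherwise) and any port_id action must come after the
 * pop. A hypothetical action list:
 */
static __rte_unused void
example_pop_vlan_actions(struct rte_flow_action actions[3],
                         const struct rte_flow_action_port_id *port_conf)
{
        /* Pop first, then forward; the reverse order is rejected. */
        actions[0] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN };
        actions[1] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
                       .conf = port_conf };
        actions[2] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_END };
}
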
2644 /**
2645  * Get VLAN default info from the VLAN match info in the item list.
2646  *
2647  * @param[in] items
2648  *   The list of item specifications.
2649  * @param[out] vlan
2650  *   Pointer to the VLAN info to fill.
2654  */
2655 static void
2656 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2657                                   struct rte_vlan_hdr *vlan)
2658 {
2659         const struct rte_flow_item_vlan nic_mask = {
2660                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2661                                 MLX5DV_FLOW_VLAN_VID_MASK),
2662                 .inner_type = RTE_BE16(0xffff),
2663         };
2664
2665         if (items == NULL)
2666                 return;
2667         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2668                 int type = items->type;
2669
2670                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2671                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2672                         break;
2673         }
2674         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2675                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2676                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2677
2678                 /* If VLAN item in pattern doesn't contain data, return here. */
2679                 if (!vlan_v)
2680                         return;
2681                 if (!vlan_m)
2682                         vlan_m = &nic_mask;
2683                 /* Only full match values are accepted */
2684                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2685                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2686                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2687                         vlan->vlan_tci |=
2688                                 rte_be_to_cpu_16(vlan_v->tci &
2689                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2690                 }
2691                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2692                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2693                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2694                         vlan->vlan_tci |=
2695                                 rte_be_to_cpu_16(vlan_v->tci &
2696                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2697                 }
2698                 if (vlan_m->inner_type == nic_mask.inner_type)
2699                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2700                                                            vlan_m->inner_type);
2701         }
2702 }
2703
2704 /**
2705  * Validate the push VLAN action.
2706  *
2707  * @param[in] dev
2708  *   Pointer to the rte_eth_dev structure.
2709  * @param[in] action_flags
2710  *   Holds the actions detected until now.
2711  * @param[in] item_flags
2712  *   The items found in this flow rule.
2713  * @param[in] action
2714  *   Pointer to the action structure.
2715  * @param[in] attr
2716  *   Pointer to flow attributes
2717  * @param[out] error
2718  *   Pointer to error structure.
2719  *
2720  * @return
2721  *   0 on success, a negative errno value otherwise and rte_errno is set.
2722  */
2723 static int
2724 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2725                                   uint64_t action_flags,
2726                                   const struct rte_flow_item_vlan *vlan_m,
2727                                   const struct rte_flow_action *action,
2728                                   const struct rte_flow_attr *attr,
2729                                   struct rte_flow_error *error)
2730 {
2731         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2732         const struct mlx5_priv *priv = dev->data->dev_private;
2733
2734         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2735             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2736                 return rte_flow_error_set(error, EINVAL,
2737                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2738                                           "invalid vlan ethertype");
2739         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2740                 return rte_flow_error_set(error, EINVAL,
2741                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2742                                           "wrong action order, port_id should "
2743                                           "be after push VLAN");
2744         if (!attr->transfer && priv->representor)
2745                 return rte_flow_error_set(error, ENOTSUP,
2746                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2747                                           "push vlan action for VF representor "
2748                                           "not supported on NIC table");
2749         if (vlan_m &&
2750             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2751             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2752                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2753             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2754             !(mlx5_flow_find_action
2755                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2756                 return rte_flow_error_set(error, EINVAL,
2757                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2758                                           "not full match mask on VLAN PCP and "
2759                                           "there is no of_set_vlan_pcp action, "
2760                                           "push VLAN action cannot figure out "
2761                                           "PCP value");
2762         if (vlan_m &&
2763             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2764             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2765                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2766             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2767             !(mlx5_flow_find_action
2768                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2769                 return rte_flow_error_set(error, EINVAL,
2770                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2771                                           "not full match mask on VLAN VID and "
2772                                           "there is no of_set_vlan_vid action, "
2773                                           "push VLAN action cannot figure out "
2774                                           "VID value");
2776         return 0;
2777 }
2778
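/*
 * Illustrative sketch only, not part of the driver: when the matched
 * VLAN item does not provide full PCP/VID masks, the checks above
 * require explicit of_set_vlan_pcp/of_set_vlan_vid actions after the
 * push. A hypothetical action list under that assumption:
 */
static __rte_unused void
example_push_vlan_actions(struct rte_flow_action actions[4],
                          const struct rte_flow_action_of_push_vlan *push,
                          const struct rte_flow_action_of_set_vlan_vid *vid,
                          const struct rte_flow_action_of_set_vlan_pcp *pcp)
{
        actions[0] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
                       .conf = push };
        actions[1] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
                       .conf = vid };
        actions[2] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
                       .conf = pcp };
        actions[3] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_END };
}
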
2779 /**
2780  * Validate the set VLAN PCP.
2781  *
2782  * @param[in] action_flags
2783  *   Holds the actions detected until now.
2784  * @param[in] actions
2785  *   Pointer to the list of actions remaining in the flow rule.
2786  * @param[out] error
2787  *   Pointer to error structure.
2788  *
2789  * @return
2790  *   0 on success, a negative errno value otherwise and rte_errno is set.
2791  */
2792 static int
2793 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2794                                      const struct rte_flow_action actions[],
2795                                      struct rte_flow_error *error)
2796 {
2797         const struct rte_flow_action *action = actions;
2798         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2799
2800         if (conf->vlan_pcp > 7)
2801                 return rte_flow_error_set(error, EINVAL,
2802                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2803                                           "VLAN PCP value is too big");
2804         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2805                 return rte_flow_error_set(error, ENOTSUP,
2806                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2807                                           "set VLAN PCP action must follow "
2808                                           "the push VLAN action");
2809         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2810                 return rte_flow_error_set(error, ENOTSUP,
2811                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2812                                           "Multiple VLAN PCP modifications are "
2813                                           "not supported");
2814         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2815                 return rte_flow_error_set(error, EINVAL,
2816                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2817                                           "wrong action order, port_id should "
2818                                           "be after set VLAN PCP");
2819         return 0;
2820 }
2821
2822 /**
2823  * Validate the set VLAN VID.
2824  *
2825  * @param[in] item_flags
2826  *   Holds the items detected in this rule.
2827  * @param[in] action_flags
2828  *   Holds the actions detected until now.
2829  * @param[in] actions
2830  *   Pointer to the list of actions remaining in the flow rule.
2831  * @param[out] error
2832  *   Pointer to error structure.
2833  *
2834  * @return
2835  *   0 on success, a negative errno value otherwise and rte_errno is set.
2836  */
2837 static int
2838 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2839                                      uint64_t action_flags,
2840                                      const struct rte_flow_action actions[],
2841                                      struct rte_flow_error *error)
2842 {
2843         const struct rte_flow_action *action = actions;
2844         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2845
2846         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2847                 return rte_flow_error_set(error, EINVAL,
2848                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2849                                           "VLAN VID value is too big");
2850         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2851             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2852                 return rte_flow_error_set(error, ENOTSUP,
2853                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2854                                           "set VLAN VID action must follow push"
2855                                           " VLAN action or match on VLAN item");
2856         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2857                 return rte_flow_error_set(error, ENOTSUP,
2858                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2859                                           "Multiple VLAN VID modifications are "
2860                                           "not supported");
2861         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2862                 return rte_flow_error_set(error, EINVAL,
2863                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2864                                           "wrong action order, port_id should "
2865                                           "be after set VLAN VID");
2866         return 0;
2867 }
2868
2869 /**
2870  * Validate the FLAG action.
2871  *
2872  * @param[in] dev
2873  *   Pointer to the rte_eth_dev structure.
2874  * @param[in] action_flags
2875  *   Holds the actions detected until now.
2876  * @param[in] attr
2877  *   Pointer to flow attributes
2878  * @param[out] error
2879  *   Pointer to error structure.
2880  *
2881  * @return
2882  *   0 on success, a negative errno value otherwise and rte_errno is set.
2883  */
2884 static int
2885 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2886                              uint64_t action_flags,
2887                              const struct rte_flow_attr *attr,
2888                              struct rte_flow_error *error)
2889 {
2890         struct mlx5_priv *priv = dev->data->dev_private;
2891         struct mlx5_dev_config *config = &priv->config;
2892         int ret;
2893
2894         /* Fall back if no extended metadata register support. */
2895         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2896                 return mlx5_flow_validate_action_flag(action_flags, attr,
2897                                                       error);
2898         /* Extensive metadata mode requires registers. */
2899         if (!mlx5_flow_ext_mreg_supported(dev))
2900                 return rte_flow_error_set(error, ENOTSUP,
2901                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2902                                           "no metadata registers "
2903                                           "to support flag action");
2904         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2905                 return rte_flow_error_set(error, ENOTSUP,
2906                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2907                                           "extended metadata register"
2908                                           " isn't available");
2909         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2910         if (ret < 0)
2911                 return ret;
2912         MLX5_ASSERT(ret > 0);
2913         if (action_flags & MLX5_FLOW_ACTION_MARK)
2914                 return rte_flow_error_set(error, EINVAL,
2915                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2916                                           "can't mark and flag in same flow");
2917         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2918                 return rte_flow_error_set(error, EINVAL,
2919                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2920                                           "can't have 2 flag"
2921                                           " actions in same flow");
2922         return 0;
2923 }
2924
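/*
 * Illustrative sketch only, not part of the driver: FLAG and MARK are
 * mutually exclusive within one flow per the checks above, and only a
 * single FLAG is allowed. A hypothetical valid list pairs the flag
 * with a fate action:
 */
static __rte_unused void
example_flag_actions(struct rte_flow_action actions[3],
                     const struct rte_flow_action_queue *queue)
{
        actions[0] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_FLAG };
        /* Adding RTE_FLOW_ACTION_TYPE_MARK here would be rejected. */
        actions[1] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = queue };
        actions[2] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_END };
}
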
2925 /**
2926  * Validate MARK action.
2927  *
2928  * @param[in] dev
2929  *   Pointer to the rte_eth_dev structure.
2930  * @param[in] action
2931  *   Pointer to action.
2932  * @param[in] action_flags
2933  *   Holds the actions detected until now.
2934  * @param[in] attr
2935  *   Pointer to flow attributes
2936  * @param[out] error
2937  *   Pointer to error structure.
2938  *
2939  * @return
2940  *   0 on success, a negative errno value otherwise and rte_errno is set.
2941  */
2942 static int
2943 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2944                              const struct rte_flow_action *action,
2945                              uint64_t action_flags,
2946                              const struct rte_flow_attr *attr,
2947                              struct rte_flow_error *error)
2948 {
2949         struct mlx5_priv *priv = dev->data->dev_private;
2950         struct mlx5_dev_config *config = &priv->config;
2951         const struct rte_flow_action_mark *mark = action->conf;
2952         int ret;
2953
2954         if (is_tunnel_offload_active(dev))
2955                 return rte_flow_error_set(error, ENOTSUP,
2956                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2957                                           "no mark action "
2958                                           "if tunnel offload active");
2959         /* Fall back if no extended metadata register support. */
2960         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2961                 return mlx5_flow_validate_action_mark(action, action_flags,
2962                                                       attr, error);
2963         /* Extensive metadata mode requires registers. */
2964         if (!mlx5_flow_ext_mreg_supported(dev))
2965                 return rte_flow_error_set(error, ENOTSUP,
2966                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2967                                           "no metadata registers "
2968                                           "to support mark action");
2969         if (!priv->sh->dv_mark_mask)
2970                 return rte_flow_error_set(error, ENOTSUP,
2971                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2972                                           "extended metadata register"
2973                                           " isn't available");
2974         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2975         if (ret < 0)
2976                 return ret;
2977         MLX5_ASSERT(ret > 0);
2978         if (!mark)
2979                 return rte_flow_error_set(error, EINVAL,
2980                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2981                                           "configuration cannot be null");
2982         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2983                 return rte_flow_error_set(error, EINVAL,
2984                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2985                                           &mark->id,
2986                                           "mark id exceeds the limit");
2987         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2988                 return rte_flow_error_set(error, EINVAL,
2989                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2990                                           "can't flag and mark in same flow");
2991         if (action_flags & MLX5_FLOW_ACTION_MARK)
2992                 return rte_flow_error_set(error, EINVAL,
2993                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2994                                           "can't have 2 mark actions in same"
2995                                           " flow");
2996         return 0;
2997 }
2998
2999 /**
3000  * Validate SET_META action.
3001  *
3002  * @param[in] dev
3003  *   Pointer to the rte_eth_dev structure.
3004  * @param[in] action
3005  *   Pointer to the action structure.
3006  * @param[in] action_flags
3007  *   Holds the actions detected until now.
3008  * @param[in] attr
3009  *   Pointer to flow attributes
3010  * @param[out] error
3011  *   Pointer to error structure.
3012  *
3013  * @return
3014  *   0 on success, a negative errno value otherwise and rte_errno is set.
3015  */
3016 static int
3017 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3018                                  const struct rte_flow_action *action,
3019                                  uint64_t action_flags __rte_unused,
3020                                  const struct rte_flow_attr *attr,
3021                                  struct rte_flow_error *error)
3022 {
3023         const struct rte_flow_action_set_meta *conf;
3024         uint32_t nic_mask = UINT32_MAX;
3025         int reg;
3026
3027         if (!mlx5_flow_ext_mreg_supported(dev))
3028                 return rte_flow_error_set(error, ENOTSUP,
3029                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3030                                           "extended metadata register"
3031                                           " isn't supported");
3032         reg = flow_dv_get_metadata_reg(dev, attr, error);
3033         if (reg < 0)
3034                 return reg;
3035         if (reg == REG_NON)
3036                 return rte_flow_error_set(error, ENOTSUP,
3037                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3038                                           "unavailable extended metadata register");
3039         if (reg != REG_A && reg != REG_B) {
3040                 struct mlx5_priv *priv = dev->data->dev_private;
3041
3042                 nic_mask = priv->sh->dv_meta_mask;
3043         }
3044         if (!(action->conf))
3045                 return rte_flow_error_set(error, EINVAL,
3046                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3047                                           "configuration cannot be null");
3048         conf = (const struct rte_flow_action_set_meta *)action->conf;
3049         if (!conf->mask)
3050                 return rte_flow_error_set(error, EINVAL,
3051                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3052                                           "zero mask doesn't have any effect");
3053         if (conf->mask & ~nic_mask)
3054                 return rte_flow_error_set(error, EINVAL,
3055                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3056                                           "metadata must be within reg C0");
3057         return 0;
3058 }
3059
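/*
 * Illustrative sketch only, not part of the driver: SET_META needs a
 * non-zero mask that stays within the bits backed by the selected
 * metadata register (reg C0 unless REG_A/REG_B is used). A
 * hypothetical configuration writing the low 16 bits:
 */
static __rte_unused void
example_set_meta_action(struct rte_flow_action *action,
                        struct rte_flow_action_set_meta *conf)
{
        conf->data = 0x1234;
        conf->mask = 0xffff; /* A zero mask would be rejected above. */
        action->type = RTE_FLOW_ACTION_TYPE_SET_META;
        action->conf = conf;
}
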
3060 /**
3061  * Validate SET_TAG action.
3062  *
3063  * @param[in] dev
3064  *   Pointer to the rte_eth_dev structure.
3065  * @param[in] action
3066  *   Pointer to the action structure.
3067  * @param[in] action_flags
3068  *   Holds the actions detected until now.
3069  * @param[in] attr
3070  *   Pointer to flow attributes
3071  * @param[out] error
3072  *   Pointer to error structure.
3073  *
3074  * @return
3075  *   0 on success, a negative errno value otherwise and rte_errno is set.
3076  */
3077 static int
3078 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3079                                 const struct rte_flow_action *action,
3080                                 uint64_t action_flags,
3081                                 const struct rte_flow_attr *attr,
3082                                 struct rte_flow_error *error)
3083 {
3084         const struct rte_flow_action_set_tag *conf;
3085         const uint64_t terminal_action_flags =
3086                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3087                 MLX5_FLOW_ACTION_RSS;
3088         int ret;
3089
3090         if (!mlx5_flow_ext_mreg_supported(dev))
3091                 return rte_flow_error_set(error, ENOTSUP,
3092                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3093                                           "extensive metadata register"
3094                                           " isn't supported");
3095         if (!(action->conf))
3096                 return rte_flow_error_set(error, EINVAL,
3097                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3098                                           "configuration cannot be null");
3099         conf = (const struct rte_flow_action_set_tag *)action->conf;
3100         if (!conf->mask)
3101                 return rte_flow_error_set(error, EINVAL,
3102                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3103                                           "zero mask doesn't have any effect");
3104         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3105         if (ret < 0)
3106                 return ret;
3107         if (!attr->transfer && attr->ingress &&
3108             (action_flags & terminal_action_flags))
3109                 return rte_flow_error_set(error, EINVAL,
3110                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3111                                           "set_tag has no effect"
3112                                           " with terminal actions");
3113         return 0;
3114 }
3115
3116 /**
3117  * Validate count action.
3118  *
3119  * @param[in] dev
3120  *   Pointer to rte_eth_dev structure.
3121  * @param[in] action
3122  *   Pointer to the action structure.
3123  * @param[in] action_flags
3124  *   Holds the actions detected until now.
3125  * @param[out] error
3126  *   Pointer to error structure.
3127  *
3128  * @return
3129  *   0 on success, a negative errno value otherwise and rte_errno is set.
3130  */
3131 static int
3132 flow_dv_validate_action_count(struct rte_eth_dev *dev,
3133                               const struct rte_flow_action *action,
3134                               uint64_t action_flags,
3135                               struct rte_flow_error *error)
3136 {
3137         struct mlx5_priv *priv = dev->data->dev_private;
3138         const struct rte_flow_action_count *count;
3139
3140         if (!priv->config.devx)
3141                 goto notsup_err;
3142         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3143                 return rte_flow_error_set(error, EINVAL,
3144                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3145                                           "duplicate count actions set");
3146         count = (const struct rte_flow_action_count *)action->conf;
3147         if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3148             !priv->sh->flow_hit_aso_en)
3149                 return rte_flow_error_set(error, EINVAL,
3150                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3151                                           "old age and shared count combination is not supported");
3152 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3153         return 0;
3154 #endif
3155 notsup_err:
3156         return rte_flow_error_set
3157                       (error, ENOTSUP,
3158                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3159                        NULL,
3160                        "count action not supported");
3161 }
3162
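/*
 * Illustrative sketch only, not part of the driver: per the check
 * above, a shared counter combined with an AGE action in one flow is
 * valid only when ASO flow hit support (flow_hit_aso_en) is available.
 * A hypothetical action pair exercising that check:
 */
static __rte_unused void
example_shared_count_with_age(struct rte_flow_action actions[3],
                              struct rte_flow_action_count *cnt,
                              struct rte_flow_action_age *age)
{
        cnt->shared = 1;
        cnt->id = 0;
        age->timeout = 10; /* Seconds. */
        actions[0] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = cnt };
        actions[1] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = age };
        actions[2] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_END };
}
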
3163 /**
3164  * Validate the L2 encap action.
3165  *
3166  * @param[in] dev
3167  *   Pointer to the rte_eth_dev structure.
3168  * @param[in] action_flags
3169  *   Holds the actions detected until now.
3170  * @param[in] action
3171  *   Pointer to the action structure.
3172  * @param[in] attr
3173  *   Pointer to flow attributes.
3174  * @param[out] error
3175  *   Pointer to error structure.
3176  *
3177  * @return
3178  *   0 on success, a negative errno value otherwise and rte_errno is set.
3179  */
3180 static int
3181 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3182                                  uint64_t action_flags,
3183                                  const struct rte_flow_action *action,
3184                                  const struct rte_flow_attr *attr,
3185                                  struct rte_flow_error *error)
3186 {
3187         const struct mlx5_priv *priv = dev->data->dev_private;
3188
3189         if (!(action->conf))
3190                 return rte_flow_error_set(error, EINVAL,
3191                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3192                                           "configuration cannot be null");
3193         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3194                 return rte_flow_error_set(error, EINVAL,
3195                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3196                                           "can only have a single encap action "
3197                                           "in a flow");
3198         if (!attr->transfer && priv->representor)
3199                 return rte_flow_error_set(error, ENOTSUP,
3200                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3201                                           "encap action for VF representor "
3202                                           "not supported on NIC table");
3203         return 0;
3204 }
3205
3206 /**
3207  * Validate a decap action.
3208  *
3209  * @param[in] dev
3210  *   Pointer to the rte_eth_dev structure.
3211  * @param[in] action_flags
3212  *   Holds the actions detected until now.
3213  * @param[in] action
3214  *   Pointer to the action structure.
3215  * @param[in] item_flags
3216  *   Holds the items detected.
3217  * @param[in] attr
3218  *   Pointer to flow attributes
3219  * @param[out] error
3220  *   Pointer to error structure.
3221  *
3222  * @return
3223  *   0 on success, a negative errno value otherwise and rte_errno is set.
3224  */
3225 static int
3226 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3227                               uint64_t action_flags,
3228                               const struct rte_flow_action *action,
3229                               const uint64_t item_flags,
3230                               const struct rte_flow_attr *attr,
3231                               struct rte_flow_error *error)
3232 {
3233         const struct mlx5_priv *priv = dev->data->dev_private;
3234
3235         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3236             !priv->config.decap_en)
3237                 return rte_flow_error_set(error, ENOTSUP,
3238                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3239                                           "decap is not enabled");
3240         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3241                 return rte_flow_error_set(error, ENOTSUP,
3242                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3243                                           action_flags &
3244                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3245                                           "have a single decap action" : "decap "
3246                                           "after encap is not supported");
3247         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3248                 return rte_flow_error_set(error, EINVAL,
3249                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3250                                           "can't have decap action after"
3251                                           " modify action");
3252         if (attr->egress)
3253                 return rte_flow_error_set(error, ENOTSUP,
3254                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3255                                           NULL,
3256                                           "decap action not supported for "
3257                                           "egress");
3258         if (!attr->transfer && priv->representor)
3259                 return rte_flow_error_set(error, ENOTSUP,
3260                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3261                                           "decap action for VF representor "
3262                                           "not supported on NIC table");
3263         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3264             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3265                 return rte_flow_error_set(error, ENOTSUP,
3266                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3267                                 "VXLAN item should be present for VXLAN decap");
3268         return 0;
3269 }
3270
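/*
 * Illustrative sketch only, not part of the driver: VXLAN_DECAP is
 * accepted only when the pattern actually matches a VXLAN tunnel, per
 * the last check above. A hypothetical pattern/action combination:
 */
static __rte_unused void
example_vxlan_decap(struct rte_flow_item pattern[5],
                    struct rte_flow_action actions[2])
{
        pattern[0] = (struct rte_flow_item)
                     { .type = RTE_FLOW_ITEM_TYPE_ETH };
        pattern[1] = (struct rte_flow_item)
                     { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        pattern[2] = (struct rte_flow_item)
                     { .type = RTE_FLOW_ITEM_TYPE_UDP };
        pattern[3] = (struct rte_flow_item)
                     { .type = RTE_FLOW_ITEM_TYPE_VXLAN };
        pattern[4] = (struct rte_flow_item)
                     { .type = RTE_FLOW_ITEM_TYPE_END };
        actions[0] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP };
        actions[1] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_END };
}
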
3271 static const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3272
3273 /**
3274  * Validate the raw encap and decap actions.
3275  *
3276  * @param[in] dev
3277  *   Pointer to the rte_eth_dev structure.
3278  * @param[in] decap
3279  *   Pointer to the decap action.
3280  * @param[in] encap
3281  *   Pointer to the encap action.
3282  * @param[in] attr
3283  *   Pointer to flow attributes
3284  * @param[in, out] action_flags
3285  *   Holds the actions detected until now.
3286  * @param[out] actions_n
3287  *   Pointer to the number of actions counter.
3288  * @param[in] action
3289  *   Pointer to the action structure.
3290  * @param[in] item_flags
3291  *   Holds the items detected.
3292  * @param[out] error
3293  *   Pointer to error structure.
3294  *
3295  * @return
3296  *   0 on success, a negative errno value otherwise and rte_errno is set.
3297  */
3298 static int
3299 flow_dv_validate_action_raw_encap_decap
3300         (struct rte_eth_dev *dev,
3301          const struct rte_flow_action_raw_decap *decap,
3302          const struct rte_flow_action_raw_encap *encap,
3303          const struct rte_flow_attr *attr, uint64_t *action_flags,
3304          int *actions_n, const struct rte_flow_action *action,
3305          uint64_t item_flags, struct rte_flow_error *error)
3306 {
3307         const struct mlx5_priv *priv = dev->data->dev_private;
3308         int ret;
3309
3310         if (encap && (!encap->size || !encap->data))
3311                 return rte_flow_error_set(error, EINVAL,
3312                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3313                                           "raw encap data cannot be empty");
3314         if (decap && encap) {
3315                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3316                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3317                         /* L3 encap. */
3318                         decap = NULL;
3319                 else if (encap->size <=
3320                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3321                            decap->size >
3322                            MLX5_ENCAPSULATION_DECISION_SIZE)
3323                         /* L3 decap. */
3324                         encap = NULL;
3325                 else if (encap->size >
3326                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3327                            decap->size >
3328                            MLX5_ENCAPSULATION_DECISION_SIZE)
3329                         /* 2 L2 actions: encap and decap. */
3330                         ;
3331                 else
3332                         return rte_flow_error_set(error,
3333                                 ENOTSUP,
3334                                 RTE_FLOW_ERROR_TYPE_ACTION,
3335                                 NULL, "unsupported combination of "
3336                                 "too small raw decap and too "
3337                                 "small raw encap sizes");
3338         }
3339         if (decap) {
3340                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3341                                                     item_flags, attr, error);
3342                 if (ret < 0)
3343                         return ret;
3344                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3345                 ++(*actions_n);
3346         }
3347         if (encap) {
3348                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3349                         return rte_flow_error_set(error, ENOTSUP,
3350                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3351                                                   NULL,
3352                                                   "small raw encap size");
3353                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3354                         return rte_flow_error_set(error, EINVAL,
3355                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3356                                                   NULL,
3357                                                   "more than one encap action");
3358                 if (!attr->transfer && priv->representor)
3359                         return rte_flow_error_set
3360                                         (error, ENOTSUP,
3361                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3362                                          "encap action for VF representor "
3363                                          "not supported on NIC table");
3364                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3365                 ++(*actions_n);
3366         }
3367         return 0;
3368 }
3369
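/*
 * Illustrative sketch only, not part of the driver: the decision above
 * hinges on MLX5_ENCAPSULATION_DECISION_SIZE. A raw decap of at most
 * that size paired with a larger raw encap is treated as L3 encap (the
 * decap is folded away); the mirrored sizes yield L3 decap; two larger
 * buffers yield an L2 decap plus L2 encap pair. A hypothetical L3
 * encap setup, assuming hdr_buf/hdr_size describe a tunnel header
 * larger than the decision size:
 */
static __rte_unused void
example_l3_encap_actions(struct rte_flow_action actions[3],
                         struct rte_flow_action_raw_decap *decap,
                         struct rte_flow_action_raw_encap *encap,
                         uint8_t *hdr_buf, size_t hdr_size)
{
        /* Empty decap: size <= MLX5_ENCAPSULATION_DECISION_SIZE. */
        decap->data = NULL;
        decap->size = 0;
        /* Encap buffer larger than the decision size: L3 encap. */
        encap->data = hdr_buf;
        encap->size = hdr_size;
        actions[0] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP,
                       .conf = decap };
        actions[1] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
                       .conf = encap };
        actions[2] = (struct rte_flow_action)
                     { .type = RTE_FLOW_ACTION_TYPE_END };
}
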
3370 /**
3371  * Match encap_decap resource.
3372  *
3373  * @param list
3374  *   Pointer to the hash list.
3375  * @param entry
3376  *   Pointer to the existing resource entry object.
3377  * @param key
3378  *   Key of the new entry.
3379  * @param cb_ctx
3380  *   Pointer to new encap_decap resource.
3381  *
3382  * @return
3383  *   0 on match, non-zero otherwise.
3384  */
3385 int
3386 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3387                              struct mlx5_hlist_entry *entry,
3388                              uint64_t key __rte_unused, void *cb_ctx)
3389 {
3390         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3391         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3392         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3393
3394         cache_resource = container_of(entry,
3395                                       struct mlx5_flow_dv_encap_decap_resource,
3396                                       entry);
3397         if (resource->reformat_type == cache_resource->reformat_type &&
3398             resource->ft_type == cache_resource->ft_type &&
3399             resource->flags == cache_resource->flags &&
3400             resource->size == cache_resource->size &&
3401             !memcmp((const void *)resource->buf,
3402                     (const void *)cache_resource->buf,
3403                     resource->size))
3404                 return 0;
3405         return -1;
3406 }
3407
3408 /**
3409  * Allocate encap_decap resource.
3410  *
3411  * @param list
3412  *   Pointer to the hash list.
3413  * @param key
3414  *   Key of the new entry.
3415  * @param cb_ctx
3416  *   Pointer to new encap_decap resource.
3417  *
3418  * @return
3419  *   Pointer to the created entry on success, NULL otherwise and rte_errno is set.
3420  */
3421 struct mlx5_hlist_entry *
3422 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3423                               uint64_t key __rte_unused,
3424                               void *cb_ctx)
3425 {
3426         struct mlx5_dev_ctx_shared *sh = list->ctx;
3427         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3428         struct mlx5dv_dr_domain *domain;
3429         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3430         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3431         uint32_t idx;
3432         int ret;
3433
3434         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3435                 domain = sh->fdb_domain;
3436         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3437                 domain = sh->rx_domain;
3438         else
3439                 domain = sh->tx_domain;
3440         /* Register new encap/decap resource. */
3441         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3442                                        &idx);
3443         if (!cache_resource) {
3444                 rte_flow_error_set(ctx->error, ENOMEM,
3445                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3446                                    "cannot allocate resource memory");
3447                 return NULL;
3448         }
3449         *cache_resource = *resource;
3450         cache_resource->idx = idx;
3451         ret = mlx5_flow_os_create_flow_action_packet_reformat
3452                                         (sh->ctx, domain, cache_resource,
3453                                          &cache_resource->action);
3454         if (ret) {
3455                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3456                 rte_flow_error_set(ctx->error, ENOMEM,
3457                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3458                                    NULL, "cannot create action");
3459                 return NULL;
3460         }
3461
3462         return &cache_resource->entry;
3463 }
3464
3465 /**
3466  * Find existing encap/decap resource or create and register a new one.
3467  *
3468  * @param[in, out] dev
3469  *   Pointer to rte_eth_dev structure.
3470  * @param[in, out] resource
3471  *   Pointer to encap/decap resource.
3472  * @param[in, out] dev_flow
3473  *   Pointer to the dev_flow.
3474  * @param[out] error
3475  *   pointer to error structure.
3476  *
3477  * @return
3478  *   0 on success otherwise -errno and errno is set.
3479  */
3480 static int
3481 flow_dv_encap_decap_resource_register
3482                         (struct rte_eth_dev *dev,
3483                          struct mlx5_flow_dv_encap_decap_resource *resource,
3484                          struct mlx5_flow *dev_flow,
3485                          struct rte_flow_error *error)
3486 {
3487         struct mlx5_priv *priv = dev->data->dev_private;
3488         struct mlx5_dev_ctx_shared *sh = priv->sh;
3489         struct mlx5_hlist_entry *entry;
3490         union {
3491                 struct {
3492                         uint32_t ft_type:8;
3493                         uint32_t refmt_type:8;
3494                         /*
3495                          * Header reformat actions can be shared between
3496                          * non-root tables. One bit to indicate non-root
3497                          * table or not.
3498                          */
3499                         uint32_t is_root:1;
3500                         uint32_t reserve:15;
3501                 };
3502                 uint32_t v32;
3503         } encap_decap_key = {
3504                 {
3505                         .ft_type = resource->ft_type,
3506                         .refmt_type = resource->reformat_type,
3507                         .is_root = !!dev_flow->dv.group,
3508                         .reserve = 0,
3509                 }
3510         };
3511         struct mlx5_flow_cb_ctx ctx = {
3512                 .error = error,
3513                 .data = resource,
3514         };
3515         uint64_t key64;
3516
3517         resource->flags = dev_flow->dv.group ? 0 : 1;
3518         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3519                                 sizeof(encap_decap_key.v32), 0);
3520         if (resource->reformat_type !=
3521             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3522             resource->size)
3523                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3524         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3525         if (!entry)
3526                 return -rte_errno;
3527         resource = container_of(entry, typeof(*resource), entry);
3528         dev_flow->dv.encap_decap = resource;
3529         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3530         return 0;
3531 }
3532
3533 /**
3534  * Find existing table jump resource or create and register a new one.
3535  *
3536  * @param[in, out] dev
3537  *   Pointer to rte_eth_dev structure.
3538  * @param[in, out] tbl
3539  *   Pointer to flow table resource.
3540  * @param[in, out] dev_flow
3541  *   Pointer to the dev_flow.
3542  * @param[out] error
3543  *   pointer to error structure.
3544  *
3545  * @return
3546  *   0 on success otherwise -errno and errno is set.
3547  */
3548 static int
3549 flow_dv_jump_tbl_resource_register
3550                         (struct rte_eth_dev *dev __rte_unused,
3551                          struct mlx5_flow_tbl_resource *tbl,
3552                          struct mlx5_flow *dev_flow,
3553                          struct rte_flow_error *error __rte_unused)
3554 {
3555         struct mlx5_flow_tbl_data_entry *tbl_data =
3556                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3557
3558         MLX5_ASSERT(tbl);
3559         MLX5_ASSERT(tbl_data->jump.action);
3560         dev_flow->handle->rix_jump = tbl_data->idx;
3561         dev_flow->dv.jump = &tbl_data->jump;
3562         return 0;
3563 }
3564
3565 int
3566 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3567                          struct mlx5_cache_entry *entry, void *cb_ctx)
3568 {
3569         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3570         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3571         struct mlx5_flow_dv_port_id_action_resource *res =
3572                         container_of(entry, typeof(*res), entry);
3573
3574         return ref->port_id != res->port_id;
3575 }
3576
3577 struct mlx5_cache_entry *
3578 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3579                           struct mlx5_cache_entry *entry __rte_unused,
3580                           void *cb_ctx)
3581 {
3582         struct mlx5_dev_ctx_shared *sh = list->ctx;
3583         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3584         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3585         struct mlx5_flow_dv_port_id_action_resource *cache;
3586         uint32_t idx;
3587         int ret;
3588
3589         /* Register new port id action resource. */
3590         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3591         if (!cache) {
3592                 rte_flow_error_set(ctx->error, ENOMEM,
3593                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3594                                    "cannot allocate port_id action cache memory");
3595                 return NULL;
3596         }
3597         *cache = *ref;
3598         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3599                                                         ref->port_id,
3600                                                         &cache->action);
3601         if (ret) {
3602                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3603                 rte_flow_error_set(ctx->error, ENOMEM,
3604                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3605                                    "cannot create action");
3606                 return NULL;
3607         }
3608         return &cache->entry;
3609 }
3610
3611 /**
3612  * Find existing port ID action resource or create and register a new one.
3613  *
3614  * @param[in, out] dev
3615  *   Pointer to rte_eth_dev structure.
3616  * @param[in, out] resource
3617  *   Pointer to port ID action resource.
3618  * @param[in, out] dev_flow
3619  *   Pointer to the dev_flow.
3620  * @param[out] error
3621  *   pointer to error structure.
3622  *
3623  * @return
3624  *   0 on success otherwise -errno and errno is set.
3625  */
3626 static int
3627 flow_dv_port_id_action_resource_register
3628                         (struct rte_eth_dev *dev,
3629                          struct mlx5_flow_dv_port_id_action_resource *resource,
3630                          struct mlx5_flow *dev_flow,
3631                          struct rte_flow_error *error)
3632 {
3633         struct mlx5_priv *priv = dev->data->dev_private;
3634         struct mlx5_cache_entry *entry;
3635         struct mlx5_flow_dv_port_id_action_resource *cache;
3636         struct mlx5_flow_cb_ctx ctx = {
3637                 .error = error,
3638                 .data = resource,
3639         };
3640
3641         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3642         if (!entry)
3643                 return -rte_errno;
3644         cache = container_of(entry, typeof(*cache), entry);
3645         dev_flow->dv.port_id_action = cache;
3646         dev_flow->handle->rix_port_id_action = cache->idx;
3647         return 0;
3648 }
3649
3650 int
3651 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3652                          struct mlx5_cache_entry *entry, void *cb_ctx)
3653 {
3654         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3655         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3656         struct mlx5_flow_dv_push_vlan_action_resource *res =
3657                         container_of(entry, typeof(*res), entry);
3658
3659         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3660 }
3661
3662 struct mlx5_cache_entry *
3663 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3664                           struct mlx5_cache_entry *entry __rte_unused,
3665                           void *cb_ctx)
3666 {
3667         struct mlx5_dev_ctx_shared *sh = list->ctx;
3668         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3669         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3670         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3671         struct mlx5dv_dr_domain *domain;
3672         uint32_t idx;
3673         int ret;
3674
3675         /* Register new push VLAN action resource. */
3676         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3677         if (!cache) {
3678                 rte_flow_error_set(ctx->error, ENOMEM,
3679                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3680                                    "cannot allocate push_vlan action cache memory");
3681                 return NULL;
3682         }
3683         *cache = *ref;
3684         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3685                 domain = sh->fdb_domain;
3686         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3687                 domain = sh->rx_domain;
3688         else
3689                 domain = sh->tx_domain;
3690         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3691                                                         &cache->action);
3692         if (ret) {
3693                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3694                 rte_flow_error_set(ctx->error, ENOMEM,
3695                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3696                                    "cannot create push vlan action");
3697                 return NULL;
3698         }
3699         return &cache->entry;
3700 }
3701
3702 /**
3703  * Find existing push vlan resource or create and register a new one.
3704  *
3705  * @param[in, out] dev
3706  *   Pointer to rte_eth_dev structure.
3707  * @param[in, out] resource
3708  *   Pointer to push VLAN action resource.
3709  * @param[in, out] dev_flow
3710  *   Pointer to the dev_flow.
3711  *   Pointer to error structure.
3712  *   pointer to error structure.
3713  *
3714  * @return
3715  *   0 on success, a negative errno value otherwise and rte_errno is set.
3716  */
3717 static int
3718 flow_dv_push_vlan_action_resource_register
3719                        (struct rte_eth_dev *dev,
3720                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3721                         struct mlx5_flow *dev_flow,
3722                         struct rte_flow_error *error)
3723 {
3724         struct mlx5_priv *priv = dev->data->dev_private;
3725         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3726         struct mlx5_cache_entry *entry;
3727         struct mlx5_flow_cb_ctx ctx = {
3728                 .error = error,
3729                 .data = resource,
3730         };
3731
3732         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3733         if (!entry)
3734                 return -rte_errno;
3735         cache = container_of(entry, typeof(*cache), entry);
3736
3737         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3738         dev_flow->dv.push_vlan_res = cache;
3739         return 0;
3740 }
3741
3742 /**
3743  * Get the header length of a specific rte_flow_item_type.
3744  *
3745  * @param[in] item_type
3746  *   Tested rte_flow_item_type.
3747  *
3748  * @return
3749  *   Size of the item type header in bytes, 0 if void or irrelevant.
3750  */
3751 static size_t
3752 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3753 {
3754         size_t retval;
3755
3756         switch (item_type) {
3757         case RTE_FLOW_ITEM_TYPE_ETH:
3758                 retval = sizeof(struct rte_ether_hdr);
3759                 break;
3760         case RTE_FLOW_ITEM_TYPE_VLAN:
3761                 retval = sizeof(struct rte_vlan_hdr);
3762                 break;
3763         case RTE_FLOW_ITEM_TYPE_IPV4:
3764                 retval = sizeof(struct rte_ipv4_hdr);
3765                 break;
3766         case RTE_FLOW_ITEM_TYPE_IPV6:
3767                 retval = sizeof(struct rte_ipv6_hdr);
3768                 break;
3769         case RTE_FLOW_ITEM_TYPE_UDP:
3770                 retval = sizeof(struct rte_udp_hdr);
3771                 break;
3772         case RTE_FLOW_ITEM_TYPE_TCP:
3773                 retval = sizeof(struct rte_tcp_hdr);
3774                 break;
3775         case RTE_FLOW_ITEM_TYPE_VXLAN:
3776         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3777                 retval = sizeof(struct rte_vxlan_hdr);
3778                 break;
3779         case RTE_FLOW_ITEM_TYPE_GRE:
3780         case RTE_FLOW_ITEM_TYPE_NVGRE:
3781                 retval = sizeof(struct rte_gre_hdr);
3782                 break;
3783         case RTE_FLOW_ITEM_TYPE_MPLS:
3784                 retval = sizeof(struct rte_mpls_hdr);
3785                 break;
3786         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3787         default:
3788                 retval = 0;
3789                 break;
3790         }
3791         return retval;
3792 }
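
/*
 * Editor's note: for the classic VXLAN encapsulation stack
 * ETH/IPV4/UDP/VXLAN the accumulated header length is
 * 14 + 20 + 8 + 8 = 50 bytes, which flow_dv_convert_encap_data()
 * below sums up item by item against MLX5_ENCAP_MAX_LEN.
 */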
3793
3794 #define MLX5_ENCAP_IPV4_VERSION         0x40
3795 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3796 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3797 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3798 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3799 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3800 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3801
3802 /**
3803  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
3804  *
3805  * @param[in] items
3806  *   Pointer to rte_flow_item objects list.
3807  * @param[out] buf
3808  *   Pointer to the output buffer.
3809  * @param[out] size
3810  *   Pointer to the output buffer size.
3811  * @param[out] error
3812  *   Pointer to the error structure.
3813  *
3814  * @return
3815  *   0 on success, a negative errno value otherwise and rte_errno is set.
3816  */
3817 static int
3818 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3819                            size_t *size, struct rte_flow_error *error)
3820 {
3821         struct rte_ether_hdr *eth = NULL;
3822         struct rte_vlan_hdr *vlan = NULL;
3823         struct rte_ipv4_hdr *ipv4 = NULL;
3824         struct rte_ipv6_hdr *ipv6 = NULL;
3825         struct rte_udp_hdr *udp = NULL;
3826         struct rte_vxlan_hdr *vxlan = NULL;
3827         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3828         struct rte_gre_hdr *gre = NULL;
3829         size_t len;
3830         size_t temp_size = 0;
3831
3832         if (!items)
3833                 return rte_flow_error_set(error, EINVAL,
3834                                           RTE_FLOW_ERROR_TYPE_ACTION,
3835                                           NULL, "invalid empty data");
3836         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3837                 len = flow_dv_get_item_hdr_len(items->type);
3838                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3839                         return rte_flow_error_set(error, EINVAL,
3840                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3841                                                   (void *)items->type,
3842                                                   "items total size is too big"
3843                                                   " for encap action");
3844                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3845                 switch (items->type) {
3846                 case RTE_FLOW_ITEM_TYPE_ETH:
3847                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3848                         break;
3849                 case RTE_FLOW_ITEM_TYPE_VLAN:
3850                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3851                         if (!eth)
3852                                 return rte_flow_error_set(error, EINVAL,
3853                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3854                                                 (void *)items->type,
3855                                                 "eth header not found");
3856                         if (!eth->ether_type)
3857                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3858                         break;
3859                 case RTE_FLOW_ITEM_TYPE_IPV4:
3860                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3861                         if (!vlan && !eth)
3862                                 return rte_flow_error_set(error, EINVAL,
3863                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3864                                                 (void *)items->type,
3865                                                 "neither eth nor vlan"
3866                                                 " header found");
3867                         if (vlan && !vlan->eth_proto)
3868                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3869                         else if (eth && !eth->ether_type)
3870                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3871                         if (!ipv4->version_ihl)
3872                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3873                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3874                         if (!ipv4->time_to_live)
3875                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3876                         break;
3877                 case RTE_FLOW_ITEM_TYPE_IPV6:
3878                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3879                         if (!vlan && !eth)
3880                                 return rte_flow_error_set(error, EINVAL,
3881                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3882                                                 (void *)items->type,
3883                                                 "neither eth nor vlan"
3884                                                 " header found");
3885                         if (vlan && !vlan->eth_proto)
3886                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3887                         else if (eth && !eth->ether_type)
3888                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3889                         if (!ipv6->vtc_flow)
3890                                 ipv6->vtc_flow =
3891                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3892                         if (!ipv6->hop_limits)
3893                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3894                         break;
3895                 case RTE_FLOW_ITEM_TYPE_UDP:
3896                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3897                         if (!ipv4 && !ipv6)
3898                                 return rte_flow_error_set(error, EINVAL,
3899                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3900                                                 (void *)items->type,
3901                                                 "ip header not found");
3902                         if (ipv4 && !ipv4->next_proto_id)
3903                                 ipv4->next_proto_id = IPPROTO_UDP;
3904                         else if (ipv6 && !ipv6->proto)
3905                                 ipv6->proto = IPPROTO_UDP;
3906                         break;
3907                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3908                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3909                         if (!udp)
3910                                 return rte_flow_error_set(error, EINVAL,
3911                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3912                                                 (void *)items->type,
3913                                                 "udp header not found");
3914                         if (!udp->dst_port)
3915                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3916                         if (!vxlan->vx_flags)
3917                                 vxlan->vx_flags =
3918                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3919                         break;
3920                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3921                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3922                         if (!udp)
3923                                 return rte_flow_error_set(error, EINVAL,
3924                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3925                                                 (void *)items->type,
3926                                                 "udp header not found");
3927                         if (!vxlan_gpe->proto)
3928                                 return rte_flow_error_set(error, EINVAL,
3929                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3930                                                 (void *)items->type,
3931                                                 "next protocol not found");
3932                         if (!udp->dst_port)
3933                                 udp->dst_port =
3934                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3935                         if (!vxlan_gpe->vx_flags)
3936                                 vxlan_gpe->vx_flags =
3937                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3938                         break;
3939                 case RTE_FLOW_ITEM_TYPE_GRE:
3940                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3941                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3942                         if (!gre->proto)
3943                                 return rte_flow_error_set(error, EINVAL,
3944                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3945                                                 (void *)items->type,
3946                                                 "next protocol not found");
3947                         if (!ipv4 && !ipv6)
3948                                 return rte_flow_error_set(error, EINVAL,
3949                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3950                                                 (void *)items->type,
3951                                                 "ip header not found");
3952                         if (ipv4 && !ipv4->next_proto_id)
3953                                 ipv4->next_proto_id = IPPROTO_GRE;
3954                         else if (ipv6 && !ipv6->proto)
3955                                 ipv6->proto = IPPROTO_GRE;
3956                         break;
3957                 case RTE_FLOW_ITEM_TYPE_VOID:
3958                         break;
3959                 default:
3960                         return rte_flow_error_set(error, EINVAL,
3961                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3962                                                   (void *)items->type,
3963                                                   "unsupported item type");
3964                         break;
3965                 }
3966                 temp_size += len;
3967         }
3968         *size = temp_size;
3969         return 0;
3970 }
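
/*
 * Editor's sketch (assumed item specs, not driver code): building a raw
 * VXLAN encapsulation buffer with the converter above.
 *
 *	uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *	size_t size = 0;
 *	const struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 *	if (flow_dv_convert_encap_data(items, buf, &size, error))
 *		return -rte_errno;
 *
 * Unset fields are defaulted on the fly, e.g. a zero UDP dst_port
 * becomes MLX5_UDP_PORT_VXLAN and zero VXLAN flags become
 * MLX5_ENCAP_VXLAN_FLAGS, so size ends up as the full 50-byte stack.
 */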
3971
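/**
 * Zero the UDP checksum of an IPv6/UDP encapsulation header.
 *
 * Editor's note: documentation block added; behavior taken from the
 * code below. IPv4 buffers are left untouched since HW computes the
 * IPv4 checksum, non-IP buffers are rejected, and for IPv6/UDP the
 * checksum field is cleared (the zero-checksum tunnel convention).
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation buffer, starting at the Ethernet
 *   header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */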
3972 static int
3973 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3974 {
3975         struct rte_ether_hdr *eth = NULL;
3976         struct rte_vlan_hdr *vlan = NULL;
3977         struct rte_ipv6_hdr *ipv6 = NULL;
3978         struct rte_udp_hdr *udp = NULL;
3979         char *next_hdr;
3980         uint16_t proto;
3981
3982         eth = (struct rte_ether_hdr *)data;
3983         next_hdr = (char *)(eth + 1);
3984         proto = RTE_BE16(eth->ether_type);
3985
3986         /* VLAN skipping */
3987         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3988                 vlan = (struct rte_vlan_hdr *)next_hdr;
3989                 proto = RTE_BE16(vlan->eth_proto);
3990                 next_hdr += sizeof(struct rte_vlan_hdr);
3991         }
3992
3993         /* HW calculates the IPv4 checksum, no need to proceed. */
3994         if (proto == RTE_ETHER_TYPE_IPV4)
3995                 return 0;
3996
3997         /* Non IPv4/IPv6 header, not supported. */
3998         if (proto != RTE_ETHER_TYPE_IPV6) {
3999                 return rte_flow_error_set(error, ENOTSUP,
4000                                           RTE_FLOW_ERROR_TYPE_ACTION,
4001                                           NULL, "Cannot offload non IPv4/IPv6");
4002         }
4003
4004         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4005
4006         /* Ignore non-UDP packets. */
4007         if (ipv6->proto != IPPROTO_UDP)
4008                 return 0;
4009
4010         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4011         udp->dgram_cksum = 0;
4012
4013         return 0;
4014 }
4015
4016 /**
4017  * Convert L2 encap action to DV specification.
4018  *
4019  * @param[in] dev
4020  *   Pointer to rte_eth_dev structure.
4021  * @param[in] action
4022  *   Pointer to action structure.
4023  * @param[in, out] dev_flow
4024  *   Pointer to the mlx5_flow.
4025  * @param[in] transfer
4026  *   Mark if the flow is E-Switch flow.
4027  * @param[out] error
4028  *   Pointer to the error structure.
4029  *
4030  * @return
4031  *   0 on success, a negative errno value otherwise and rte_errno is set.
4032  */
4033 static int
4034 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4035                                const struct rte_flow_action *action,
4036                                struct mlx5_flow *dev_flow,
4037                                uint8_t transfer,
4038                                struct rte_flow_error *error)
4039 {
4040         const struct rte_flow_item *encap_data;
4041         const struct rte_flow_action_raw_encap *raw_encap_data;
4042         struct mlx5_flow_dv_encap_decap_resource res = {
4043                 .reformat_type =
4044                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4045                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4046                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4047         };
4048
4049         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4050                 raw_encap_data =
4051                         (const struct rte_flow_action_raw_encap *)action->conf;
4052                 res.size = raw_encap_data->size;
4053                 memcpy(res.buf, raw_encap_data->data, res.size);
4054         } else {
4055                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4056                         encap_data =
4057                                 ((const struct rte_flow_action_vxlan_encap *)
4058                                                 action->conf)->definition;
4059                 else
4060                         encap_data =
4061                                 ((const struct rte_flow_action_nvgre_encap *)
4062                                                 action->conf)->definition;
4063                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4064                                                &res.size, error))
4065                         return -rte_errno;
4066         }
4067         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4068                 return -rte_errno;
4069         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4070                 return rte_flow_error_set(error, EINVAL,
4071                                           RTE_FLOW_ERROR_TYPE_ACTION,
4072                                           NULL, "can't create L2 encap action");
4073         return 0;
4074 }
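
/*
 * Editor's sketch (assumption): the rte_flow level action that this
 * helper translates; "pattern" stands for an item list like the one in
 * the encap sketch above.
 *
 *	struct rte_flow_action_vxlan_encap conf = { .definition = pattern };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *		.conf = &conf,
 *	};
 */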
4075
4076 /**
4077  * Convert L2 decap action to DV specification.
4078  *
4079  * @param[in] dev
4080  *   Pointer to rte_eth_dev structure.
4081  * @param[in, out] dev_flow
4082  *   Pointer to the mlx5_flow.
4083  * @param[in] transfer
4084  *   Mark if the flow is E-Switch flow.
4085  * @param[out] error
4086  *   Pointer to the error structure.
4087  *
4088  * @return
4089  *   0 on success, a negative errno value otherwise and rte_errno is set.
4090  */
4091 static int
4092 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4093                                struct mlx5_flow *dev_flow,
4094                                uint8_t transfer,
4095                                struct rte_flow_error *error)
4096 {
4097         struct mlx5_flow_dv_encap_decap_resource res = {
4098                 .size = 0,
4099                 .reformat_type =
4100                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4101                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4102                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4103         };
4104
4105         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4106                 return rte_flow_error_set(error, EINVAL,
4107                                           RTE_FLOW_ERROR_TYPE_ACTION,
4108                                           NULL, "can't create L2 decap action");
4109         return 0;
4110 }
4111
4112 /**
4113  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4114  *
4115  * @param[in] dev
4116  *   Pointer to rte_eth_dev structure.
4117  * @param[in] action
4118  *   Pointer to action structure.
4119  * @param[in, out] dev_flow
4120  *   Pointer to the mlx5_flow.
4121  * @param[in] attr
4122  *   Pointer to the flow attributes.
4123  * @param[out] error
4124  *   Pointer to the error structure.
4125  *
4126  * @return
4127  *   0 on success, a negative errno value otherwise and rte_errno is set.
4128  */
4129 static int
4130 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4131                                 const struct rte_flow_action *action,
4132                                 struct mlx5_flow *dev_flow,
4133                                 const struct rte_flow_attr *attr,
4134                                 struct rte_flow_error *error)
4135 {
4136         const struct rte_flow_action_raw_encap *encap_data;
4137         struct mlx5_flow_dv_encap_decap_resource res;
4138
4139         memset(&res, 0, sizeof(res));
4140         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4141         res.size = encap_data->size;
4142         memcpy(res.buf, encap_data->data, res.size);
4143         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4144                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4145                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4146         if (attr->transfer)
4147                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4148         else
4149                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4150                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4151         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4152                 return rte_flow_error_set(error, EINVAL,
4153                                           RTE_FLOW_ERROR_TYPE_ACTION,
4154                                           NULL, "can't create encap action");
4155         return 0;
4156 }
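
/*
 * Editor's note: the reformat type above is inferred from the buffer
 * length. A raw buffer shorter than MLX5_ENCAPSULATION_DECISION_SIZE
 * can only hold the inner L2 header to restore, so it is treated as an
 * L3 decap; a longer buffer carries a full outer stack and is treated
 * as an L3 encap.
 */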
4157
4158 /**
4159  * Create the push VLAN action.
4160  *
4161  * @param[in] dev
4162  *   Pointer to rte_eth_dev structure.
4163  * @param[in] attr
4164  *   Pointer to the flow attributes.
4165  * @param[in] vlan
4166  *   Pointer to the vlan to push to the Ethernet header.
4167  * @param[in, out] dev_flow
4168  *   Pointer to the mlx5_flow.
4169  * @param[out] error
4170  *   Pointer to the error structure.
4171  *
4172  * @return
4173  *   0 on success, a negative errno value otherwise and rte_errno is set.
4174  */
4175 static int
4176 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4177                                 const struct rte_flow_attr *attr,
4178                                 const struct rte_vlan_hdr *vlan,
4179                                 struct mlx5_flow *dev_flow,
4180                                 struct rte_flow_error *error)
4181 {
4182         struct mlx5_flow_dv_push_vlan_action_resource res;
4183
4184         memset(&res, 0, sizeof(res));
4185         res.vlan_tag =
4186                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4187                                  vlan->vlan_tci);
4188         if (attr->transfer)
4189                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4190         else
4191                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4192                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4193         return flow_dv_push_vlan_action_resource_register
4194                                             (dev, &res, dev_flow, error);
4195 }
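
/*
 * Editor's example: with eth_proto = 0x8100 (802.1Q) and
 * vlan_tci = 0x2064 (PCP 1, VID 100), the vlan_tag word built above is
 * rte_cpu_to_be_32(0x81002064).
 */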
4196
4197 /**
4198  * Validate the modify-header actions.
4199  *
4200  * @param[in] action_flags
4201  *   Holds the actions detected until now.
4202  * @param[in] action
4203  *   Pointer to the modify action.
4204  * @param[out] error
4205  *   Pointer to error structure.
4206  *
4207  * @return
4208  *   0 on success, a negative errno value otherwise and rte_errno is set.
4209  */
4210 static int
4211 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4212                                    const struct rte_flow_action *action,
4213                                    struct rte_flow_error *error)
4214 {
4215         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4216                 return rte_flow_error_set(error, EINVAL,
4217                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4218                                           NULL, "action configuration not set");
4219         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4220                 return rte_flow_error_set(error, EINVAL,
4221                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4222                                           "can't have encap action before"
4223                                           " modify action");
4224         return 0;
4225 }
4226
4227 /**
4228  * Validate the modify-header MAC address actions.
4229  *
4230  * @param[in] action_flags
4231  *   Holds the actions detected until now.
4232  * @param[in] action
4233  *   Pointer to the modify action.
4234  * @param[in] item_flags
4235  *   Holds the items detected.
4236  * @param[out] error
4237  *   Pointer to error structure.
4238  *
4239  * @return
4240  *   0 on success, a negative errno value otherwise and rte_errno is set.
4241  */
4242 static int
4243 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4244                                    const struct rte_flow_action *action,
4245                                    const uint64_t item_flags,
4246                                    struct rte_flow_error *error)
4247 {
4248         int ret = 0;
4249
4250         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4251         if (!ret) {
4252                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4253                         return rte_flow_error_set(error, EINVAL,
4254                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4255                                                   NULL,
4256                                                   "no L2 item in pattern");
4257         }
4258         return ret;
4259 }
4260
4261 /**
4262  * Validate the modify-header IPv4 address actions.
4263  *
4264  * @param[in] action_flags
4265  *   Holds the actions detected until now.
4266  * @param[in] action
4267  *   Pointer to the modify action.
4268  * @param[in] item_flags
4269  *   Holds the items detected.
4270  * @param[out] error
4271  *   Pointer to error structure.
4272  *
4273  * @return
4274  *   0 on success, a negative errno value otherwise and rte_errno is set.
4275  */
4276 static int
4277 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4278                                     const struct rte_flow_action *action,
4279                                     const uint64_t item_flags,
4280                                     struct rte_flow_error *error)
4281 {
4282         int ret = 0;
4283         uint64_t layer;
4284
4285         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4286         if (!ret) {
4287                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4288                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4289                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4290                 if (!(item_flags & layer))
4291                         return rte_flow_error_set(error, EINVAL,
4292                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4293                                                   NULL,
4294                                                   "no ipv4 item in pattern");
4295         }
4296         return ret;
4297 }
4298
4299 /**
4300  * Validate the modify-header IPv6 address actions.
4301  *
4302  * @param[in] action_flags
4303  *   Holds the actions detected until now.
4304  * @param[in] action
4305  *   Pointer to the modify action.
4306  * @param[in] item_flags
4307  *   Holds the items detected.
4308  * @param[out] error
4309  *   Pointer to error structure.
4310  *
4311  * @return
4312  *   0 on success, a negative errno value otherwise and rte_errno is set.
4313  */
4314 static int
4315 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4316                                     const struct rte_flow_action *action,
4317                                     const uint64_t item_flags,
4318                                     struct rte_flow_error *error)
4319 {
4320         int ret = 0;
4321         uint64_t layer;
4322
4323         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4324         if (!ret) {
4325                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4326                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4327                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4328                 if (!(item_flags & layer))
4329                         return rte_flow_error_set(error, EINVAL,
4330                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4331                                                   NULL,
4332                                                   "no ipv6 item in pattern");
4333         }
4334         return ret;
4335 }
4336
4337 /**
4338  * Validate the modify-header TP actions.
4339  *
4340  * @param[in] action_flags
4341  *   Holds the actions detected until now.
4342  * @param[in] action
4343  *   Pointer to the modify action.
4344  * @param[in] item_flags
4345  *   Holds the items detected.
4346  * @param[out] error
4347  *   Pointer to error structure.
4348  *
4349  * @return
4350  *   0 on success, a negative errno value otherwise and rte_errno is set.
4351  */
4352 static int
4353 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4354                                   const struct rte_flow_action *action,
4355                                   const uint64_t item_flags,
4356                                   struct rte_flow_error *error)
4357 {
4358         int ret = 0;
4359         uint64_t layer;
4360
4361         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4362         if (!ret) {
4363                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4364                                  MLX5_FLOW_LAYER_INNER_L4 :
4365                                  MLX5_FLOW_LAYER_OUTER_L4;
4366                 if (!(item_flags & layer))
4367                         return rte_flow_error_set(error, EINVAL,
4368                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4369                                                   NULL, "no transport layer "
4370                                                   "in pattern");
4371         }
4372         return ret;
4373 }
4374
4375 /**
4376  * Validate the modify-header actions of increment/decrement
4377  * TCP Sequence-number.
4378  *
4379  * @param[in] action_flags
4380  *   Holds the actions detected until now.
4381  * @param[in] action
4382  *   Pointer to the modify action.
4383  * @param[in] item_flags
4384  *   Holds the items detected.
4385  * @param[out] error
4386  *   Pointer to error structure.
4387  *
4388  * @return
4389  *   0 on success, a negative errno value otherwise and rte_errno is set.
4390  */
4391 static int
4392 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4393                                        const struct rte_flow_action *action,
4394                                        const uint64_t item_flags,
4395                                        struct rte_flow_error *error)
4396 {
4397         int ret = 0;
4398         uint64_t layer;
4399
4400         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4401         if (!ret) {
4402                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4403                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4404                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4405                 if (!(item_flags & layer))
4406                         return rte_flow_error_set(error, EINVAL,
4407                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4408                                                   NULL, "no TCP item in"
4409                                                   " pattern");
4410                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4411                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4412                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4413                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4414                         return rte_flow_error_set(error, EINVAL,
4415                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4416                                                   NULL,
4417                                                   "cannot decrease and increase"
4418                                                   " TCP sequence number"
4419                                                   " at the same time");
4420         }
4421         return ret;
4422 }
4423
4424 /**
4425  * Validate the modify-header actions of increment/decrement
4426  * TCP Acknowledgment number.
4427  *
4428  * @param[in] action_flags
4429  *   Holds the actions detected until now.
4430  * @param[in] action
4431  *   Pointer to the modify action.
4432  * @param[in] item_flags
4433  *   Holds the items detected.
4434  * @param[out] error
4435  *   Pointer to error structure.
4436  *
4437  * @return
4438  *   0 on success, a negative errno value otherwise and rte_errno is set.
4439  */
4440 static int
4441 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4442                                        const struct rte_flow_action *action,
4443                                        const uint64_t item_flags,
4444                                        struct rte_flow_error *error)
4445 {
4446         int ret = 0;
4447         uint64_t layer;
4448
4449         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4450         if (!ret) {
4451                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4452                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4453                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4454                 if (!(item_flags & layer))
4455                         return rte_flow_error_set(error, EINVAL,
4456                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4457                                                   NULL, "no TCP item in"
4458                                                   " pattern");
4459                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4460                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4461                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4462                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4463                         return rte_flow_error_set(error, EINVAL,
4464                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4465                                                   NULL,
4466                                                   "cannot decrease and increase"
4467                                                   " TCP acknowledgment number"
4468                                                   " at the same time");
4469         }
4470         return ret;
4471 }
4472
4473 /**
4474  * Validate the modify-header TTL actions.
4475  *
4476  * @param[in] action_flags
4477  *   Holds the actions detected until now.
4478  * @param[in] action
4479  *   Pointer to the modify action.
4480  * @param[in] item_flags
4481  *   Holds the items detected.
4482  * @param[out] error
4483  *   Pointer to error structure.
4484  *
4485  * @return
4486  *   0 on success, a negative errno value otherwise and rte_errno is set.
4487  */
4488 static int
4489 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4490                                    const struct rte_flow_action *action,
4491                                    const uint64_t item_flags,
4492                                    struct rte_flow_error *error)
4493 {
4494         int ret = 0;
4495         uint64_t layer;
4496
4497         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4498         if (!ret) {
4499                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4500                                  MLX5_FLOW_LAYER_INNER_L3 :
4501                                  MLX5_FLOW_LAYER_OUTER_L3;
4502                 if (!(item_flags & layer))
4503                         return rte_flow_error_set(error, EINVAL,
4504                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4505                                                   NULL,
4506                                                   "no IP protocol in pattern");
4507         }
4508         return ret;
4509 }
4510
4511 static int
4512 mlx5_flow_item_field_width(enum rte_flow_field_id field)
4513 {
4514         switch (field) {
4515         case RTE_FLOW_FIELD_START:
4516                 return 32;
4517         case RTE_FLOW_FIELD_MAC_DST:
4518         case RTE_FLOW_FIELD_MAC_SRC:
4519                 return 48;
4520         case RTE_FLOW_FIELD_VLAN_TYPE:
4521                 return 16;
4522         case RTE_FLOW_FIELD_VLAN_ID:
4523                 return 12;
4524         case RTE_FLOW_FIELD_MAC_TYPE:
4525                 return 16;
4526         case RTE_FLOW_FIELD_IPV4_DSCP:
4527                 return 6;
4528         case RTE_FLOW_FIELD_IPV4_TTL:
4529                 return 8;
4530         case RTE_FLOW_FIELD_IPV4_SRC:
4531         case RTE_FLOW_FIELD_IPV4_DST:
4532                 return 32;
4533         case RTE_FLOW_FIELD_IPV6_DSCP:
4534                 return 6;
4535         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
4536                 return 8;
4537         case RTE_FLOW_FIELD_IPV6_SRC:
4538         case RTE_FLOW_FIELD_IPV6_DST:
4539                 return 128;
4540         case RTE_FLOW_FIELD_TCP_PORT_SRC:
4541         case RTE_FLOW_FIELD_TCP_PORT_DST:
4542                 return 16;
4543         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
4544         case RTE_FLOW_FIELD_TCP_ACK_NUM:
4545                 return 32;
4546         case RTE_FLOW_FIELD_TCP_FLAGS:
4547                 return 6;
4548         case RTE_FLOW_FIELD_UDP_PORT_SRC:
4549         case RTE_FLOW_FIELD_UDP_PORT_DST:
4550                 return 16;
4551         case RTE_FLOW_FIELD_VXLAN_VNI:
4552         case RTE_FLOW_FIELD_GENEVE_VNI:
4553                 return 24;
4554         case RTE_FLOW_FIELD_GTP_TEID:
4555         case RTE_FLOW_FIELD_TAG:
4556                 return 32;
4557         case RTE_FLOW_FIELD_MARK:
4558                 return 24;
4559         case RTE_FLOW_FIELD_META:
4560         case RTE_FLOW_FIELD_POINTER:
4561         case RTE_FLOW_FIELD_VALUE:
4562                 return 32;
4563         default:
4564                 MLX5_ASSERT(false);
4565         }
4566         return 0;
4567 }
4568
4569 /**
4570  * Validate the generic modify field actions.
4571  *
4572  * @param[in] action_flags
4573  *   Holds the actions detected until now.
4574  * @param[in] action
4575  *   Pointer to the modify action.
4578  * @param[out] error
4579  *   Pointer to error structure.
4580  *
4581  * @return
4582  *   Number of header fields to modify (0 or more) on success,
4583  *   a negative errno value otherwise and rte_errno is set.
4584  */
4585 static int
4586 flow_dv_validate_action_modify_field(const uint64_t action_flags,
4587                                    const struct rte_flow_action *action,
4588                                    struct rte_flow_error *error)
4589 {
4590         int ret = 0;
4591         const struct rte_flow_action_modify_field *action_modify_field =
4592                 action->conf;
4593         uint32_t dst_width =
4594                 mlx5_flow_item_field_width(action_modify_field->dst.field);
4595         uint32_t src_width =
4596                 mlx5_flow_item_field_width(action_modify_field->src.field);
4597
4598         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4599         if (ret)
4600                 return ret;
4601
4602         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4603             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4604                 if (action_modify_field->dst.offset >= dst_width ||
4605                     (action_modify_field->dst.offset % 32))
4606                         return rte_flow_error_set(error, EINVAL,
4607                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4608                                                 NULL,
4609                                                 "destination offset is too big"
4610                                                 " or not aligned to 4 bytes");
4611                 if (action_modify_field->dst.level &&
4612                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4613                         return rte_flow_error_set(error, EINVAL,
4614                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4615                                                 NULL,
4616                                                 "cannot modify inner headers");
4617         }
4618         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4619             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4620                 if (action_modify_field->src.offset >= src_width ||
4621                     (action_modify_field->src.offset % 32))
4622                         return rte_flow_error_set(error, EINVAL,
4623                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4624                                                 NULL,
4625                                                 "source offset is too big"
4626                                                 " or not aligned to 4 bytes");
4627                 if (action_modify_field->src.level &&
4628                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4629                         return rte_flow_error_set(error, EINVAL,
4630                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4631                                                 NULL,
4632                                                 "cannot copy from inner headers");
4633         }
4634         if (action_modify_field->width == 0)
4635                 return rte_flow_error_set(error, EINVAL,
4636                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4637                                                 NULL,
4638                                                 "width is required for modify action");
4639         if (action_modify_field->dst.field ==
4640             action_modify_field->src.field)
4641                 return rte_flow_error_set(error, EINVAL,
4642                                         RTE_FLOW_ERROR_TYPE_ACTION,
4643                                         NULL,
4644                                         "source and destination fields"
4645                                         " cannot be the same");
4646         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4647             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4648                 return rte_flow_error_set(error, EINVAL,
4649                                         RTE_FLOW_ERROR_TYPE_ACTION,
4650                                         NULL,
4651                                         "immediate value or a pointer to it"
4652                                         " cannot be used as a destination");
4653         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4654             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4655                 return rte_flow_error_set(error, EINVAL,
4656                                 RTE_FLOW_ERROR_TYPE_ACTION,
4657                                 NULL,
4658                                 "modification of an arbitrary"
4659                                 " place in a packet is not supported");
4660         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4661                 return rte_flow_error_set(error, EINVAL,
4662                                 RTE_FLOW_ERROR_TYPE_ACTION,
4663                                 NULL,
4664                                 "add and sub operations"
4665                                 " are not supported");
4666         return (action_modify_field->width / 32) +
4667                !!(action_modify_field->width % 32);
4668 }
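
/*
 * Editor's example: a modify-field action on a 48-bit field (e.g. a
 * MAC address) with width = 48 returns 48 / 32 + !!(48 % 32) = 2,
 * i.e. two 32-bit modify-header words.
 */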
4669
4670 /**
4671  * Validate jump action.
4672  *
4673  * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, NULL for a non-tunnel rule.
 * @param[in] action
4674  *   Pointer to the jump action.
4675  * @param[in] action_flags
4676  *   Holds the actions detected until now.
4677  * @param[in] attributes
4678  *   Pointer to flow attributes
4679  * @param[in] external
4680  *   Action belongs to flow rule created by request external to PMD.
4681  * @param[out] error
4682  *   Pointer to error structure.
4683  *
4684  * @return
4685  *   0 on success, a negative errno value otherwise and rte_errno is set.
4686  */
4687 static int
4688 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4689                              const struct mlx5_flow_tunnel *tunnel,
4690                              const struct rte_flow_action *action,
4691                              uint64_t action_flags,
4692                              const struct rte_flow_attr *attributes,
4693                              bool external, struct rte_flow_error *error)
4694 {
4695         uint32_t target_group, table;
4696         int ret = 0;
4697         struct flow_grp_info grp_info = {
4698                 .external = !!external,
4699                 .transfer = !!attributes->transfer,
4700                 .fdb_def_rule = 1,
4701                 .std_tbl_fix = 0
4702         };
4703         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4704                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4705                 return rte_flow_error_set(error, EINVAL,
4706                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4707                                           "can't have 2 fate actions in"
4708                                           " the same flow");
4709         if (action_flags & MLX5_FLOW_ACTION_METER)
4710                 return rte_flow_error_set(error, ENOTSUP,
4711                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4712                                           "jump with meter not supported");
4713         if (!action->conf)
4714                 return rte_flow_error_set(error, EINVAL,
4715                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4716                                           NULL, "action configuration not set");
4717         target_group =
4718                 ((const struct rte_flow_action_jump *)action->conf)->group;
4719         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4720                                        &grp_info, error);
4721         if (ret)
4722                 return ret;
4723         if (attributes->group == target_group &&
4724             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4725                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4726                 return rte_flow_error_set(error, EINVAL,
4727                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4728                                           "target group must be other than"
4729                                           " the current flow group");
4730         return 0;
4731 }
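
/*
 * Editor's sketch (assumption): a jump from group 0 to group 1 passes
 * the validation above, while jumping to the rule's own group is
 * rejected unless the tunnel-offload SET/MATCH actions are present.
 *
 *	struct rte_flow_action_jump jump = { .group = 1 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_JUMP,
 *		.conf = &jump,
 *	};
 */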
4732
4733 /**
4734  * Validate the port_id action.
4735  *
4736  * @param[in] dev
4737  *   Pointer to rte_eth_dev structure.
4738  * @param[in] action_flags
4739  *   Bit-fields that holds the actions detected until now.
4740  * @param[in] action
4741  *   Port_id RTE action structure.
4742  * @param[in] attr
4743  *   Attributes of flow that includes this action.
4744  * @param[out] error
4745  *   Pointer to error structure.
4746  *
4747  * @return
4748  *   0 on success, a negative errno value otherwise and rte_errno is set.
4749  */
4750 static int
4751 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4752                                 uint64_t action_flags,
4753                                 const struct rte_flow_action *action,
4754                                 const struct rte_flow_attr *attr,
4755                                 struct rte_flow_error *error)
4756 {
4757         const struct rte_flow_action_port_id *port_id;
4758         struct mlx5_priv *act_priv;
4759         struct mlx5_priv *dev_priv;
4760         uint16_t port;
4761
4762         if (!attr->transfer)
4763                 return rte_flow_error_set(error, ENOTSUP,
4764                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4765                                           NULL,
4766                                           "port id action is valid in transfer"
4767                                           " mode only");
4768         if (!action || !action->conf)
4769                 return rte_flow_error_set(error, ENOTSUP,
4770                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4771                                           NULL,
4772                                           "port id action parameters must be"
4773                                           " specified");
4774         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4775                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4776                 return rte_flow_error_set(error, EINVAL,
4777                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4778                                           "can have only one fate action in"
4779                                           " a flow");
4780         dev_priv = mlx5_dev_to_eswitch_info(dev);
4781         if (!dev_priv)
4782                 return rte_flow_error_set(error, rte_errno,
4783                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4784                                           NULL,
4785                                           "failed to obtain E-Switch info");
4786         port_id = action->conf;
4787         port = port_id->original ? dev->data->port_id : port_id->id;
4788         act_priv = mlx5_port_to_eswitch_info(port, false);
4789         if (!act_priv)
4790                 return rte_flow_error_set
4791                                 (error, rte_errno,
4792                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4793                                  "failed to obtain E-Switch port id for port");
4794         if (act_priv->domain_id != dev_priv->domain_id)
4795                 return rte_flow_error_set
4796                                 (error, EINVAL,
4797                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4798                                  "port does not belong to"
4799                                  " E-Switch being configured");
4800         return 0;
4801 }
4802
4803 /**
4804  * Get the maximum number of modify header actions.
4805  *
4806  * @param dev
4807  *   Pointer to rte_eth_dev structure.
4808  * @param flags
4809  *   Flag bits to check whether the table is root level.
4810  *
4811  * @return
4812  *   Max number of modify header actions device can support.
4813  */
4814 static inline unsigned int
4815 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4816                               uint64_t flags)
4817 {
4818         /*
4819          * There's no way to directly query the max capacity from FW.
4820          * The maximal value on root table should be assumed to be supported.
4821          */
4822         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4823                 return MLX5_MAX_MODIFY_NUM;
4824         else
4825                 return MLX5_ROOT_TBL_MODIFY_NUM;
4826 }
4827
4828 /**
4829  * Validate the meter action.
4830  *
4831  * @param[in] dev
4832  *   Pointer to rte_eth_dev structure.
4833  * @param[in] action_flags
4834  *   Bit-fields that holds the actions detected until now.
4835  * @param[in] action
4836  *   Pointer to the meter action.
4837  * @param[in] attr
4838  *   Attributes of flow that includes this action.
4839  * @param[out] error
4840  *   Pointer to error structure.
4841  *
4842  * @return
4843  *   0 on success, a negative errno value otherwise and rte_errno is set.
4844  */
4845 static int
4846 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4847                                 uint64_t action_flags,
4848                                 const struct rte_flow_action *action,
4849                                 const struct rte_flow_attr *attr,
4850                                 struct rte_flow_error *error)
4851 {
4852         struct mlx5_priv *priv = dev->data->dev_private;
4853         const struct rte_flow_action_meter *am = action->conf;
4854         struct mlx5_flow_meter *fm;
4855
4856         if (!am)
4857                 return rte_flow_error_set(error, EINVAL,
4858                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4859                                           "meter action conf is NULL");
4860
4861         if (action_flags & MLX5_FLOW_ACTION_METER)
4862                 return rte_flow_error_set(error, ENOTSUP,
4863                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4864                                           "meter chaining not supported");
4865         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4866                 return rte_flow_error_set(error, ENOTSUP,
4867                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4868                                           "meter with jump not supported");
4869         if (!priv->mtr_en)
4870                 return rte_flow_error_set(error, ENOTSUP,
4871                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4872                                           NULL,
4873                                           "meter action not supported");
4874         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4875         if (!fm)
4876                 return rte_flow_error_set(error, EINVAL,
4877                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4878                                           "Meter not found");
4879         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4880               (!fm->ingress && !attr->ingress && attr->egress) ||
4881               (!fm->egress && !attr->egress && attr->ingress))))
4882                 return rte_flow_error_set(error, EINVAL,
4883                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4884                                           "Flow attributes are either invalid "
4885                                           "or have a conflict with current "
4886                                           "meter attributes");
4887         return 0;
4888 }
4889
4890 /**
4891  * Validate the age action.
4892  *
4893  * @param[in] action_flags
4894  *   Holds the actions detected until now.
4895  * @param[in] action
4896  *   Pointer to the age action.
4897  * @param[in] dev
4898  *   Pointer to the Ethernet device structure.
4899  * @param[out] error
4900  *   Pointer to error structure.
4901  *
4902  * @return
4903  *   0 on success, a negative errno value otherwise and rte_errno is set.
4904  */
4905 static int
4906 flow_dv_validate_action_age(uint64_t action_flags,
4907                             const struct rte_flow_action *action,
4908                             struct rte_eth_dev *dev,
4909                             struct rte_flow_error *error)
4910 {
4911         struct mlx5_priv *priv = dev->data->dev_private;
4912         const struct rte_flow_action_age *age = action->conf;
4913
4914         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4915             !priv->sh->aso_age_mng))
4916                 return rte_flow_error_set(error, ENOTSUP,
4917                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4918                                           NULL,
4919                                           "age action not supported");
4920         if (!(action->conf))
4921                 return rte_flow_error_set(error, EINVAL,
4922                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4923                                           "configuration cannot be null");
4924         if (!(age->timeout))
4925                 return rte_flow_error_set(error, EINVAL,
4926                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4927                                           "invalid timeout value 0");
4928         if (action_flags & MLX5_FLOW_ACTION_AGE)
4929                 return rte_flow_error_set(error, EINVAL,
4930                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4931                                           "duplicate age actions set");
4932         return 0;
4933 }
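/*
 * Editor's note: a minimal sketch of an AGE action configuration that passes
 * the checks above: non-NULL conf, non-zero timeout (in seconds) and at most
 * one AGE action per flow. The context pointer is optional application data
 * reported back on aging events.
 *
 *     struct rte_flow_action_age age_conf = {
 *             .timeout = 10,
 *             .context = NULL,
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */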
4934
4935 /**
4936  * Validate the modify-header IPv4 DSCP actions.
4937  *
4938  * @param[in] action_flags
4939  *   Holds the actions detected until now.
4940  * @param[in] action
4941  *   Pointer to the modify action.
4942  * @param[in] item_flags
4943  *   Holds the items detected.
4944  * @param[out] error
4945  *   Pointer to error structure.
4946  *
4947  * @return
4948  *   0 on success, a negative errno value otherwise and rte_errno is set.
4949  */
4950 static int
4951 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4952                                          const struct rte_flow_action *action,
4953                                          const uint64_t item_flags,
4954                                          struct rte_flow_error *error)
4955 {
4956         int ret = 0;
4957
4958         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4959         if (!ret) {
4960                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4961                         return rte_flow_error_set(error, EINVAL,
4962                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4963                                                   NULL,
4964                                                   "no ipv4 item in pattern");
4965         }
4966         return ret;
4967 }
4968
4969 /**
4970  * Validate the modify-header IPv6 DSCP actions.
4971  *
4972  * @param[in] action_flags
4973  *   Holds the actions detected until now.
4974  * @param[in] action
4975  *   Pointer to the modify action.
4976  * @param[in] item_flags
4977  *   Holds the items detected.
4978  * @param[out] error
4979  *   Pointer to error structure.
4980  *
4981  * @return
4982  *   0 on success, a negative errno value otherwise and rte_errno is set.
4983  */
4984 static int
4985 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4986                                          const struct rte_flow_action *action,
4987                                          const uint64_t item_flags,
4988                                          struct rte_flow_error *error)
4989 {
4990         int ret = 0;
4991
4992         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4993         if (!ret) {
4994                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4995                         return rte_flow_error_set(error, EINVAL,
4996                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4997                                                   NULL,
4998                                                   "no ipv6 item in pattern");
4999         }
5000         return ret;
5001 }
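/*
 * Editor's note: the two DSCP validators above require the pattern to contain
 * the matching L3 item. A minimal sketch for the IPv4 case (the DSCP value 26
 * is arbitrary, and the fate action is omitted for brevity):
 *
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_set_dscp dscp = { .dscp = 26 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP, .conf = &dscp },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *
 * Without the IPV4 item, flow_dv_validate_action_modify_ipv4_dscp() fails
 * with "no ipv4 item in pattern".
 */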
5002
5003 /**
5004  * Match modify-header resource.
5005  *
5006  * @param list
5007  *   Pointer to the hash list.
 * @param entry
 *   Pointer to the existing resource entry object.
 * @param key
 *   Key of the new entry.
 * @param cb_ctx
 *   Pointer to the context holding the new modify-header resource.
5014  *
5015  * @return
5016  *   0 on matching, non-zero otherwise.
5017  */
5018 int
5019 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5020                         struct mlx5_hlist_entry *entry,
5021                         uint64_t key __rte_unused, void *cb_ctx)
5022 {
5023         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5024         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5025         struct mlx5_flow_dv_modify_hdr_resource *resource =
5026                         container_of(entry, typeof(*resource), entry);
5027         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5028
5029         key_len += ref->actions_num * sizeof(ref->actions[0]);
5030         return ref->actions_num != resource->actions_num ||
5031                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5032 }
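/*
 * Editor's note: the key compared above is the tail of the resource
 * structure, starting at the ft_type field and running through the
 * variable-length actions array:
 *
 *     key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type)
 *             + ref->actions_num * sizeof(ref->actions[0]);
 *
 * so two resources match only when every field from ft_type onward,
 * including all modification actions, is byte-identical.
 */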
5033
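/**
 * Allocate and register a modify-header resource (hash list create callback).
 *
 * @param list
 *   Pointer to the hash list.
 * @param key
 *   Key of the new entry.
 * @param cb_ctx
 *   Pointer to the context holding the new modify-header resource.
 *
 * @return
 *   Pointer to the new entry on success, NULL otherwise and the error
 *   structure in the context is set.
 */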
5034 struct mlx5_hlist_entry *
5035 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5036                          void *cb_ctx)
5037 {
5038         struct mlx5_dev_ctx_shared *sh = list->ctx;
5039         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5040         struct mlx5dv_dr_domain *ns;
5041         struct mlx5_flow_dv_modify_hdr_resource *entry;
5042         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5043         int ret;
5044         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5045         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5046
5047         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5048                             SOCKET_ID_ANY);
5049         if (!entry) {
5050                 rte_flow_error_set(ctx->error, ENOMEM,
5051                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5052                                    "cannot allocate resource memory");
5053                 return NULL;
5054         }
5055         rte_memcpy(&entry->ft_type,
5056                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5057                    key_len + data_len);
5058         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5059                 ns = sh->fdb_domain;
5060         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5061                 ns = sh->tx_domain;
5062         else
5063                 ns = sh->rx_domain;
5064         ret = mlx5_flow_os_create_flow_action_modify_header
5065                                         (sh->ctx, ns, entry,
5066                                          data_len, &entry->action);
5067         if (ret) {
5068                 mlx5_free(entry);
5069                 rte_flow_error_set(ctx->error, ENOMEM,
5070                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5071                                    NULL, "cannot create modification action");
5072                 return NULL;
5073         }
5074         return &entry->entry;
5075 }
5076
5077 /**
5078  * Validate the sample action.
5079  *
5080  * @param[in, out] action_flags
5081  *   Holds the actions detected until now.
5082  * @param[in] action
5083  *   Pointer to the sample action.
5084  * @param[in] dev
5085  *   Pointer to the Ethernet device structure.
5086  * @param[in] attr
5087  *   Attributes of flow that includes this action.
5088  * @param[in] item_flags
5089  *   Holds the items detected.
5090  * @param[in] rss
5091  *   Pointer to the RSS action.
5092  * @param[out] sample_rss
5093  *   Pointer to the RSS action in sample action list.
5094  * @param[out] count
5095  *   Pointer to the COUNT action in sample action list.
5096  * @param[out] error
5097  *   Pointer to error structure.
5098  *
5099  * @return
5100  *   0 on success, a negative errno value otherwise and rte_errno is set.
5101  */
5102 static int
5103 flow_dv_validate_action_sample(uint64_t *action_flags,
5104                                const struct rte_flow_action *action,
5105                                struct rte_eth_dev *dev,
5106                                const struct rte_flow_attr *attr,
5107                                uint64_t item_flags,
5108                                const struct rte_flow_action_rss *rss,
5109                                const struct rte_flow_action_rss **sample_rss,
5110                                const struct rte_flow_action_count **count,
5111                                struct rte_flow_error *error)
5112 {
5113         struct mlx5_priv *priv = dev->data->dev_private;
5114         struct mlx5_dev_config *dev_conf = &priv->config;
5115         const struct rte_flow_action_sample *sample = action->conf;
5116         const struct rte_flow_action *act;
5117         uint64_t sub_action_flags = 0;
5118         uint16_t queue_index = 0xFFFF;
5119         int actions_n = 0;
5120         int ret;
5121
5122         if (!sample)
5123                 return rte_flow_error_set(error, EINVAL,
5124                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5125                                           "configuration cannot be NULL");
5126         if (sample->ratio == 0)
5127                 return rte_flow_error_set(error, EINVAL,
5128                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5129                                           "ratio value starts from 1");
5130         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5131                 return rte_flow_error_set(error, ENOTSUP,
5132                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5133                                           NULL,
5134                                           "sample action not supported");
5135         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5136                 return rte_flow_error_set(error, EINVAL,
5137                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5138                                           "Multiple sample actions not "
5139                                           "supported");
5140         if (*action_flags & MLX5_FLOW_ACTION_METER)
5141                 return rte_flow_error_set(error, EINVAL,
5142                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5143                                           "wrong action order, meter should "
5144                                           "be after sample action");
5145         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5146                 return rte_flow_error_set(error, EINVAL,
5147                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5148                                           "wrong action order, jump should "
5149                                           "be after sample action");
5150         act = sample->actions;
5151         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5152                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5153                         return rte_flow_error_set(error, ENOTSUP,
5154                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5155                                                   act, "too many actions");
5156                 switch (act->type) {
5157                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5158                         ret = mlx5_flow_validate_action_queue(act,
5159                                                               sub_action_flags,
5160                                                               dev,
5161                                                               attr, error);
5162                         if (ret < 0)
5163                                 return ret;
5164                         queue_index = ((const struct rte_flow_action_queue *)
5165                                                         (act->conf))->index;
5166                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5167                         ++actions_n;
5168                         break;
5169                 case RTE_FLOW_ACTION_TYPE_RSS:
5170                         *sample_rss = act->conf;
5171                         ret = mlx5_flow_validate_action_rss(act,
5172                                                             sub_action_flags,
5173                                                             dev, attr,
5174                                                             item_flags,
5175                                                             error);
5176                         if (ret < 0)
5177                                 return ret;
5178                         if (rss && *sample_rss &&
5179                             ((*sample_rss)->level != rss->level ||
5180                             (*sample_rss)->types != rss->types))
                                return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL,
                                        "Can't use different RSS types "
                                        "or levels in the same flow");
5186                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5187                                 queue_index = (*sample_rss)->queue[0];
5188                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5189                         ++actions_n;
5190                         break;
5191                 case RTE_FLOW_ACTION_TYPE_MARK:
5192                         ret = flow_dv_validate_action_mark(dev, act,
5193                                                            sub_action_flags,
5194                                                            attr, error);
5195                         if (ret < 0)
5196                                 return ret;
5197                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5198                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5199                                                 MLX5_FLOW_ACTION_MARK_EXT;
5200                         else
5201                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5202                         ++actions_n;
5203                         break;
5204                 case RTE_FLOW_ACTION_TYPE_COUNT:
5205                         ret = flow_dv_validate_action_count
5206                                 (dev, act,
5207                                  *action_flags | sub_action_flags,
5208                                  error);
5209                         if (ret < 0)
5210                                 return ret;
5211                         *count = act->conf;
5212                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5213                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5214                         ++actions_n;
5215                         break;
5216                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5217                         ret = flow_dv_validate_action_port_id(dev,
5218                                                               sub_action_flags,
5219                                                               act,
5220                                                               attr,
5221                                                               error);
5222                         if (ret)
5223                                 return ret;
5224                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5225                         ++actions_n;
5226                         break;
5227                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5228                         ret = flow_dv_validate_action_raw_encap_decap
5229                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5230                                  &actions_n, action, item_flags, error);
5231                         if (ret < 0)
5232                                 return ret;
5233                         ++actions_n;
5234                         break;
5235                 default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported optional "
                                                  "action");
5241                 }
5242         }
5243         if (attr->ingress && !attr->transfer) {
5244                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5245                                           MLX5_FLOW_ACTION_RSS)))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "Ingress must have a dest "
                                                  "QUEUE for Sample");
5251         } else if (attr->egress && !attr->transfer) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL,
                                          "Sample only supports Ingress "
                                          "or E-Switch");
5257         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5258                 MLX5_ASSERT(attr->transfer);
5259                 if (sample->ratio > 1)
5260                         return rte_flow_error_set(error, ENOTSUP,
5261                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5262                                                   NULL,
5263                                                   "E-Switch doesn't support "
5264                                                   "any optional action "
5265                                                   "for sampling");
5266                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5267                         return rte_flow_error_set(error, ENOTSUP,
5268                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5269                                                   NULL,
5270                                                   "unsupported action QUEUE");
                if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action RSS");
                if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "E-Switch must have a dest "
                                                  "port for mirroring");
5282         }
        /* Continue validation for Xcap actions. */
5284         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5285             (queue_index == 0xFFFF ||
5286              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
                if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
                     MLX5_FLOW_XCAP_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "encap and decap "
                                                  "combination isn't "
                                                  "supported");
5294                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5295                                                         MLX5_FLOW_ACTION_ENCAP))
5296                         return rte_flow_error_set(error, ENOTSUP,
5297                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5298                                                   NULL, "encap is not supported"
5299                                                   " for ingress traffic");
5300         }
5301         return 0;
5302 }
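/*
 * Editor's note: a minimal sketch of a sample action accepted by the
 * validation above for a plain ingress rule, which requires a QUEUE or RSS
 * destination among the sub-actions; queue index 0 is hypothetical. A ratio
 * of 2 means roughly every second packet is sampled.
 *
 *     struct rte_flow_action_queue q = { .index = 0 };
 *     struct rte_flow_action sub_acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *             .ratio = 2,
 *             .actions = sub_acts,
 *     };
 *
 * For E-Switch (transfer) rules the checks differ: ratio > 1 allows no
 * optional sub-action at all, and mirroring requires a PORT_ID destination
 * instead of QUEUE/RSS.
 */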
5303
5304 /**
5305  * Find existing modify-header resource or create and register a new one.
5306  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value and rte_errno is set.
5318  */
5319 static int
5320 flow_dv_modify_hdr_resource_register
5321                         (struct rte_eth_dev *dev,
5322                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5323                          struct mlx5_flow *dev_flow,
5324                          struct rte_flow_error *error)
5325 {
5326         struct mlx5_priv *priv = dev->data->dev_private;
5327         struct mlx5_dev_ctx_shared *sh = priv->sh;
5328         uint32_t key_len = sizeof(*resource) -
5329                            offsetof(typeof(*resource), ft_type) +
5330                            resource->actions_num * sizeof(resource->actions[0]);
5331         struct mlx5_hlist_entry *entry;
5332         struct mlx5_flow_cb_ctx ctx = {
5333                 .error = error,
5334                 .data = resource,
5335         };
5336         uint64_t key64;
5337
5338         resource->flags = dev_flow->dv.group ? 0 :
5339                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5340         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5341                                     resource->flags))
5342                 return rte_flow_error_set(error, EOVERFLOW,
5343                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5344                                           "too many modify header items");
5345         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5346         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5347         if (!entry)
5348                 return -rte_errno;
5349         resource = container_of(entry, typeof(*resource), entry);
5350         dev_flow->handle->dvh.modify_hdr = resource;
5351         return 0;
5352 }
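/*
 * Editor's note: the hash list is keyed by a checksum over the same
 * ft_type..actions[] span that flow_dv_modify_match_cb() compares:
 *
 *     key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
 *
 * The checksum only selects a bucket; the byte-wise match callback resolves
 * collisions, so identical modify-header action lists are deduplicated into
 * a single DR action object.
 */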
5353
5354 /**
5355  * Get DV flow counter by index.
5356  *
5357  * @param[in] dev
5358  *   Pointer to the Ethernet device structure.
5359  * @param[in] idx
5360  *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
5363  *
5364  * @return
5365  *   Pointer to the counter, NULL otherwise.
5366  */
5367 static struct mlx5_flow_counter *
5368 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5369                            uint32_t idx,
5370                            struct mlx5_flow_counter_pool **ppool)
5371 {
5372         struct mlx5_priv *priv = dev->data->dev_private;
5373         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5374         struct mlx5_flow_counter_pool *pool;
5375
5376         /* Decrease to original index and clear shared bit. */
5377         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5378         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5379         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5380         MLX5_ASSERT(pool);
5381         if (ppool)
5382                 *ppool = pool;
5383         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5384 }
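/*
 * Editor's note: a worked example of the decoding above, assuming
 * MLX5_COUNTERS_PER_POOL is 512. Counter indexes are 1-based so that 0 can
 * mean "no counter", and shared counters carry an extra high bit
 * (MLX5_CNT_SHARED_OFFSET) that is masked away here:
 *
 *     idx = 1030
 *     idx = (1030 - 1) & (MLX5_CNT_SHARED_OFFSET - 1)  = 1029
 *     pool    = cmng->pools[1029 / 512]                = pools[2]
 *     counter = 1029 % 512                             = entry 5
 *
 * MLX5_MAKE_CNT_IDX() in flow_dv_counter_alloc() performs the inverse
 * encoding.
 */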
5385
5386 /**
 * Check whether the devx counter belongs to the pool.
5388  *
5389  * @param[in] pool
5390  *   Pointer to the counter pool.
5391  * @param[in] id
5392  *   The counter devx ID.
5393  *
5394  * @return
5395  *   True if counter belongs to the pool, false otherwise.
5396  */
5397 static bool
5398 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5399 {
5400         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5401                    MLX5_COUNTERS_PER_POOL;
5402
5403         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5404                 return true;
5405         return false;
5406 }
5407
5408 /**
5409  * Get a pool by devx counter ID.
5410  *
5411  * @param[in] cmng
5412  *   Pointer to the counter management.
5413  * @param[in] id
5414  *   The counter devx ID.
5415  *
 * @return
 *   The counter pool pointer if it exists, NULL otherwise.
5418  */
5419 static struct mlx5_flow_counter_pool *
5420 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5421 {
5422         uint32_t i;
5423         struct mlx5_flow_counter_pool *pool = NULL;
5424
5425         rte_spinlock_lock(&cmng->pool_update_sl);
5426         /* Check last used pool. */
5427         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5428             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5429                 pool = cmng->pools[cmng->last_pool_idx];
5430                 goto out;
5431         }
5432         /* ID out of range means no suitable pool in the container. */
5433         if (id > cmng->max_id || id < cmng->min_id)
5434                 goto out;
        /*
         * Search the container from the end, since counter IDs mostly
         * increase sequentially, so the last pool is likely the needed
         * one.
         */
5440         i = cmng->n_valid;
5441         while (i--) {
5442                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5443
5444                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5445                         pool = pool_tmp;
5446                         break;
5447                 }
5448         }
5449 out:
5450         rte_spinlock_unlock(&cmng->pool_update_sl);
5451         return pool;
5452 }
5453
5454 /**
5455  * Resize a counter container.
5456  *
5457  * @param[in] dev
5458  *   Pointer to the Ethernet device structure.
5459  *
5460  * @return
5461  *   0 on success, otherwise negative errno value and rte_errno is set.
5462  */
5463 static int
5464 flow_dv_container_resize(struct rte_eth_dev *dev)
5465 {
5466         struct mlx5_priv *priv = dev->data->dev_private;
5467         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5468         void *old_pools = cmng->pools;
5469         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5470         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5471         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5472
5473         if (!pools) {
5474                 rte_errno = ENOMEM;
5475                 return -ENOMEM;
5476         }
5477         if (old_pools)
5478                 memcpy(pools, old_pools, cmng->n *
5479                                        sizeof(struct mlx5_flow_counter_pool *));
5480         cmng->n = resize;
5481         cmng->pools = pools;
5482         if (old_pools)
5483                 mlx5_free(old_pools);
5484         return 0;
5485 }
5486
5487 /**
5488  * Query a devx flow counter.
5489  *
5490  * @param[in] dev
5491  *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the flow counter.
5494  * @param[out] pkts
5495  *   The statistics value of packets.
5496  * @param[out] bytes
5497  *   The statistics value of bytes.
5498  *
5499  * @return
5500  *   0 on success, otherwise a negative errno value and rte_errno is set.
5501  */
5502 static inline int
5503 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5504                      uint64_t *bytes)
5505 {
5506         struct mlx5_priv *priv = dev->data->dev_private;
5507         struct mlx5_flow_counter_pool *pool = NULL;
5508         struct mlx5_flow_counter *cnt;
5509         int offset;
5510
5511         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5512         MLX5_ASSERT(pool);
5513         if (priv->sh->cmng.counter_fallback)
5514                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5515                                         0, pkts, bytes, 0, NULL, NULL, 0);
5516         rte_spinlock_lock(&pool->sl);
5517         if (!pool->raw) {
5518                 *pkts = 0;
5519                 *bytes = 0;
5520         } else {
5521                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5522                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5523                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5524         }
5525         rte_spinlock_unlock(&pool->sl);
5526         return 0;
5527 }
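/*
 * Editor's note: a sketch of a driver-internal read of counter statistics
 * through the helper above; cnt_idx stands for an index previously returned
 * by flow_dv_counter_alloc().
 *
 *     uint64_t pkts, bytes;
 *
 *     if (!_flow_dv_query_count(dev, cnt_idx, &pkts, &bytes))
 *             DRV_LOG(DEBUG, "hits=%" PRIu64 " bytes=%" PRIu64,
 *                     pkts, bytes);
 *
 * In fallback mode the counter is queried synchronously via a DevX command;
 * otherwise the values come from the pool raw data filled in by the
 * asynchronous batch query.
 */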
5528
5529 /**
5530  * Create and initialize a new counter pool.
5531  *
5532  * @param[in] dev
5533  *   Pointer to the Ethernet device structure.
5534  * @param[out] dcs
5535  *   The devX counter handle.
 * @param[in] age
 *   Whether the pool is for counters allocated for aging.
5540  *
5541  * @return
 *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5543  */
5544 static struct mlx5_flow_counter_pool *
5545 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5546                     uint32_t age)
5547 {
5548         struct mlx5_priv *priv = dev->data->dev_private;
5549         struct mlx5_flow_counter_pool *pool;
5550         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5551         bool fallback = priv->sh->cmng.counter_fallback;
5552         uint32_t size = sizeof(*pool);
5553
5554         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5555         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5556         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5557         if (!pool) {
5558                 rte_errno = ENOMEM;
5559                 return NULL;
5560         }
5561         pool->raw = NULL;
5562         pool->is_aged = !!age;
5563         pool->query_gen = 0;
5564         pool->min_dcs = dcs;
5565         rte_spinlock_init(&pool->sl);
5566         rte_spinlock_init(&pool->csl);
5567         TAILQ_INIT(&pool->counters[0]);
5568         TAILQ_INIT(&pool->counters[1]);
5569         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5570         rte_spinlock_lock(&cmng->pool_update_sl);
5571         pool->index = cmng->n_valid;
5572         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5573                 mlx5_free(pool);
5574                 rte_spinlock_unlock(&cmng->pool_update_sl);
5575                 return NULL;
5576         }
5577         cmng->pools[pool->index] = pool;
5578         cmng->n_valid++;
5579         if (unlikely(fallback)) {
5580                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5581
5582                 if (base < cmng->min_id)
5583                         cmng->min_id = base;
5584                 if (base > cmng->max_id)
5585                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5586                 cmng->last_pool_idx = pool->index;
5587         }
5588         rte_spinlock_unlock(&cmng->pool_update_sl);
5589         return pool;
5590 }
5591
5592 /**
5593  * Prepare a new counter and/or a new counter pool.
5594  *
5595  * @param[in] dev
5596  *   Pointer to the Ethernet device structure.
 * @param[out] cnt_free
 *   Where to put the pointer to a new counter.
 * @param[in] age
 *   Whether the pool is for counters allocated for aging.
5601  *
5602  * @return
5603  *   The counter pool pointer and @p cnt_free is set on success,
5604  *   NULL otherwise and rte_errno is set.
5605  */
5606 static struct mlx5_flow_counter_pool *
5607 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5608                              struct mlx5_flow_counter **cnt_free,
5609                              uint32_t age)
5610 {
5611         struct mlx5_priv *priv = dev->data->dev_private;
5612         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5613         struct mlx5_flow_counter_pool *pool;
5614         struct mlx5_counters tmp_tq;
5615         struct mlx5_devx_obj *dcs = NULL;
5616         struct mlx5_flow_counter *cnt;
5617         enum mlx5_counter_type cnt_type =
5618                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5619         bool fallback = priv->sh->cmng.counter_fallback;
5620         uint32_t i;
5621
5622         if (fallback) {
5623                 /* bulk_bitmap must be 0 for single counter allocation. */
5624                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5625                 if (!dcs)
5626                         return NULL;
5627                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5628                 if (!pool) {
5629                         pool = flow_dv_pool_create(dev, dcs, age);
5630                         if (!pool) {
5631                                 mlx5_devx_cmd_destroy(dcs);
5632                                 return NULL;
5633                         }
5634                 }
5635                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5636                 cnt = MLX5_POOL_GET_CNT(pool, i);
5637                 cnt->pool = pool;
5638                 cnt->dcs_when_free = dcs;
5639                 *cnt_free = cnt;
5640                 return pool;
5641         }
5642         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5643         if (!dcs) {
5644                 rte_errno = ENODATA;
5645                 return NULL;
5646         }
5647         pool = flow_dv_pool_create(dev, dcs, age);
5648         if (!pool) {
5649                 mlx5_devx_cmd_destroy(dcs);
5650                 return NULL;
5651         }
5652         TAILQ_INIT(&tmp_tq);
5653         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5654                 cnt = MLX5_POOL_GET_CNT(pool, i);
5655                 cnt->pool = pool;
5656                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5657         }
5658         rte_spinlock_lock(&cmng->csl[cnt_type]);
5659         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5660         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5661         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5662         (*cnt_free)->pool = pool;
5663         return pool;
5664 }
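/*
 * Editor's note: in the regular (non-fallback) path one DevX bulk allocation
 * backs a whole pool. Every counter except entry 0 is pushed onto the
 * per-type global free list in a single TAILQ_CONCAT() under the list lock,
 * while entry 0 is handed back directly:
 *
 *     *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
 *
 * In fallback mode counters are allocated one at a time (bulk bitmap 0) and
 * placed into the pool slot derived from dcs->id.
 */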
5665
5666 /**
5667  * Allocate a flow counter.
5668  *
5669  * @param[in] dev
5670  *   Pointer to the Ethernet device structure.
5671  * @param[in] age
5672  *   Whether the counter was allocated for aging.
5673  *
5674  * @return
5675  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5676  */
5677 static uint32_t
5678 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5679 {
5680         struct mlx5_priv *priv = dev->data->dev_private;
5681         struct mlx5_flow_counter_pool *pool = NULL;
5682         struct mlx5_flow_counter *cnt_free = NULL;
5683         bool fallback = priv->sh->cmng.counter_fallback;
5684         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5685         enum mlx5_counter_type cnt_type =
5686                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5687         uint32_t cnt_idx;
5688
5689         if (!priv->config.devx) {
5690                 rte_errno = ENOTSUP;
5691                 return 0;
5692         }
5693         /* Get free counters from container. */
5694         rte_spinlock_lock(&cmng->csl[cnt_type]);
5695         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5696         if (cnt_free)
5697                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5698         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5699         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5700                 goto err;
5701         pool = cnt_free->pool;
5702         if (fallback)
5703                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
        /* Create a DV counter action only on first-time usage. */
5705         if (!cnt_free->action) {
5706                 uint16_t offset;
5707                 struct mlx5_devx_obj *dcs;
5708                 int ret;
5709
5710                 if (!fallback) {
5711                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5712                         dcs = pool->min_dcs;
5713                 } else {
5714                         offset = 0;
5715                         dcs = cnt_free->dcs_when_free;
5716                 }
5717                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5718                                                             &cnt_free->action);
5719                 if (ret) {
5720                         rte_errno = errno;
5721                         goto err;
5722                 }
5723         }
5724         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5725                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5726         /* Update the counter reset values. */
5727         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5728                                  &cnt_free->bytes))
5729                 goto err;
5730         if (!fallback && !priv->sh->cmng.query_thread_on)
5731                 /* Start the asynchronous batch query by the host thread. */
5732                 mlx5_set_query_alarm(priv->sh);
5733         return cnt_idx;
5734 err:
5735         if (cnt_free) {
5736                 cnt_free->pool = pool;
5737                 if (fallback)
5738                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5739                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5740                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5741                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5742         }
5743         return 0;
5744 }
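/*
 * Editor's note: a minimal lifecycle sketch for a driver-internal,
 * non-shared, non-aging counter using the helper above and
 * flow_dv_counter_free() defined later in this file:
 *
 *     uint32_t cnt = flow_dv_counter_alloc(dev, 0);
 *
 *     if (!cnt)
 *             return -rte_errno;
 *     ... attach cnt to a flow, read it via _flow_dv_query_count() ...
 *     flow_dv_counter_free(dev, cnt);
 *
 * The returned value is an encoded 1-based index rather than a pointer,
 * which is why 0 can safely signal failure.
 */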
5745
5746 /**
5747  * Allocate a shared flow counter.
5748  *
5749  * @param[in] ctx
5750  *   Pointer to the shared counter configuration.
 * @param[out] data
 *   Pointer to save the allocated counter index.
 *
 * @return
 *   0 on success, otherwise a negative errno value.
 */
5758 static int32_t
5759 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5760 {
5761         struct mlx5_shared_counter_conf *conf = ctx;
5762         struct rte_eth_dev *dev = conf->dev;
5763         struct mlx5_flow_counter *cnt;
5764
5765         data->dword = flow_dv_counter_alloc(dev, 0);
5766         data->dword |= MLX5_CNT_SHARED_OFFSET;
5767         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5768         cnt->shared_info.id = conf->id;
5769         return 0;
5770 }
5771
5772 /**
5773  * Get a shared flow counter.
5774  *
5775  * @param[in] dev
5776  *   Pointer to the Ethernet device structure.
5777  * @param[in] id
5778  *   Counter identifier.
5779  *
5780  * @return
5781  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5782  */
5783 static uint32_t
5784 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5785 {
5786         struct mlx5_priv *priv = dev->data->dev_private;
5787         struct mlx5_shared_counter_conf conf = {
5788                 .dev = dev,
5789                 .id = id,
5790         };
5791         union mlx5_l3t_data data = {
5792                 .dword = 0,
5793         };
5794
5795         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5796                                flow_dv_counter_alloc_shared_cb, &conf);
5797         return data.dword;
5798 }
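/*
 * Editor's note: shared counters are deduplicated through the three-level
 * table priv->sh->cnt_id_tbl, keyed by the application-provided id.
 * mlx5_l3t_prepare_entry() either returns an existing entry or invokes
 * flow_dv_counter_alloc_shared_cb() to allocate a fresh counter, whose index
 * is tagged with MLX5_CNT_SHARED_OFFSET so that flow_dv_counter_free() knows
 * to clear the table entry on release.
 */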
5799
5800 /**
5801  * Get age param from counter index.
5802  *
5803  * @param[in] dev
5804  *   Pointer to the Ethernet device structure.
5805  * @param[in] counter
5806  *   Index to the counter handler.
5807  *
5808  * @return
5809  *   The aging parameter specified for the counter index.
5810  */
5811 static struct mlx5_age_param*
5812 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5813                                 uint32_t counter)
5814 {
5815         struct mlx5_flow_counter *cnt;
5816         struct mlx5_flow_counter_pool *pool = NULL;
5817
5818         flow_dv_counter_get_by_idx(dev, counter, &pool);
5819         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5820         cnt = MLX5_POOL_GET_CNT(pool, counter);
5821         return MLX5_CNT_TO_AGE(cnt);
5822 }
5823
5824 /**
5825  * Remove a flow counter from aged counter list.
5826  *
5827  * @param[in] dev
5828  *   Pointer to the Ethernet device structure.
5829  * @param[in] counter
5830  *   Index to the counter handler.
5831  * @param[in] cnt
5832  *   Pointer to the counter handler.
5833  */
5834 static void
5835 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5836                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5837 {
5838         struct mlx5_age_info *age_info;
5839         struct mlx5_age_param *age_param;
5840         struct mlx5_priv *priv = dev->data->dev_private;
5841         uint16_t expected = AGE_CANDIDATE;
5842
5843         age_info = GET_PORT_AGE_INFO(priv);
5844         age_param = flow_dv_counter_idx_get_age(dev, counter);
5845         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5846                                          AGE_FREE, false, __ATOMIC_RELAXED,
5847                                          __ATOMIC_RELAXED)) {
                /*
                 * We need the lock even on age timeout, since the
                 * counter may still be in process.
                 */
5852                 rte_spinlock_lock(&age_info->aged_sl);
5853                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5854                 rte_spinlock_unlock(&age_info->aged_sl);
5855                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5856         }
5857 }
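/*
 * Editor's note: age_param->state acts as a small atomic state machine, and
 * the compare-exchange above distinguishes the two release paths:
 *
 *     AGE_CANDIDATE  --cas-->  AGE_FREE     (not aged out yet)
 *     any other state: the counter already aged out and sits on the
 *     aged_counters list, so remove it under aged_sl, then store AGE_FREE.
 *
 * The spinlock is still needed because the aging event handler may be
 * walking the aged list concurrently.
 */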
5858
5859 /**
5860  * Release a flow counter.
5861  *
5862  * @param[in] dev
5863  *   Pointer to the Ethernet device structure.
5864  * @param[in] counter
5865  *   Index to the counter handler.
5866  */
5867 static void
5868 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5869 {
5870         struct mlx5_priv *priv = dev->data->dev_private;
5871         struct mlx5_flow_counter_pool *pool = NULL;
5872         struct mlx5_flow_counter *cnt;
5873         enum mlx5_counter_type cnt_type;
5874
5875         if (!counter)
5876                 return;
5877         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5878         MLX5_ASSERT(pool);
5879         if (IS_SHARED_CNT(counter) &&
5880             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5881                 return;
5882         if (pool->is_aged)
5883                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5884         cnt->pool = pool;
        /*
         * Put the counter back to the list to be updated in non-fallback
         * mode. Two lists are used alternately: while one is being queried,
         * freed counters are added to the other, selected by the pool
         * query_gen value. After a query finishes, the counters on that
         * list are moved to the global container counter list. The lists
         * swap when a query starts, so no lock is needed here: the query
         * callback and this release function operate on different lists.
         */
5895         if (!priv->sh->cmng.counter_fallback) {
5896                 rte_spinlock_lock(&pool->csl);
5897                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5898                 rte_spinlock_unlock(&pool->csl);
5899         } else {
5900                 cnt->dcs_when_free = cnt->dcs_when_active;
5901                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5902                                            MLX5_COUNTER_TYPE_ORIGIN;
5903                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5904                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5905                                   cnt, next);
5906                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5907         }
5908 }
5909
5910 /**
5911  * Verify the @p attributes will be correctly understood by the NIC and store
5912  * them in the @p flow if everything is correct.
5913  *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload object, NULL for a non-tunnel rule.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] grp_info
 *   Pointer to the flow group translation info.
5920  * @param[out] error
5921  *   Pointer to error structure.
5922  *
5923  * @return
 *   - 0 on success and non-root table.
5925  *   - 1 on success and root table.
5926  *   - a negative errno value otherwise and rte_errno is set.
5927  */
5928 static int
5929 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5930                             const struct mlx5_flow_tunnel *tunnel,
5931                             const struct rte_flow_attr *attributes,
5932                             const struct flow_grp_info *grp_info,
5933                             struct rte_flow_error *error)
5934 {
5935         struct mlx5_priv *priv = dev->data->dev_private;
5936         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
5937         int ret = 0;
5938
5939 #ifndef HAVE_MLX5DV_DR
5940         RTE_SET_USED(tunnel);
5941         RTE_SET_USED(grp_info);
5942         if (attributes->group)
5943                 return rte_flow_error_set(error, ENOTSUP,
5944                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5945                                           NULL,
5946                                           "groups are not supported");
5947 #else
5948         uint32_t table = 0;
5949
5950         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5951                                        grp_info, error);
5952         if (ret)
5953                 return ret;
5954         if (!table)
5955                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5956 #endif
5957         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
5958             attributes->priority > lowest_priority)
5959                 return rte_flow_error_set(error, ENOTSUP,
5960                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5961                                           NULL,
5962                                           "priority out of range");
5963         if (attributes->transfer) {
5964                 if (!priv->config.dv_esw_en)
5965                         return rte_flow_error_set
5966                                 (error, ENOTSUP,
5967                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5968                                  "E-Switch dr is not supported");
5969                 if (!(priv->representor || priv->master))
5970                         return rte_flow_error_set
5971                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5972                                  NULL, "E-Switch configuration can only be"
5973                                  " done by a master or a representor device");
5974                 if (attributes->egress)
5975                         return rte_flow_error_set
5976                                 (error, ENOTSUP,
5977                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5978                                  "egress is not supported");
5979         }
5980         if (!(attributes->egress ^ attributes->ingress))
5981                 return rte_flow_error_set(error, ENOTSUP,
5982                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5983                                           "must specify exactly one of "
5984                                           "ingress or egress");
5985         return ret;
5986 }
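/*
 * Editor's note: a minimal attribute set accepted by the validation above
 * for a plain NIC Rx rule: exactly one of ingress/egress, no transfer, and
 * a priority within the configured range.
 *
 *     struct rte_flow_attr attr = {
 *             .group = 0,
 *             .priority = 0,
 *             .ingress = 1,
 *     };
 *
 * Under HAVE_MLX5DV_DR, a return value of
 * MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL (1) tells the caller that the rule
 * lands on the root table.
 */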
5987
5988 /**
5989  * Internal validation function. For validating both actions and items.
5990  *
5991  * @param[in] dev
5992  *   Pointer to the rte_eth_dev structure.
5993  * @param[in] attr
5994  *   Pointer to the flow attributes.
5995  * @param[in] items
5996  *   Pointer to the list of items.
5997  * @param[in] actions
5998  *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
6001  * @param[in] hairpin
6002  *   Number of hairpin TX actions, 0 means classic flow.
6003  * @param[out] error
6004  *   Pointer to the error structure.
6005  *
6006  * @return
6007  *   0 on success, a negative errno value otherwise and rte_errno is set.
6008  */
6009 static int
6010 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6011                  const struct rte_flow_item items[],
6012                  const struct rte_flow_action actions[],
6013                  bool external, int hairpin, struct rte_flow_error *error)
6014 {
6015         int ret;
6016         uint64_t action_flags = 0;
6017         uint64_t item_flags = 0;
6018         uint64_t last_item = 0;
6019         uint8_t next_protocol = 0xff;
6020         uint16_t ether_type = 0;
6021         int actions_n = 0;
6022         uint8_t item_ipv6_proto = 0;
6023         const struct rte_flow_item *geneve_item = NULL;
6024         const struct rte_flow_item *gre_item = NULL;
6025         const struct rte_flow_item *gtp_item = NULL;
6026         const struct rte_flow_action_raw_decap *decap;
6027         const struct rte_flow_action_raw_encap *encap;
6028         const struct rte_flow_action_rss *rss = NULL;
6029         const struct rte_flow_action_rss *sample_rss = NULL;
6030         const struct rte_flow_action_count *count = NULL;
6031         const struct rte_flow_action_count *sample_count = NULL;
6032         const struct rte_flow_item_tcp nic_tcp_mask = {
6033                 .hdr = {
6034                         .tcp_flags = 0xFF,
6035                         .src_port = RTE_BE16(UINT16_MAX),
6036                         .dst_port = RTE_BE16(UINT16_MAX),
6037                 }
6038         };
6039         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6040                 .hdr = {
6041                         .src_addr =
6042                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6043                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6044                         .dst_addr =
6045                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6046                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6047                         .vtc_flow = RTE_BE32(0xffffffff),
6048                         .proto = 0xff,
6049                         .hop_limits = 0xff,
6050                 },
6051                 .has_frag_ext = 1,
6052         };
6053         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6054                 .hdr = {
6055                         .common = {
6056                                 .u32 =
6057                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6058                                         .type = 0xFF,
6059                                         }).u32),
6060                         },
6061                         .dummy[0] = 0xffffffff,
6062                 },
6063         };
6064         struct mlx5_priv *priv = dev->data->dev_private;
6065         struct mlx5_dev_config *dev_conf = &priv->config;
6066         uint16_t queue_index = 0xFFFF;
6067         const struct rte_flow_item_vlan *vlan_m = NULL;
6068         uint32_t rw_act_num = 0;
6069         uint64_t is_root;
6070         const struct mlx5_flow_tunnel *tunnel;
6071         struct flow_grp_info grp_info = {
6072                 .external = !!external,
6073                 .transfer = !!attr->transfer,
6074                 .fdb_def_rule = !!priv->fdb_def_rule,
6075         };
6076         const struct rte_eth_hairpin_conf *conf;
6077
6078         if (items == NULL)
6079                 return -1;
6080         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
6081                 tunnel = flow_items_to_tunnel(items);
6082                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6083                                 MLX5_FLOW_ACTION_DECAP;
6084         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
6085                 tunnel = flow_actions_to_tunnel(actions);
6086                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6087         } else {
6088                 tunnel = NULL;
6089         }
6090         if (tunnel && priv->representor)
6091                 return rte_flow_error_set(error, ENOTSUP,
6092                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6093                                           "decap not supported "
6094                                           "for VF representor");
6095         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6096                                 (dev, tunnel, attr, items, actions);
6097         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6098         if (ret < 0)
6099                 return ret;
6100         is_root = (uint64_t)ret;
6101         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6102                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6103                 int type = items->type;
6104
6105                 if (!mlx5_flow_os_item_supported(type))
6106                         return rte_flow_error_set(error, ENOTSUP,
6107                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6108                                                   NULL, "item not supported");
6109                 switch (type) {
6110                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
6111                         if (items[0].type != (typeof(items[0].type))
6112                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
6113                                 return rte_flow_error_set
6114                                                 (error, EINVAL,
6115                                                 RTE_FLOW_ERROR_TYPE_ITEM,
6116                                                 NULL, "MLX5 private items "
6117                                                 "must be the first");
6118                         break;
6119                 case RTE_FLOW_ITEM_TYPE_VOID:
6120                         break;
6121                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6122                         ret = flow_dv_validate_item_port_id
6123                                         (dev, items, attr, item_flags, error);
6124                         if (ret < 0)
6125                                 return ret;
6126                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6127                         break;
6128                 case RTE_FLOW_ITEM_TYPE_ETH:
6129                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6130                                                           true, error);
6131                         if (ret < 0)
6132                                 return ret;
6133                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6134                                              MLX5_FLOW_LAYER_OUTER_L2;
6135                         if (items->mask != NULL && items->spec != NULL) {
6136                                 ether_type =
6137                                         ((const struct rte_flow_item_eth *)
6138                                          items->spec)->type;
6139                                 ether_type &=
6140                                         ((const struct rte_flow_item_eth *)
6141                                          items->mask)->type;
6142                                 ether_type = rte_be_to_cpu_16(ether_type);
6143                         } else {
6144                                 ether_type = 0;
6145                         }
6146                         break;
6147                 case RTE_FLOW_ITEM_TYPE_VLAN:
6148                         ret = flow_dv_validate_item_vlan(items, item_flags,
6149                                                          dev, error);
6150                         if (ret < 0)
6151                                 return ret;
6152                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6153                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6154                         if (items->mask != NULL && items->spec != NULL) {
6155                                 ether_type =
6156                                         ((const struct rte_flow_item_vlan *)
6157                                          items->spec)->inner_type;
6158                                 ether_type &=
6159                                         ((const struct rte_flow_item_vlan *)
6160                                          items->mask)->inner_type;
6161                                 ether_type = rte_be_to_cpu_16(ether_type);
6162                         } else {
6163                                 ether_type = 0;
6164                         }
6165                         /* Store outer VLAN mask for of_push_vlan action. */
6166                         if (!tunnel)
6167                                 vlan_m = items->mask;
6168                         break;
6169                 case RTE_FLOW_ITEM_TYPE_IPV4:
6170                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6171                                                   &item_flags, &tunnel);
6172                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6173                                                          last_item, ether_type,
6174                                                          error);
6175                         if (ret < 0)
6176                                 return ret;
6177                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6178                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6179                         if (items->mask != NULL &&
6180                             ((const struct rte_flow_item_ipv4 *)
6181                              items->mask)->hdr.next_proto_id) {
6182                                 next_protocol =
6183                                         ((const struct rte_flow_item_ipv4 *)
6184                                          (items->spec))->hdr.next_proto_id;
6185                                 next_protocol &=
6186                                         ((const struct rte_flow_item_ipv4 *)
6187                                          (items->mask))->hdr.next_proto_id;
6188                         } else {
6189                                 /* Reset for inner layer. */
6190                                 next_protocol = 0xff;
6191                         }
6192                         break;
6193                 case RTE_FLOW_ITEM_TYPE_IPV6:
6194                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6195                                                   &item_flags, &tunnel);
6196                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6197                                                            last_item,
6198                                                            ether_type,
6199                                                            &nic_ipv6_mask,
6200                                                            error);
6201                         if (ret < 0)
6202                                 return ret;
6203                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6204                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6205                         if (items->mask != NULL &&
6206                             ((const struct rte_flow_item_ipv6 *)
6207                              items->mask)->hdr.proto) {
6208                                 item_ipv6_proto =
6209                                         ((const struct rte_flow_item_ipv6 *)
6210                                          items->spec)->hdr.proto;
6211                                 next_protocol =
6212                                         ((const struct rte_flow_item_ipv6 *)
6213                                          items->spec)->hdr.proto;
6214                                 next_protocol &=
6215                                         ((const struct rte_flow_item_ipv6 *)
6216                                          items->mask)->hdr.proto;
6217                         } else {
6218                                 /* Reset for inner layer. */
6219                                 next_protocol = 0xff;
6220                         }
6221                         break;
6222                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6223                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6224                                                                   item_flags,
6225                                                                   error);
6226                         if (ret < 0)
6227                                 return ret;
6228                         last_item = tunnel ?
6229                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6230                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6231                         if (items->mask != NULL &&
6232                             ((const struct rte_flow_item_ipv6_frag_ext *)
6233                              items->mask)->hdr.next_header) {
6234                                 next_protocol =
6235                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6236                                  items->spec)->hdr.next_header;
6237                                 next_protocol &=
6238                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6239                                  items->mask)->hdr.next_header;
6240                         } else {
6241                                 /* Reset for inner layer. */
6242                                 next_protocol = 0xff;
6243                         }
6244                         break;
6245                 case RTE_FLOW_ITEM_TYPE_TCP:
6246                         ret = mlx5_flow_validate_item_tcp
6247                                                 (items, item_flags,
6248                                                  next_protocol,
6249                                                  &nic_tcp_mask,
6250                                                  error);
6251                         if (ret < 0)
6252                                 return ret;
6253                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6254                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6255                         break;
6256                 case RTE_FLOW_ITEM_TYPE_UDP:
6257                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6258                                                           next_protocol,
6259                                                           error);
6260                         if (ret < 0)
6261                                 return ret;
6262                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6263                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6264                         break;
6265                 case RTE_FLOW_ITEM_TYPE_GRE:
6266                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6267                                                           next_protocol, error);
6268                         if (ret < 0)
6269                                 return ret;
6270                         gre_item = items;
6271                         last_item = MLX5_FLOW_LAYER_GRE;
6272                         break;
6273                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6274                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6275                                                             next_protocol,
6276                                                             error);
6277                         if (ret < 0)
6278                                 return ret;
6279                         last_item = MLX5_FLOW_LAYER_NVGRE;
6280                         break;
6281                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6282                         ret = mlx5_flow_validate_item_gre_key
6283                                 (items, item_flags, gre_item, error);
6284                         if (ret < 0)
6285                                 return ret;
6286                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6287                         break;
6288                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6289                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6290                                                             error);
6291                         if (ret < 0)
6292                                 return ret;
6293                         last_item = MLX5_FLOW_LAYER_VXLAN;
6294                         break;
6295                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6296                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6297                                                                 item_flags, dev,
6298                                                                 error);
6299                         if (ret < 0)
6300                                 return ret;
6301                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6302                         break;
6303                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6304                         ret = mlx5_flow_validate_item_geneve(items,
6305                                                              item_flags, dev,
6306                                                              error);
6307                         if (ret < 0)
6308                                 return ret;
6309                         geneve_item = items;
6310                         last_item = MLX5_FLOW_LAYER_GENEVE;
6311                         break;
6312                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6313                         ret = mlx5_flow_validate_item_geneve_opt(items,
6314                                                                  last_item,
6315                                                                  geneve_item,
6316                                                                  dev,
6317                                                                  error);
6318                         if (ret < 0)
6319                                 return ret;
6320                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6321                         break;
6322                 case RTE_FLOW_ITEM_TYPE_MPLS:
6323                         ret = mlx5_flow_validate_item_mpls(dev, items,
6324                                                            item_flags,
6325                                                            last_item, error);
6326                         if (ret < 0)
6327                                 return ret;
6328                         last_item = MLX5_FLOW_LAYER_MPLS;
6329                         break;
6330
6331                 case RTE_FLOW_ITEM_TYPE_MARK:
6332                         ret = flow_dv_validate_item_mark(dev, items, attr,
6333                                                          error);
6334                         if (ret < 0)
6335                                 return ret;
6336                         last_item = MLX5_FLOW_ITEM_MARK;
6337                         break;
6338                 case RTE_FLOW_ITEM_TYPE_META:
6339                         ret = flow_dv_validate_item_meta(dev, items, attr,
6340                                                          error);
6341                         if (ret < 0)
6342                                 return ret;
6343                         last_item = MLX5_FLOW_ITEM_METADATA;
6344                         break;
6345                 case RTE_FLOW_ITEM_TYPE_ICMP:
6346                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6347                                                            next_protocol,
6348                                                            error);
6349                         if (ret < 0)
6350                                 return ret;
6351                         last_item = MLX5_FLOW_LAYER_ICMP;
6352                         break;
6353                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6354                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6355                                                             next_protocol,
6356                                                             error);
6357                         if (ret < 0)
6358                                 return ret;
6359                         item_ipv6_proto = IPPROTO_ICMPV6;
6360                         last_item = MLX5_FLOW_LAYER_ICMP6;
6361                         break;
6362                 case RTE_FLOW_ITEM_TYPE_TAG:
6363                         ret = flow_dv_validate_item_tag(dev, items,
6364                                                         attr, error);
6365                         if (ret < 0)
6366                                 return ret;
6367                         last_item = MLX5_FLOW_ITEM_TAG;
6368                         break;
6369                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6370                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6371                         break;
6372                 case RTE_FLOW_ITEM_TYPE_GTP:
6373                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
6374                                                         error);
6375                         if (ret < 0)
6376                                 return ret;
6377                         gtp_item = items;
6378                         last_item = MLX5_FLOW_LAYER_GTP;
6379                         break;
6380                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6381                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
6382                                                             gtp_item, attr,
6383                                                             error);
6384                         if (ret < 0)
6385                                 return ret;
6386                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
6387                         break;
6388                 case RTE_FLOW_ITEM_TYPE_ECPRI:
6389                         /* Capacity will be checked in the translate stage. */
6390                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
6391                                                             last_item,
6392                                                             ether_type,
6393                                                             &nic_ecpri_mask,
6394                                                             error);
6395                         if (ret < 0)
6396                                 return ret;
6397                         last_item = MLX5_FLOW_LAYER_ECPRI;
6398                         break;
6399                 default:
6400                         return rte_flow_error_set(error, ENOTSUP,
6401                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6402                                                   NULL, "item not supported");
6403                 }
6404                 item_flags |= last_item;
6405         }
6406         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6407                 int type = actions->type;
6408
6409                 if (!mlx5_flow_os_action_supported(type))
6410                         return rte_flow_error_set(error, ENOTSUP,
6411                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6412                                                   actions,
6413                                                   "action not supported");
6414                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
6415                         return rte_flow_error_set(error, ENOTSUP,
6416                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6417                                                   actions, "too many actions");
6418                 switch (type) {
6419                 case RTE_FLOW_ACTION_TYPE_VOID:
6420                         break;
6421                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
6422                         ret = flow_dv_validate_action_port_id(dev,
6423                                                               action_flags,
6424                                                               actions,
6425                                                               attr,
6426                                                               error);
6427                         if (ret)
6428                                 return ret;
6429                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
6430                         ++actions_n;
6431                         break;
6432                 case RTE_FLOW_ACTION_TYPE_FLAG:
6433                         ret = flow_dv_validate_action_flag(dev, action_flags,
6434                                                            attr, error);
6435                         if (ret < 0)
6436                                 return ret;
6437                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6438                                 /* Count all modify-header actions as one. */
6439                                 if (!(action_flags &
6440                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6441                                         ++actions_n;
6442                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
6443                                                 MLX5_FLOW_ACTION_MARK_EXT;
6444                         } else {
6445                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
6446                                 ++actions_n;
6447                         }
6448                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6449                         break;
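                             /*
                              * A minimal sketch (an assumption, not driver code): in
                              * extended metadata mode (dv_xmeta_en !=
                              * MLX5_XMETA_MODE_LEGACY), FLAG/MARK are emulated with
                              * register modify-header writes, so an action list like
                              *
                              *   { FLAG, SET_TAG, SET_META, QUEUE, END }
                              *
                              * bumps actions_n only once for the modify-header group
                              * (they share one device action), while rw_act_num
                              * still accounts for each register write separately.
                              */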
6450                 case RTE_FLOW_ACTION_TYPE_MARK:
6451                         ret = flow_dv_validate_action_mark(dev, actions,
6452                                                            action_flags,
6453                                                            attr, error);
6454                         if (ret < 0)
6455                                 return ret;
6456                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6457                                 /* Count all modify-header actions as one. */
6458                                 if (!(action_flags &
6459                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6460                                         ++actions_n;
6461                                 action_flags |= MLX5_FLOW_ACTION_MARK |
6462                                                 MLX5_FLOW_ACTION_MARK_EXT;
6463                         } else {
6464                                 action_flags |= MLX5_FLOW_ACTION_MARK;
6465                                 ++actions_n;
6466                         }
6467                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6468                         break;
6469                 case RTE_FLOW_ACTION_TYPE_SET_META:
6470                         ret = flow_dv_validate_action_set_meta(dev, actions,
6471                                                                action_flags,
6472                                                                attr, error);
6473                         if (ret < 0)
6474                                 return ret;
6475                         /* Count all modify-header actions as one action. */
6476                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6477                                 ++actions_n;
6478                         action_flags |= MLX5_FLOW_ACTION_SET_META;
6479                         rw_act_num += MLX5_ACT_NUM_SET_META;
6480                         break;
6481                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
6482                         ret = flow_dv_validate_action_set_tag(dev, actions,
6483                                                               action_flags,
6484                                                               attr, error);
6485                         if (ret < 0)
6486                                 return ret;
6487                         /* Count all modify-header actions as one action. */
6488                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6489                                 ++actions_n;
6490                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6491                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6492                         break;
6493                 case RTE_FLOW_ACTION_TYPE_DROP:
6494                         ret = mlx5_flow_validate_action_drop(action_flags,
6495                                                              attr, error);
6496                         if (ret < 0)
6497                                 return ret;
6498                         action_flags |= MLX5_FLOW_ACTION_DROP;
6499                         ++actions_n;
6500                         break;
6501                 case RTE_FLOW_ACTION_TYPE_QUEUE:
6502                         ret = mlx5_flow_validate_action_queue(actions,
6503                                                               action_flags, dev,
6504                                                               attr, error);
6505                         if (ret < 0)
6506                                 return ret;
6507                         queue_index = ((const struct rte_flow_action_queue *)
6508                                                         (actions->conf))->index;
6509                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
6510                         ++actions_n;
6511                         break;
6512                 case RTE_FLOW_ACTION_TYPE_RSS:
6513                         rss = actions->conf;
6514                         ret = mlx5_flow_validate_action_rss(actions,
6515                                                             action_flags, dev,
6516                                                             attr, item_flags,
6517                                                             error);
6518                         if (ret < 0)
6519                                 return ret;
6520                         if (rss && sample_rss &&
6521                             (sample_rss->level != rss->level ||
6522                             sample_rss->types != rss->types))
6523                                 return rte_flow_error_set(error, ENOTSUP,
6524                                         RTE_FLOW_ERROR_TYPE_ACTION,
6525                                         NULL,
6526                                         "Cannot use different RSS types "
6527                                         "or levels in the same flow");
6528                         if (rss != NULL && rss->queue_num)
6529                                 queue_index = rss->queue[0];
6530                         action_flags |= MLX5_FLOW_ACTION_RSS;
6531                         ++actions_n;
6532                         break;
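                             /*
                              * A hedged example (values assumed): when a SAMPLE
                              * action in the same rule also carries an RSS conf,
                              * both configurations must agree, e.g. both using
                              * { .level = 0, .types = ETH_RSS_IP }. A differing
                              * level or types is rejected above because the split
                              * sub-flows could not share a single RSS expansion.
                              */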
6533                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6534                         ret = mlx5_flow_validate_action_default_miss
6535                                                 (action_flags, attr,
6536                                                  error);
6537                         if (ret < 0)
6538                                 return ret;
6539                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
6540                         ++actions_n;
6541                         break;
6542                 case RTE_FLOW_ACTION_TYPE_COUNT:
6543                         ret = flow_dv_validate_action_count(dev, actions,
6544                                                             action_flags,
6545                                                             error);
6546                         if (ret < 0)
6547                                 return ret;
6548                         count = actions->conf;
6549                         action_flags |= MLX5_FLOW_ACTION_COUNT;
6550                         ++actions_n;
6551                         break;
6552                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
6553                         if (flow_dv_validate_action_pop_vlan(dev,
6554                                                              action_flags,
6555                                                              actions,
6556                                                              item_flags, attr,
6557                                                              error))
6558                                 return -rte_errno;
6559                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
6560                         ++actions_n;
6561                         break;
6562                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6563                         ret = flow_dv_validate_action_push_vlan(dev,
6564                                                                 action_flags,
6565                                                                 vlan_m,
6566                                                                 actions, attr,
6567                                                                 error);
6568                         if (ret < 0)
6569                                 return ret;
6570                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
6571                         ++actions_n;
6572                         break;
6573                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
6574                         ret = flow_dv_validate_action_set_vlan_pcp
6575                                                 (action_flags, actions, error);
6576                         if (ret < 0)
6577                                 return ret;
6578                         /* Count PCP with push_vlan command. */
6579                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
6580                         break;
6581                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6582                         ret = flow_dv_validate_action_set_vlan_vid
6583                                                 (item_flags, action_flags,
6584                                                  actions, error);
6585                         if (ret < 0)
6586                                 return ret;
6587                         /* Count VID with push_vlan command. */
6588                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
6589                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
6590                         break;
6591                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6592                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6593                         ret = flow_dv_validate_action_l2_encap(dev,
6594                                                                action_flags,
6595                                                                actions, attr,
6596                                                                error);
6597                         if (ret < 0)
6598                                 return ret;
6599                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
6600                         ++actions_n;
6601                         break;
6602                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6603                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6604                         ret = flow_dv_validate_action_decap(dev, action_flags,
6605                                                             actions, item_flags,
6606                                                             attr, error);
6607                         if (ret < 0)
6608                                 return ret;
6609                         action_flags |= MLX5_FLOW_ACTION_DECAP;
6610                         ++actions_n;
6611                         break;
6612                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6613                         ret = flow_dv_validate_action_raw_encap_decap
6614                                 (dev, NULL, actions->conf, attr, &action_flags,
6615                                  &actions_n, actions, item_flags, error);
6616                         if (ret < 0)
6617                                 return ret;
6618                         break;
6619                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6620                         decap = actions->conf;
6621                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
6622                                 ;
6623                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
6624                                 encap = NULL;
6625                                 actions--;
6626                         } else {
6627                                 encap = actions->conf;
6628                         }
6629                         ret = flow_dv_validate_action_raw_encap_decap
6630                                            (dev,
6631                                             decap ? decap : &empty_decap, encap,
6632                                             attr, &action_flags, &actions_n,
6633                                             actions, item_flags, error);
6634                         if (ret < 0)
6635                                 return ret;
6636                         break;
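                             /*
                              * Illustrative application-side sketch (names are local
                              * assumptions): the lookahead above lets RAW_DECAP pair
                              * with a following RAW_ENCAP across interleaved VOIDs:
                              *
                              *   struct rte_flow_action acts[] = {
                              *           { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP,
                              *             .conf = &raw_decap },
                              *           { .type = RTE_FLOW_ACTION_TYPE_VOID },
                              *           { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
                              *             .conf = &raw_encap },
                              *           { .type = RTE_FLOW_ACTION_TYPE_END },
                              *   };
                              *
                              * The pair is then validated as one decap/encap
                              * combination rather than as two standalone actions.
                              */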
6637                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
6638                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
6639                         ret = flow_dv_validate_action_modify_mac(action_flags,
6640                                                                  actions,
6641                                                                  item_flags,
6642                                                                  error);
6643                         if (ret < 0)
6644                                 return ret;
6645                         /* Count all modify-header actions as one action. */
6646                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6647                                 ++actions_n;
6648                         action_flags |= actions->type ==
6649                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
6650                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
6651                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
6652                         /*
6653                          * Even though the source and destination MAC addresses
6654                          * overlap in the header with 4B alignment, the convert
6655                          * function handles them separately and creates 4 SW
6656                          * actions in total. 2 actions are added each time, no
6657                          * matter how many bytes of the address are set.
6658                          */
6659                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
6660                         break;
6661                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
6662                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
6663                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
6664                                                                   actions,
6665                                                                   item_flags,
6666                                                                   error);
6667                         if (ret < 0)
6668                                 return ret;
6669                         /* Count all modify-header actions as one action. */
6670                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6671                                 ++actions_n;
6672                         action_flags |= actions->type ==
6673                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
6674                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
6675                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
6676                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
6677                         break;
6678                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
6679                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
6680                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
6681                                                                   actions,
6682                                                                   item_flags,
6683                                                                   error);
6684                         if (ret < 0)
6685                                 return ret;
6686                         if (item_ipv6_proto == IPPROTO_ICMPV6)
6687                                 return rte_flow_error_set(error, ENOTSUP,
6688                                         RTE_FLOW_ERROR_TYPE_ACTION,
6689                                         actions,
6690                                         "Can't change header "
6691                                         "with ICMPv6 proto");
6692                         /* Count all modify-header actions as one action. */
6693                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6694                                 ++actions_n;
6695                         action_flags |= actions->type ==
6696                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
6697                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
6698                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
6699                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
6700                         break;
6701                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
6702                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
6703                         ret = flow_dv_validate_action_modify_tp(action_flags,
6704                                                                 actions,
6705                                                                 item_flags,
6706                                                                 error);
6707                         if (ret < 0)
6708                                 return ret;
6709                         /* Count all modify-header actions as one action. */
6710                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6711                                 ++actions_n;
6712                         action_flags |= actions->type ==
6713                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
6714                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
6715                                                 MLX5_FLOW_ACTION_SET_TP_DST;
6716                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
6717                         break;
6718                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
6719                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
6720                         ret = flow_dv_validate_action_modify_ttl(action_flags,
6721                                                                  actions,
6722                                                                  item_flags,
6723                                                                  error);
6724                         if (ret < 0)
6725                                 return ret;
6726                         /* Count all modify-header actions as one action. */
6727                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6728                                 ++actions_n;
6729                         action_flags |= actions->type ==
6730                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
6731                                                 MLX5_FLOW_ACTION_SET_TTL :
6732                                                 MLX5_FLOW_ACTION_DEC_TTL;
6733                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
6734                         break;
6735                 case RTE_FLOW_ACTION_TYPE_JUMP:
6736                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
6737                                                            action_flags,
6738                                                            attr, external,
6739                                                            error);
6740                         if (ret)
6741                                 return ret;
6742                         ++actions_n;
6743                         action_flags |= MLX5_FLOW_ACTION_JUMP;
6744                         break;
6745                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
6746                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
6747                         ret = flow_dv_validate_action_modify_tcp_seq
6748                                                                 (action_flags,
6749                                                                  actions,
6750                                                                  item_flags,
6751                                                                  error);
6752                         if (ret < 0)
6753                                 return ret;
6754                         /* Count all modify-header actions as one action. */
6755                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6756                                 ++actions_n;
6757                         action_flags |= actions->type ==
6758                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
6759                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
6760                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
6761                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
6762                         break;
6763                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
6764                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
6765                         ret = flow_dv_validate_action_modify_tcp_ack
6766                                                                 (action_flags,
6767                                                                  actions,
6768                                                                  item_flags,
6769                                                                  error);
6770                         if (ret < 0)
6771                                 return ret;
6772                         /* Count all modify-header actions as one action. */
6773                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6774                                 ++actions_n;
6775                         action_flags |= actions->type ==
6776                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
6777                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
6778                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
6779                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
6780                         break;
6781                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
6782                         break;
6783                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
6784                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
6785                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6786                         break;
6787                 case RTE_FLOW_ACTION_TYPE_METER:
6788                         ret = mlx5_flow_validate_action_meter(dev,
6789                                                               action_flags,
6790                                                               actions, attr,
6791                                                               error);
6792                         if (ret < 0)
6793                                 return ret;
6794                         action_flags |= MLX5_FLOW_ACTION_METER;
6795                         ++actions_n;
6796                         /* Meter action will add one more TAG action. */
6797                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6798                         break;
6799                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
6800                         if (!attr->transfer && !attr->group)
6801                                 return rte_flow_error_set(error, ENOTSUP,
6802                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6803                                                 NULL, "Shared ASO age action "
6804                                                 "is not supported for group 0");
6805                         action_flags |= MLX5_FLOW_ACTION_AGE;
6806                         ++actions_n;
6807                         break;
6808                 case RTE_FLOW_ACTION_TYPE_AGE:
6809                         ret = flow_dv_validate_action_age(action_flags,
6810                                                           actions, dev,
6811                                                           error);
6812                         if (ret < 0)
6813                                 return ret;
6814                         /*
6815                          * Validate the regular AGE action (using counter)
6816                          * Validate that the regular AGE action (using a counter)
6817                          * is mutually exclusive with shared counter actions.
6818                         if (!priv->sh->flow_hit_aso_en) {
6819                                 if (count && count->shared)
6820                                         return rte_flow_error_set
6821                                                 (error, EINVAL,
6822                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6823                                                 NULL,
6824                                                 "old age and shared count combination is not supported");
6825                                 if (sample_count)
6826                                         return rte_flow_error_set
6827                                                 (error, EINVAL,
6828                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6829                                                 NULL,
6830                                                 "old age action and count must be in the same sub flow");
6831                         }
6832                         action_flags |= MLX5_FLOW_ACTION_AGE;
6833                         ++actions_n;
6834                         break;
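                             /*
                              * A minimal sketch, assuming a device without ASO
                              * flow-hit support (flow_hit_aso_en == 0): combining a
                              * shared counter with AGE in one rule, e.g.
                              *
                              *   struct rte_flow_action_count cnt = {
                              *           .shared = 1, .id = 0 };
                              *   struct rte_flow_action_age age = { .timeout = 10 };
                              *   acts: { COUNT(&cnt), AGE(&age), END }
                              *
                              * is rejected above, because counter-based aging cannot
                              * track hits on a counter shared by several flows.
                              */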
6835                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
6836                         ret = flow_dv_validate_action_modify_ipv4_dscp
6837                                                          (action_flags,
6838                                                           actions,
6839                                                           item_flags,
6840                                                           error);
6841                         if (ret < 0)
6842                                 return ret;
6843                         /* Count all modify-header actions as one action. */
6844                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6845                                 ++actions_n;
6846                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
6847                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6848                         break;
6849                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
6850                         ret = flow_dv_validate_action_modify_ipv6_dscp
6851                                                                 (action_flags,
6852                                                                  actions,
6853                                                                  item_flags,
6854                                                                  error);
6855                         if (ret < 0)
6856                                 return ret;
6857                         /* Count all modify-header actions as one action. */
6858                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6859                                 ++actions_n;
6860                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6861                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6862                         break;
6863                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6864                         ret = flow_dv_validate_action_sample(&action_flags,
6865                                                              actions, dev,
6866                                                              attr, item_flags,
6867                                                              rss, &sample_rss,
6868                                                              &sample_count,
6869                                                              error);
6870                         if (ret < 0)
6871                                 return ret;
6872                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6873                         ++actions_n;
6874                         break;
6875                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6876                         if (actions[0].type != (typeof(actions[0].type))
6877                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6878                                 return rte_flow_error_set
6879                                                 (error, EINVAL,
6880                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6881                                                 NULL, "MLX5 private action "
6882                                                 "must come first");
6883
6884                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6885                         break;
6886                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6887                         if (!attr->transfer && !attr->group)
6888                                 return rte_flow_error_set(error, ENOTSUP,
6889                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6890                                                 NULL, "modify field action "
6891                                                 "is not supported for group 0");
6892                         ret = flow_dv_validate_action_modify_field(action_flags,
6893                                                                  actions,
6894                                                                  error);
6895                         if (ret < 0)
6896                                 return ret;
6897                         /* Count all modify-header actions as one action. */
6898                         if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
6899                                 ++actions_n;
6900                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
6901                         rw_act_num += ret;
6902                         break;
6903                 default:
6904                         return rte_flow_error_set(error, ENOTSUP,
6905                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6906                                                   actions,
6907                                                   "action not supported");
6908                 }
6909         }
6910         /*
6911          * Validate actions in tunnel offload flow rules:
6912          * - An explicit decap action is prohibited by the tunnel offload API.
6913          * - A drop action in a tunnel steer rule is prohibited by the API.
6914          * - The application cannot use the MARK action because its value can
6915          *   mask the tunnel default miss notification.
6916          * - JUMP in a tunnel match rule is not supported by the current PMD
6917          *   implementation.
6918          * - TAG & META are reserved for future use.
6919          */
6920         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6921                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6922                                             MLX5_FLOW_ACTION_MARK     |
6923                                             MLX5_FLOW_ACTION_SET_TAG  |
6924                                             MLX5_FLOW_ACTION_SET_META |
6925                                             MLX5_FLOW_ACTION_DROP;
6926
6927                 if (action_flags & bad_actions_mask)
6928                         return rte_flow_error_set
6929                                         (error, EINVAL,
6930                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6931                                         "Invalid RTE action in tunnel "
6932                                         "set decap rule");
6933                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6934                         return rte_flow_error_set
6935                                         (error, EINVAL,
6936                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6937                                         "tunnel set decap rule must terminate "
6938                                         "with JUMP");
6939                 if (!attr->ingress)
6940                         return rte_flow_error_set
6941                                         (error, EINVAL,
6942                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6943                                         "tunnel flows for ingress traffic only");
6944         }
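             /*
              * Illustrative application-side sketch of a conforming tunnel set
              * decap rule (a sketch against the tunnel offload API, not driver
              * code): the PMD private actions obtained from
              * rte_flow_tunnel_decap_set() come first and the rule terminates
              * with JUMP on an ingress attribute:
              *
              *   struct rte_flow_action *pmd_actions;
              *   uint32_t n_pmd_actions;
              *   rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
              *                             &n_pmd_actions, &err);
              *   acts: { pmd_actions..., JUMP(group), END }
              *   attr: { .ingress = 1 }
              */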
6945         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6946                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6947                                             MLX5_FLOW_ACTION_MARK    |
6948                                             MLX5_FLOW_ACTION_SET_TAG |
6949                                             MLX5_FLOW_ACTION_SET_META;
6950
6951                 if (action_flags & bad_actions_mask)
6952                         return rte_flow_error_set
6953                                         (error, EINVAL,
6954                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6955                                         "Invalid RTE action in tunnel "
6956                                         "set match rule");
6957         }
6958         /*
6959          * Validate the drop action's mutual exclusion with other actions.
6960          * The drop action is mutually exclusive with any other action except
6961          * for the Count action.
6962          * Drop action compatibility with tunnel offload was already validated.
6963          */
6964         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
6965                             MLX5_FLOW_ACTION_TUNNEL_MATCH));
6966         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6967             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6968                 return rte_flow_error_set(error, EINVAL,
6969                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6970                                           "Drop action is mutually-exclusive "
6971                                           "with any other action, except for "
6972                                           "Count action");
6973         /* E-Switch has a few restrictions on using items and actions. */
6974         if (attr->transfer) {
6975                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6976                     action_flags & MLX5_FLOW_ACTION_FLAG)
6977                         return rte_flow_error_set(error, ENOTSUP,
6978                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6979                                                   NULL,
6980                                                   "unsupported action FLAG");
6981                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6982                     action_flags & MLX5_FLOW_ACTION_MARK)
6983                         return rte_flow_error_set(error, ENOTSUP,
6984                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6985                                                   NULL,
6986                                                   "unsupported action MARK");
6987                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6988                         return rte_flow_error_set(error, ENOTSUP,
6989                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6990                                                   NULL,
6991                                                   "unsupported action QUEUE");
6992                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6993                         return rte_flow_error_set(error, ENOTSUP,
6994                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6995                                                   NULL,
6996                                                   "unsupported action RSS");
6997                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6998                         return rte_flow_error_set(error, EINVAL,
6999                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7000                                                   actions,
7001                                                   "no fate action is found");
7002         } else {
7003                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7004                         return rte_flow_error_set(error, EINVAL,
7005                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7006                                                   actions,
7007                                                   "no fate action is found");
7008         }
7009         /*
7010          * Continue validation for Xcap and VLAN actions.
7011          * If hairpin is working in explicit TX rule mode, there is no actions
7012          * splitting and the validation of hairpin ingress flow should be the
7013          * same as other standard flows.
7014          */
7015         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7016                              MLX5_FLOW_VLAN_ACTIONS)) &&
7017             (queue_index == 0xFFFF ||
7018              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7019              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7020              conf->tx_explicit != 0))) {
7021                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7022                     MLX5_FLOW_XCAP_ACTIONS)
7023                         return rte_flow_error_set(error, ENOTSUP,
7024                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7025                                                   NULL, "encap and decap "
7026                                                   "combination is not supported");
7027                 if (!attr->transfer && attr->ingress) {
7028                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7029                                 return rte_flow_error_set
7030                                                 (error, ENOTSUP,
7031                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7032                                                  NULL, "encap is not supported"
7033                                                  " for ingress traffic");
7034                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7035                                 return rte_flow_error_set
7036                                                 (error, ENOTSUP,
7037                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7038                                                  NULL, "push VLAN action not "
7039                                                  "supported for ingress");
7040                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7041                                         MLX5_FLOW_VLAN_ACTIONS)
7042                                 return rte_flow_error_set
7043                                                 (error, ENOTSUP,
7044                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7045                                                  NULL, "no support for "
7046                                                  "multiple VLAN actions");
7047                 }
7048         }
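             /*
              * A minimal sketch (an assumption, not driver code) of the hairpin
              * queue setup that makes the check above apply as for standard
              * flows, i.e. explicit TX rule mode:
              *
              *   struct rte_eth_hairpin_conf hconf = {
              *           .peer_count = 1,
              *           .tx_explicit = 1,
              *           .peers = { { .port = peer_port, .queue = peer_q } },
              *   };
              *   rte_eth_rx_hairpin_queue_setup(port_id, rx_q, nb_desc, &hconf);
              *
              * With tx_explicit == 0 the PMD splits the flow itself and the
              * encap/VLAN actions land in the TX sub-flow instead.
              */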
7049         /*
7050          * Hairpin flow will add one more TAG action in TX implicit mode.
7051          * In TX explicit mode, there will be no hairpin flow ID.
7052          */
7053         if (hairpin > 0)
7054                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7055         /* Extra metadata enabled: one more TAG action will be added. */
7056         if (dev_conf->dv_flow_en &&
7057             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7058             mlx5_flow_ext_mreg_supported(dev))
7059                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7060         if (rw_act_num >
7061                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7062                 return rte_flow_error_set(error, ENOTSUP,
7063                                           RTE_FLOW_ERROR_TYPE_ACTION,
7064                                           NULL, "too many header modify"
7065                                           " actions to support");
7066         }
7067         return 0;
7068 }
7069
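/*
 * Illustrative sketch (not part of the driver): the XCAP check above relies
 * on comparing the masked action flags against the full mask, which holds
 * only when both the encap and decap bits are set. A minimal, hypothetical
 * helper spelling out the idiom:
 */
static __rte_unused int
example_has_all_action_flags(uint64_t action_flags, uint64_t required)
{
        /* True only if every bit of 'required' is present in the flags. */
        return (action_flags & required) == required;
}
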
7070 /**
 * Internal preparation function. Allocates the DV flow structure,
 * whose size is constant.
7073  *
7074  * @param[in] dev
7075  *   Pointer to the rte_eth_dev structure.
7076  * @param[in] attr
7077  *   Pointer to the flow attributes.
7078  * @param[in] items
7079  *   Pointer to the list of items.
7080  * @param[in] actions
7081  *   Pointer to the list of actions.
7082  * @param[out] error
7083  *   Pointer to the error structure.
7084  *
7085  * @return
7086  *   Pointer to mlx5_flow object on success,
7087  *   otherwise NULL and rte_errno is set.
7088  */
7089 static struct mlx5_flow *
7090 flow_dv_prepare(struct rte_eth_dev *dev,
7091                 const struct rte_flow_attr *attr __rte_unused,
7092                 const struct rte_flow_item items[] __rte_unused,
7093                 const struct rte_flow_action actions[] __rte_unused,
7094                 struct rte_flow_error *error)
7095 {
7096         uint32_t handle_idx = 0;
7097         struct mlx5_flow *dev_flow;
7098         struct mlx5_flow_handle *dev_handle;
7099         struct mlx5_priv *priv = dev->data->dev_private;
7100         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7101
7102         MLX5_ASSERT(wks);
        /* Guard against overflowing the flow array and corrupting memory. */
7104         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7105                 rte_flow_error_set(error, ENOSPC,
7106                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "no free temporary device flow");
7108                 return NULL;
7109         }
7110         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7111                                    &handle_idx);
7112         if (!dev_handle) {
7113                 rte_flow_error_set(error, ENOMEM,
7114                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7115                                    "not enough memory to create flow handle");
7116                 return NULL;
7117         }
7118         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7119         dev_flow = &wks->flows[wks->flow_idx++];
7120         memset(dev_flow, 0, sizeof(*dev_flow));
7121         dev_flow->handle = dev_handle;
7122         dev_flow->handle_idx = handle_idx;
        /*
         * In some old rdma-core releases, before continuing, a check of the
         * length of the matching parameter is done first. The length without
         * the misc4 param must be used here; if the flow has misc4 support,
         * the length is adjusted accordingly. Each param member is naturally
         * aligned on a 64B boundary.
         */
7130         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7131                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7132         dev_flow->ingress = attr->ingress;
7133         dev_flow->dv.transfer = attr->transfer;
7134         return dev_flow;
7135 }
7136
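/*
 * Illustrative sketch (not part of the driver): a hypothetical caller of
 * flow_dv_prepare(). The workspace index is advanced only after the handle
 * allocation succeeds, so a failed prepare leaves the workspace unchanged:
 */
static __rte_unused void
example_prepare_usage(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        struct mlx5_flow *dev_flow;

        dev_flow = flow_dv_prepare(dev, &attr, NULL, NULL, error);
        if (dev_flow == NULL)
                return; /* rte_errno was set by the prepare step. */
        /* ... translate items and actions into dev_flow->dv ... */
}
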
7137 #ifdef RTE_LIBRTE_MLX5_DEBUG
7138 /**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * the kernel driver. If an unmasked bit is set in the value, failure is
 * returned.
7141  *
7142  * @param match_mask
7143  *   pointer to match mask buffer.
7144  * @param match_value
7145  *   pointer to match value buffer.
7146  *
7147  * @return
7148  *   0 if valid, -EINVAL otherwise.
7149  */
7150 static int
7151 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7152 {
7153         uint8_t *m = match_mask;
7154         uint8_t *v = match_value;
7155         unsigned int i;
7156
7157         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7158                 if (v[i] & ~m[i]) {
7159                         DRV_LOG(ERR,
7160                                 "match_value differs from match_criteria"
7161                                 " %p[%u] != %p[%u]",
7162                                 match_value, i, match_mask, i);
7163                         return -EINVAL;
7164                 }
7165         }
7166         return 0;
7167 }
7168 #endif
7169
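/*
 * Illustrative sketch (not part of the driver): the check above flags any
 * value byte carrying bits outside its mask. The byte-wise rule, as a
 * hypothetical helper:
 */
static __rte_unused int
example_value_within_mask(uint8_t value, uint8_t mask)
{
        /* A value bit without a corresponding mask bit is invalid. */
        return (value & ~mask) == 0;
}
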
7170 /**
 * Add match on ip_version.
7172  *
7173  * @param[in] group
7174  *   Flow group.
7175  * @param[in] headers_v
7176  *   Values header pointer.
7177  * @param[in] headers_m
7178  *   Masks header pointer.
7179  * @param[in] ip_version
7180  *   The IP version to set.
7181  */
7182 static inline void
7183 flow_dv_set_match_ip_version(uint32_t group,
7184                              void *headers_v,
7185                              void *headers_m,
7186                              uint8_t ip_version)
7187 {
7188         if (group == 0)
7189                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7190         else
7191                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7192                          ip_version);
7193         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7194         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7195         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7196 }
7197
7198 /**
7199  * Add Ethernet item to matcher and to the value.
7200  *
7201  * @param[in, out] matcher
7202  *   Flow matcher.
7203  * @param[in, out] key
7204  *   Flow matcher value.
7205  * @param[in] item
7206  *   Flow pattern to translate.
7207  * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
7210 static void
7211 flow_dv_translate_item_eth(void *matcher, void *key,
7212                            const struct rte_flow_item *item, int inner,
7213                            uint32_t group)
7214 {
7215         const struct rte_flow_item_eth *eth_m = item->mask;
7216         const struct rte_flow_item_eth *eth_v = item->spec;
7217         const struct rte_flow_item_eth nic_mask = {
7218                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7219                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7220                 .type = RTE_BE16(0xffff),
7221                 .has_vlan = 0,
7222         };
7223         void *hdrs_m;
7224         void *hdrs_v;
7225         char *l24_v;
7226         unsigned int i;
7227
7228         if (!eth_v)
7229                 return;
7230         if (!eth_m)
7231                 eth_m = &nic_mask;
7232         if (inner) {
7233                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7234                                          inner_headers);
7235                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7236         } else {
7237                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7238                                          outer_headers);
7239                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7240         }
7241         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7242                &eth_m->dst, sizeof(eth_m->dst));
7243         /* The value must be in the range of the mask. */
7244         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7245         for (i = 0; i < sizeof(eth_m->dst); ++i)
7246                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7247         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7248                &eth_m->src, sizeof(eth_m->src));
7249         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7250         /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
7252                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7253         /*
7254          * HW supports match on one Ethertype, the Ethertype following the last
7255          * VLAN tag of the packet (see PRM).
7256          * Set match on ethertype only if ETH header is not followed by VLAN.
7257          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7258          * ethertype, and use ip_version field instead.
7259          * eCPRI over Ether layer will use type value 0xAEFE.
7260          */
7261         if (eth_m->type == 0xFFFF) {
                /* Set cvlan_tag mask for any single/multi/un-tagged case. */
7263                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7264                 switch (eth_v->type) {
7265                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7266                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7267                         return;
7268                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7269                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7270                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7271                         return;
7272                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7273                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7274                         return;
7275                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7276                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7277                         return;
7278                 default:
7279                         break;
7280                 }
7281         }
7282         if (eth_m->has_vlan) {
7283                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7284                 if (eth_v->has_vlan) {
                        /*
                         * When the has_more_vlan field in the VLAN item is
                         * also not set, only single-tagged packets will be
                         * matched.
                         */
7289                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7290                         return;
7291                 }
7292         }
7293         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7294                  rte_be_to_cpu_16(eth_m->type));
7295         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
7296         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
7297 }
7298
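/*
 * Illustrative sketch (not part of the driver): a hypothetical ETH item, as
 * an application could pass it, matching a destination MAC and the IPv4
 * ethertype; the translation above folds such an ethertype into ip_version:
 */
static const struct rte_flow_item_eth example_eth_spec __rte_unused = {
        .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
        .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_eth example_eth_mask __rte_unused = {
        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        .type = RTE_BE16(0xffff),
};
static const struct rte_flow_item example_eth_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_ETH,
        .spec = &example_eth_spec,
        .mask = &example_eth_mask,
};
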
7299 /**
7300  * Add VLAN item to matcher and to the value.
7301  *
7302  * @param[in, out] dev_flow
7303  *   Flow descriptor.
7304  * @param[in, out] matcher
7305  *   Flow matcher.
7306  * @param[in, out] key
7307  *   Flow matcher value.
7308  * @param[in] item
7309  *   Flow pattern to translate.
7310  * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
7313 static void
7314 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
7315                             void *matcher, void *key,
7316                             const struct rte_flow_item *item,
7317                             int inner, uint32_t group)
7318 {
7319         const struct rte_flow_item_vlan *vlan_m = item->mask;
7320         const struct rte_flow_item_vlan *vlan_v = item->spec;
7321         void *hdrs_m;
7322         void *hdrs_v;
7323         uint16_t tci_m;
7324         uint16_t tci_v;
7325
7326         if (inner) {
7327                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7328                                          inner_headers);
7329                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7330         } else {
7331                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7332                                          outer_headers);
7333                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
                /*
                 * This is a workaround: VLAN masks are not supported here
                 * and have been pre-validated.
                 */
7338                 if (vlan_v)
7339                         dev_flow->handle->vf_vlan.tag =
7340                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
7341         }
        /*
         * When a VLAN item exists in the flow, mark the packet as tagged,
         * even if the TCI is not specified.
         */
7346         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
7347                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7348                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7349         }
7350         if (!vlan_v)
7351                 return;
7352         if (!vlan_m)
7353                 vlan_m = &rte_flow_item_vlan_mask;
7354         tci_m = rte_be_to_cpu_16(vlan_m->tci);
7355         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
7356         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
7357         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
7358         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
7359         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
7360         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
7361         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
7362         /*
7363          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7364          * ethertype, and use ip_version field instead.
7365          */
7366         if (vlan_m->inner_type == 0xFFFF) {
7367                 switch (vlan_v->inner_type) {
7368                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7369                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7370                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7371                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7372                         return;
7373                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7374                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7375                         return;
7376                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7377                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7378                         return;
7379                 default:
7380                         break;
7381                 }
7382         }
7383         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
7384                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7385                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7386                 /* Only one vlan_tag bit can be set. */
7387                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7388                 return;
7389         }
7390         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7391                  rte_be_to_cpu_16(vlan_m->inner_type));
7392         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
7393                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
7394 }
7395
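/*
 * Illustrative sketch (not part of the driver): the 16-bit TCI is split
 * above into PCP (3 bits), CFI/DEI (1 bit) and VID (12 bits). A
 * hypothetical helper making the shifts explicit:
 */
static __rte_unused void
example_tci_split(uint16_t tci, uint16_t *prio, uint16_t *cfi, uint16_t *vid)
{
        *prio = tci >> 13;        /* first_prio */
        *cfi = (tci >> 12) & 0x1; /* first_cfi */
        *vid = tci & 0x0fff;      /* first_vid */
}
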
7396 /**
7397  * Add IPV4 item to matcher and to the value.
7398  *
7399  * @param[in, out] matcher
7400  *   Flow matcher.
7401  * @param[in, out] key
7402  *   Flow matcher value.
7403  * @param[in] item
7404  *   Flow pattern to translate.
7405  * @param[in] inner
7406  *   Item is inner pattern.
7407  * @param[in] group
7408  *   The group to insert the rule.
7409  */
7410 static void
7411 flow_dv_translate_item_ipv4(void *matcher, void *key,
7412                             const struct rte_flow_item *item,
7413                             int inner, uint32_t group)
7414 {
7415         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
7416         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
7417         const struct rte_flow_item_ipv4 nic_mask = {
7418                 .hdr = {
7419                         .src_addr = RTE_BE32(0xffffffff),
7420                         .dst_addr = RTE_BE32(0xffffffff),
7421                         .type_of_service = 0xff,
7422                         .next_proto_id = 0xff,
7423                         .time_to_live = 0xff,
7424                 },
7425         };
7426         void *headers_m;
7427         void *headers_v;
7428         char *l24_m;
7429         char *l24_v;
7430         uint8_t tos;
7431
7432         if (inner) {
7433                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7434                                          inner_headers);
7435                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7436         } else {
7437                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7438                                          outer_headers);
7439                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7440         }
7441         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
7442         if (!ipv4_v)
7443                 return;
7444         if (!ipv4_m)
7445                 ipv4_m = &nic_mask;
7446         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7447                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7448         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7449                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7450         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
7451         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
7452         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7453                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7454         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7455                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7456         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
7457         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
7458         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
7459         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
7460                  ipv4_m->hdr.type_of_service);
7461         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
7462         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
7463                  ipv4_m->hdr.type_of_service >> 2);
7464         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
7465         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7466                  ipv4_m->hdr.next_proto_id);
7467         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7468                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
7469         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7470                  ipv4_m->hdr.time_to_live);
7471         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7472                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
7473         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7474                  !!(ipv4_m->hdr.fragment_offset));
7475         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7476                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
7477 }
7478
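/*
 * Illustrative sketch (not part of the driver): a hypothetical IPv4 item
 * matching a /24 destination prefix and the TCP protocol; the translation
 * above ANDs each value with its mask before writing the matcher value:
 */
static const struct rte_flow_item_ipv4 example_ipv4_spec __rte_unused = {
        .hdr = {
                .dst_addr = RTE_BE32(0xc0a80100), /* 192.168.1.0 */
                .next_proto_id = IPPROTO_TCP,
        },
};
static const struct rte_flow_item_ipv4 example_ipv4_mask __rte_unused = {
        .hdr = {
                .dst_addr = RTE_BE32(0xffffff00), /* /24 prefix */
                .next_proto_id = 0xff,
        },
};
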
7479 /**
7480  * Add IPV6 item to matcher and to the value.
7481  *
7482  * @param[in, out] matcher
7483  *   Flow matcher.
7484  * @param[in, out] key
7485  *   Flow matcher value.
7486  * @param[in] item
7487  *   Flow pattern to translate.
7488  * @param[in] inner
7489  *   Item is inner pattern.
7490  * @param[in] group
7491  *   The group to insert the rule.
7492  */
7493 static void
7494 flow_dv_translate_item_ipv6(void *matcher, void *key,
7495                             const struct rte_flow_item *item,
7496                             int inner, uint32_t group)
7497 {
7498         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
7499         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
7500         const struct rte_flow_item_ipv6 nic_mask = {
7501                 .hdr = {
7502                         .src_addr =
7503                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7504                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7505                         .dst_addr =
7506                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7507                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7508                         .vtc_flow = RTE_BE32(0xffffffff),
7509                         .proto = 0xff,
7510                         .hop_limits = 0xff,
7511                 },
7512         };
7513         void *headers_m;
7514         void *headers_v;
7515         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7516         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7517         char *l24_m;
7518         char *l24_v;
7519         uint32_t vtc_m;
7520         uint32_t vtc_v;
7521         int i;
7522         int size;
7523
7524         if (inner) {
7525                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7526                                          inner_headers);
7527                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7528         } else {
7529                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7530                                          outer_headers);
7531                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7532         }
7533         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
7534         if (!ipv6_v)
7535                 return;
7536         if (!ipv6_m)
7537                 ipv6_m = &nic_mask;
7538         size = sizeof(ipv6_m->hdr.dst_addr);
7539         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7540                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7541         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7542                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7543         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
7544         for (i = 0; i < size; ++i)
7545                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
7546         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7547                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7548         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7549                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7550         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
7551         for (i = 0; i < size; ++i)
7552                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
7553         /* TOS. */
7554         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
7555         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
7556         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
7557         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
7558         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
7559         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
7560         /* Label. */
7561         if (inner) {
7562                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
7563                          vtc_m);
7564                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
7565                          vtc_v);
7566         } else {
7567                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
7568                          vtc_m);
7569                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
7570                          vtc_v);
7571         }
7572         /* Protocol. */
7573         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7574                  ipv6_m->hdr.proto);
7575         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7576                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
7577         /* Hop limit. */
7578         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7579                  ipv6_m->hdr.hop_limits);
7580         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7581                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
7582         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7583                  !!(ipv6_m->has_frag_ext));
7584         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7585                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
7586 }
7587
7588 /**
7589  * Add IPV6 fragment extension item to matcher and to the value.
7590  *
7591  * @param[in, out] matcher
7592  *   Flow matcher.
7593  * @param[in, out] key
7594  *   Flow matcher value.
7595  * @param[in] item
7596  *   Flow pattern to translate.
7597  * @param[in] inner
7598  *   Item is inner pattern.
7599  */
7600 static void
7601 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
7602                                      const struct rte_flow_item *item,
7603                                      int inner)
7604 {
7605         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
7606         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
7607         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
7608                 .hdr = {
7609                         .next_header = 0xff,
7610                         .frag_data = RTE_BE16(0xffff),
7611                 },
7612         };
7613         void *headers_m;
7614         void *headers_v;
7615
7616         if (inner) {
7617                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7618                                          inner_headers);
7619                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7620         } else {
7621                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7622                                          outer_headers);
7623                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7624         }
7625         /* IPv6 fragment extension item exists, so packet is IP fragment. */
7626         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7627         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
7628         if (!ipv6_frag_ext_v)
7629                 return;
7630         if (!ipv6_frag_ext_m)
7631                 ipv6_frag_ext_m = &nic_mask;
7632         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7633                  ipv6_frag_ext_m->hdr.next_header);
7634         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7635                  ipv6_frag_ext_v->hdr.next_header &
7636                  ipv6_frag_ext_m->hdr.next_header);
7637 }
7638
7639 /**
7640  * Add TCP item to matcher and to the value.
7641  *
7642  * @param[in, out] matcher
7643  *   Flow matcher.
7644  * @param[in, out] key
7645  *   Flow matcher value.
7646  * @param[in] item
7647  *   Flow pattern to translate.
7648  * @param[in] inner
7649  *   Item is inner pattern.
7650  */
7651 static void
7652 flow_dv_translate_item_tcp(void *matcher, void *key,
7653                            const struct rte_flow_item *item,
7654                            int inner)
7655 {
7656         const struct rte_flow_item_tcp *tcp_m = item->mask;
7657         const struct rte_flow_item_tcp *tcp_v = item->spec;
7658         void *headers_m;
7659         void *headers_v;
7660
7661         if (inner) {
7662                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7663                                          inner_headers);
7664                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7665         } else {
7666                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7667                                          outer_headers);
7668                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7669         }
7670         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7671         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
7672         if (!tcp_v)
7673                 return;
7674         if (!tcp_m)
7675                 tcp_m = &rte_flow_item_tcp_mask;
7676         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
7677                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
7678         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
7679                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
7680         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
7681                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
7682         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
7683                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
7684         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
7685                  tcp_m->hdr.tcp_flags);
7686         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
7687                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
7688 }
7689
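/*
 * Illustrative sketch (not part of the driver): a hypothetical TCP item
 * matching only SYN segments through the tcp_flags field handled above:
 */
static const struct rte_flow_item_tcp example_tcp_syn_spec __rte_unused = {
        .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
};
static const struct rte_flow_item_tcp example_tcp_syn_mask __rte_unused = {
        .hdr.tcp_flags = 0xff,
};
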
7690 /**
7691  * Add UDP item to matcher and to the value.
7692  *
7693  * @param[in, out] matcher
7694  *   Flow matcher.
7695  * @param[in, out] key
7696  *   Flow matcher value.
7697  * @param[in] item
7698  *   Flow pattern to translate.
7699  * @param[in] inner
7700  *   Item is inner pattern.
7701  */
7702 static void
7703 flow_dv_translate_item_udp(void *matcher, void *key,
7704                            const struct rte_flow_item *item,
7705                            int inner)
7706 {
7707         const struct rte_flow_item_udp *udp_m = item->mask;
7708         const struct rte_flow_item_udp *udp_v = item->spec;
7709         void *headers_m;
7710         void *headers_v;
7711
7712         if (inner) {
7713                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7714                                          inner_headers);
7715                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7716         } else {
7717                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7718                                          outer_headers);
7719                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7720         }
7721         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7722         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
7723         if (!udp_v)
7724                 return;
7725         if (!udp_m)
7726                 udp_m = &rte_flow_item_udp_mask;
7727         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
7728                  rte_be_to_cpu_16(udp_m->hdr.src_port));
7729         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
7730                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
7731         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
7732                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
7733         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7734                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
7735 }
7736
7737 /**
7738  * Add GRE optional Key item to matcher and to the value.
7739  *
7740  * @param[in, out] matcher
7741  *   Flow matcher.
7742  * @param[in, out] key
7743  *   Flow matcher value.
7744  * @param[in] item
7745  *   Flow pattern to translate.
7748  */
7749 static void
7750 flow_dv_translate_item_gre_key(void *matcher, void *key,
                               const struct rte_flow_item *item)
7752 {
7753         const rte_be32_t *key_m = item->mask;
7754         const rte_be32_t *key_v = item->spec;
7755         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7756         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7757         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
7758
        /* The GRE K bit must be set and should already be validated. */
7760         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
7761         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
7762         if (!key_v)
7763                 return;
7764         if (!key_m)
7765                 key_m = &gre_key_default_mask;
7766         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
7767                  rte_be_to_cpu_32(*key_m) >> 8);
7768         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
7769                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
7770         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
7771                  rte_be_to_cpu_32(*key_m) & 0xFF);
7772         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
7773                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
7774 }
7775
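/*
 * Illustrative sketch (not part of the driver): the 32-bit GRE key is
 * stored as a 24-bit high part (gre_key_h) and an 8-bit low part
 * (gre_key_l), mirroring the split performed above:
 */
static __rte_unused void
example_gre_key_split(rte_be32_t key, uint32_t *hi24, uint32_t *lo8)
{
        uint32_t k = rte_be_to_cpu_32(key);

        *hi24 = k >> 8;  /* gre_key_h */
        *lo8 = k & 0xFF; /* gre_key_l */
}
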
7776 /**
7777  * Add GRE item to matcher and to the value.
7778  *
7779  * @param[in, out] matcher
7780  *   Flow matcher.
7781  * @param[in, out] key
7782  *   Flow matcher value.
7783  * @param[in] item
7784  *   Flow pattern to translate.
7785  * @param[in] inner
7786  *   Item is inner pattern.
7787  */
7788 static void
7789 flow_dv_translate_item_gre(void *matcher, void *key,
7790                            const struct rte_flow_item *item,
7791                            int inner)
7792 {
7793         const struct rte_flow_item_gre *gre_m = item->mask;
7794         const struct rte_flow_item_gre *gre_v = item->spec;
7795         void *headers_m;
7796         void *headers_v;
7797         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7798         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7799         struct {
7800                 union {
7801                         __extension__
7802                         struct {
7803                                 uint16_t version:3;
7804                                 uint16_t rsvd0:9;
7805                                 uint16_t s_present:1;
7806                                 uint16_t k_present:1;
7807                                 uint16_t rsvd_bit1:1;
7808                                 uint16_t c_present:1;
7809                         };
7810                         uint16_t value;
7811                 };
7812         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
7813
7814         if (inner) {
7815                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7816                                          inner_headers);
7817                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7818         } else {
7819                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7820                                          outer_headers);
7821                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7822         }
7823         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7824         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
7825         if (!gre_v)
7826                 return;
7827         if (!gre_m)
7828                 gre_m = &rte_flow_item_gre_mask;
7829         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
7830                  rte_be_to_cpu_16(gre_m->protocol));
7831         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7832                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
7833         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
7834         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
7835         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
7836                  gre_crks_rsvd0_ver_m.c_present);
7837         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
7838                  gre_crks_rsvd0_ver_v.c_present &
7839                  gre_crks_rsvd0_ver_m.c_present);
7840         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
7841                  gre_crks_rsvd0_ver_m.k_present);
7842         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
7843                  gre_crks_rsvd0_ver_v.k_present &
7844                  gre_crks_rsvd0_ver_m.k_present);
7845         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
7846                  gre_crks_rsvd0_ver_m.s_present);
7847         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
7848                  gre_crks_rsvd0_ver_v.s_present &
7849                  gre_crks_rsvd0_ver_m.s_present);
7850 }
7851
7852 /**
7853  * Add NVGRE item to matcher and to the value.
7854  *
7855  * @param[in, out] matcher
7856  *   Flow matcher.
7857  * @param[in, out] key
7858  *   Flow matcher value.
7859  * @param[in] item
7860  *   Flow pattern to translate.
7861  * @param[in] inner
7862  *   Item is inner pattern.
7863  */
7864 static void
7865 flow_dv_translate_item_nvgre(void *matcher, void *key,
7866                              const struct rte_flow_item *item,
7867                              int inner)
7868 {
7869         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
7870         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
7871         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7872         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7873         const char *tni_flow_id_m;
7874         const char *tni_flow_id_v;
7875         char *gre_key_m;
7876         char *gre_key_v;
7877         int size;
7878         int i;
7879
7880         /* For NVGRE, GRE header fields must be set with defined values. */
7881         const struct rte_flow_item_gre gre_spec = {
7882                 .c_rsvd0_ver = RTE_BE16(0x2000),
7883                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7884         };
7885         const struct rte_flow_item_gre gre_mask = {
7886                 .c_rsvd0_ver = RTE_BE16(0xB000),
7887                 .protocol = RTE_BE16(UINT16_MAX),
7888         };
7889         const struct rte_flow_item gre_item = {
7890                 .spec = &gre_spec,
7891                 .mask = &gre_mask,
7892                 .last = NULL,
7893         };
7894         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7895         if (!nvgre_v)
7896                 return;
7897         if (!nvgre_m)
7898                 nvgre_m = &rte_flow_item_nvgre_mask;
7899         tni_flow_id_m = (const char *)nvgre_m->tni;
7900         tni_flow_id_v = (const char *)nvgre_v->tni;
7901         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7902         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7903         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7904         memcpy(gre_key_m, tni_flow_id_m, size);
7905         for (i = 0; i < size; ++i)
7906                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7907 }
7908
7909 /**
7910  * Add VXLAN item to matcher and to the value.
7911  *
7912  * @param[in, out] matcher
7913  *   Flow matcher.
7914  * @param[in, out] key
7915  *   Flow matcher value.
7916  * @param[in] item
7917  *   Flow pattern to translate.
7918  * @param[in] inner
7919  *   Item is inner pattern.
7920  */
7921 static void
7922 flow_dv_translate_item_vxlan(void *matcher, void *key,
7923                              const struct rte_flow_item *item,
7924                              int inner)
7925 {
7926         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7927         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7928         void *headers_m;
7929         void *headers_v;
7930         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7931         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7932         char *vni_m;
7933         char *vni_v;
7934         uint16_t dport;
7935         int size;
7936         int i;
7937
7938         if (inner) {
7939                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7940                                          inner_headers);
7941                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7942         } else {
7943                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7944                                          outer_headers);
7945                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7946         }
7947         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7948                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7949         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7950                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7951                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7952         }
7953         if (!vxlan_v)
7954                 return;
7955         if (!vxlan_m)
7956                 vxlan_m = &rte_flow_item_vxlan_mask;
7957         size = sizeof(vxlan_m->vni);
7958         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7959         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7960         memcpy(vni_m, vxlan_m->vni, size);
7961         for (i = 0; i < size; ++i)
7962                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7963 }
7964
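/*
 * Illustrative sketch (not part of the driver): a hypothetical VXLAN item
 * matching VNI 0x123456; the VNI is a 24-bit value kept as a big-endian
 * byte array, masked byte by byte as in the loop above:
 */
static const struct rte_flow_item_vxlan example_vxlan_spec __rte_unused = {
        .vni = { 0x12, 0x34, 0x56 },
};
static const struct rte_flow_item_vxlan example_vxlan_mask __rte_unused = {
        .vni = { 0xff, 0xff, 0xff },
};
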
7965 /**
7966  * Add VXLAN-GPE item to matcher and to the value.
7967  *
7968  * @param[in, out] matcher
7969  *   Flow matcher.
7970  * @param[in, out] key
7971  *   Flow matcher value.
7972  * @param[in] item
7973  *   Flow pattern to translate.
7974  * @param[in] inner
7975  *   Item is inner pattern.
7976  */
7978 static void
7979 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7980                                  const struct rte_flow_item *item, int inner)
7981 {
7982         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7983         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7984         void *headers_m;
7985         void *headers_v;
7986         void *misc_m =
7987                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7988         void *misc_v =
7989                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7990         char *vni_m;
7991         char *vni_v;
7992         uint16_t dport;
7993         int size;
7994         int i;
7995         uint8_t flags_m = 0xff;
7996         uint8_t flags_v = 0xc;
7997
7998         if (inner) {
7999                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8000                                          inner_headers);
8001                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8002         } else {
8003                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8004                                          outer_headers);
8005                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8006         }
8007         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8008                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8009         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8010                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8011                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8012         }
8013         if (!vxlan_v)
8014                 return;
8015         if (!vxlan_m)
8016                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8017         size = sizeof(vxlan_m->vni);
8018         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8019         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8020         memcpy(vni_m, vxlan_m->vni, size);
8021         for (i = 0; i < size; ++i)
8022                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8023         if (vxlan_m->flags) {
8024                 flags_m = vxlan_m->flags;
8025                 flags_v = vxlan_v->flags;
8026         }
8027         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8028         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8029         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8030                  vxlan_m->protocol);
8031         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8032                  vxlan_v->protocol);
8033 }
8034
8035 /**
8036  * Add Geneve item to matcher and to the value.
8037  *
8038  * @param[in, out] matcher
8039  *   Flow matcher.
8040  * @param[in, out] key
8041  *   Flow matcher value.
8042  * @param[in] item
8043  *   Flow pattern to translate.
8044  * @param[in] inner
8045  *   Item is inner pattern.
8046  */
8048 static void
8049 flow_dv_translate_item_geneve(void *matcher, void *key,
8050                               const struct rte_flow_item *item, int inner)
8051 {
8052         const struct rte_flow_item_geneve *geneve_m = item->mask;
8053         const struct rte_flow_item_geneve *geneve_v = item->spec;
8054         void *headers_m;
8055         void *headers_v;
8056         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8057         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8058         uint16_t dport;
8059         uint16_t gbhdr_m;
8060         uint16_t gbhdr_v;
8061         char *vni_m;
8062         char *vni_v;
8063         size_t size, i;
8064
8065         if (inner) {
8066                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8067                                          inner_headers);
8068                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8069         } else {
8070                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8071                                          outer_headers);
8072                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8073         }
8074         dport = MLX5_UDP_PORT_GENEVE;
8075         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8076                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8077                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8078         }
8079         if (!geneve_v)
8080                 return;
8081         if (!geneve_m)
8082                 geneve_m = &rte_flow_item_geneve_mask;
8083         size = sizeof(geneve_m->vni);
8084         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8085         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8086         memcpy(vni_m, geneve_m->vni, size);
8087         for (i = 0; i < size; ++i)
8088                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8089         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8090                  rte_be_to_cpu_16(geneve_m->protocol));
8091         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8092                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8093         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8094         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8095         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8096                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8097         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8098                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8099         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8100                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8101         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8102                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8103                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8104 }
8105
8106 /**
8107  * Create Geneve TLV option resource.
8108  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
8120  */
8122 int
8123 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8124                                              const struct rte_flow_item *item,
8125                                              struct rte_flow_error *error)
8126 {
8127         struct mlx5_priv *priv = dev->data->dev_private;
8128         struct mlx5_dev_ctx_shared *sh = priv->sh;
8129         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8130                         sh->geneve_tlv_option_resource;
8131         struct mlx5_devx_obj *obj;
8132         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8133         int ret = 0;
8134
8135         if (!geneve_opt_v)
8136                 return -1;
8137         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8138         if (geneve_opt_resource != NULL) {
8139                 if (geneve_opt_resource->option_class ==
8140                         geneve_opt_v->option_class &&
8141                         geneve_opt_resource->option_type ==
8142                         geneve_opt_v->option_type &&
8143                         geneve_opt_resource->length ==
8144                         geneve_opt_v->option_len) {
                        /* We already have a GENEVE TLV option obj allocated. */
8146                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8147                                            __ATOMIC_RELAXED);
8148                 } else {
8149                         ret = rte_flow_error_set(error, ENOMEM,
8150                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8151                                 "Only one GENEVE TLV option supported");
8152                         goto exit;
8153                 }
8154         } else {
8155                 /* Create a GENEVE TLV object and resource. */
8156                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8157                                 geneve_opt_v->option_class,
8158                                 geneve_opt_v->option_type,
8159                                 geneve_opt_v->option_len);
8160                 if (!obj) {
8161                         ret = rte_flow_error_set(error, ENODATA,
8162                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8163                                 "Failed to create GENEVE TLV Devx object");
8164                         goto exit;
8165                 }
8166                 sh->geneve_tlv_option_resource =
8167                                 mlx5_malloc(MLX5_MEM_ZERO,
8168                                                 sizeof(*geneve_opt_resource),
8169                                                 0, SOCKET_ID_ANY);
8170                 if (!sh->geneve_tlv_option_resource) {
8171                         claim_zero(mlx5_devx_cmd_destroy(obj));
8172                         ret = rte_flow_error_set(error, ENOMEM,
8173                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8174                                 "GENEVE TLV object memory allocation failed");
8175                         goto exit;
8176                 }
8177                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8178                 geneve_opt_resource->obj = obj;
8179                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8180                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8181                 geneve_opt_resource->length = geneve_opt_v->option_len;
8182                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8183                                 __ATOMIC_RELAXED);
8184         }
8185 exit:
8186         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8187         return ret;
8188 }
8189
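/*
 * Illustrative sketch (not part of the driver): the resource above is a
 * per-device singleton guarded by a spinlock and a reference count. A
 * hypothetical release path mirroring the registration could look like:
 */
static __rte_unused void
example_geneve_opt_release(struct mlx5_dev_ctx_shared *sh)
{
        struct mlx5_geneve_tlv_option_resource *res;

        rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
        res = sh->geneve_tlv_option_resource;
        if (res != NULL &&
            __atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) == 0) {
                claim_zero(mlx5_devx_cmd_destroy(res->obj));
                mlx5_free(res);
                sh->geneve_tlv_option_resource = NULL;
        }
        rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
}
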
8190 /**
8191  * Add Geneve TLV option item to matcher.
8192  *
8193  * @param[in, out] dev
8194  *   Pointer to rte_eth_dev structure.
8195  * @param[in, out] matcher
8196  *   Flow matcher.
8197  * @param[in, out] key
8198  *   Flow matcher value.
8199  * @param[in] item
8200  *   Flow pattern to translate.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
8204 static int
8205 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8206                                   void *key, const struct rte_flow_item *item,
8207                                   struct rte_flow_error *error)
8208 {
8209         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8210         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8211         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8212         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8213         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8214                         misc_parameters_3);
8215         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8216         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8217         int ret = 0;
8218
8219         if (!geneve_opt_v)
8220                 return -1;
8221         if (!geneve_opt_m)
8222                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8223         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8224                                                            error);
8225         if (ret) {
8226                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8227                 return ret;
8228         }
        /*
         * Set the option length in the GENEVE header if not requested.
         * The GENEVE TLV option length is expressed by the option length
         * field in the GENEVE header.
         * If the option length was not requested but the GENEVE TLV option
         * item is present, we set the option length field implicitly.
         */
8236         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8237                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8238                          MLX5_GENEVE_OPTLEN_MASK);
8239                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8240                          geneve_opt_v->option_len + 1);
8241         }
8242         /* Set the data. */
8243         if (geneve_opt_v->data) {
8244                 memcpy(&opt_data_key, geneve_opt_v->data,
8245                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8246                                 sizeof(opt_data_key)));
8247                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8248                                 sizeof(opt_data_key));
8249                 memcpy(&opt_data_mask, geneve_opt_m->data,
8250                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8251                                 sizeof(opt_data_mask)));
8252                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8253                                 sizeof(opt_data_mask));
8254                 MLX5_SET(fte_match_set_misc3, misc3_m,
8255                                 geneve_tlv_option_0_data,
8256                                 rte_be_to_cpu_32(opt_data_mask));
8257                 MLX5_SET(fte_match_set_misc3, misc3_v,
8258                                 geneve_tlv_option_0_data,
8259                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8260         }
8261         return ret;
8262 }
8263
8264 /**
8265  * Add MPLS item to matcher and to the value.
8266  *
8267  * @param[in, out] matcher
8268  *   Flow matcher.
8269  * @param[in, out] key
8270  *   Flow matcher value.
8271  * @param[in] item
8272  *   Flow pattern to translate.
8273  * @param[in] prev_layer
8274  *   The protocol layer indicated in previous item.
8275  * @param[in] inner
8276  *   Item is inner pattern.
8277  */
8278 static void
8279 flow_dv_translate_item_mpls(void *matcher, void *key,
8280                             const struct rte_flow_item *item,
8281                             uint64_t prev_layer,
8282                             int inner)
8283 {
8284         const uint32_t *in_mpls_m = item->mask;
8285         const uint32_t *in_mpls_v = item->spec;
        uint32_t *out_mpls_m = NULL;
        uint32_t *out_mpls_v = NULL;
8288         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8289         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8290         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
8291                                      misc_parameters_2);
8292         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8293         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8294         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8295
8296         switch (prev_layer) {
8297         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8298                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
8299                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8300                          MLX5_UDP_PORT_MPLS);
8301                 break;
8302         case MLX5_FLOW_LAYER_GRE:
8303                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
8304                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8305                          RTE_ETHER_TYPE_MPLS);
8306                 break;
8307         default:
8308                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8309                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8310                          IPPROTO_MPLS);
8311                 break;
8312         }
8313         if (!in_mpls_v)
8314                 return;
8315         if (!in_mpls_m)
8316                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
8317         switch (prev_layer) {
8318         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8319                 out_mpls_m =
8320                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8321                                                  outer_first_mpls_over_udp);
8322                 out_mpls_v =
8323                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8324                                                  outer_first_mpls_over_udp);
8325                 break;
8326         case MLX5_FLOW_LAYER_GRE:
8327                 out_mpls_m =
8328                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8329                                                  outer_first_mpls_over_gre);
8330                 out_mpls_v =
8331                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8332                                                  outer_first_mpls_over_gre);
8333                 break;
8334         default:
8335                 /* Inner MPLS not over GRE is not supported. */
8336                 if (!inner) {
8337                         out_mpls_m =
8338                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8339                                                          misc2_m,
8340                                                          outer_first_mpls);
8341                         out_mpls_v =
8342                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8343                                                          misc2_v,
8344                                                          outer_first_mpls);
8345                 }
8346                 break;
8347         }
8348         if (out_mpls_m && out_mpls_v) {
8349                 *out_mpls_m = *in_mpls_m;
8350                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
8351         }
8352 }
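/*
 * For illustration: given a pattern such as "eth / ipv4 / udp / mpls", the
 * MLX5_FLOW_LAYER_OUTER_L4_UDP branches above pin udp_dport to
 * MLX5_UDP_PORT_MPLS and match the label via outer_first_mpls_over_udp,
 * while a GRE encapsulation is matched via outer_first_mpls_over_gre.
 */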
8353
8354 /**
8355  * Add metadata register item to matcher
8356  *
8357  * @param[in, out] matcher
8358  *   Flow matcher.
8359  * @param[in, out] key
8360  *   Flow matcher value.
8361  * @param[in] reg_type
8362  *   Type of device metadata register.
8363  * @param[in] data
8364  *   Register value to match.
8365  * @param[in] mask
8366  *   Register mask.
8367  */
8368 static void
8369 flow_dv_match_meta_reg(void *matcher, void *key,
8370                        enum modify_reg reg_type,
8371                        uint32_t data, uint32_t mask)
8372 {
8373         void *misc2_m =
8374                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
8375         void *misc2_v =
8376                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8377         uint32_t temp;
8378
8379         data &= mask;
8380         switch (reg_type) {
8381         case REG_A:
8382                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
8383                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
8384                 break;
8385         case REG_B:
8386                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
8387                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
8388                 break;
8389         case REG_C_0:
8390                 /*
8391                  * The metadata register C0 field might be divided into
8392                  * source vport index and META item value, we should set
8393                  * this field according to specified mask, not as whole one.
8394                  */
8395                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
8396                 temp |= mask;
8397                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
8398                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
8399                 temp &= ~mask;
8400                 temp |= data;
8401                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
8402                 break;
8403         case REG_C_1:
8404                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
8405                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
8406                 break;
8407         case REG_C_2:
8408                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
8409                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
8410                 break;
8411         case REG_C_3:
8412                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
8413                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
8414                 break;
8415         case REG_C_4:
8416                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
8417                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
8418                 break;
8419         case REG_C_5:
8420                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
8421                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
8422                 break;
8423         case REG_C_6:
8424                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
8425                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
8426                 break;
8427         case REG_C_7:
8428                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
8429                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
8430                 break;
8431         default:
8432                 MLX5_ASSERT(false);
8433                 break;
8434         }
8435 }
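/*
 * REG_C_0 example (illustrative): if the matcher mask already carries
 * 0xffff0000 for the vport part and a META match adds data/mask
 * 0x1234/0x0000ffff, the read-modify-write above yields a combined mask of
 * 0xffffffff while the vport bits of the value stay intact.
 */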
8436
8437 /**
8438  * Add MARK item to matcher
8439  *
8440  * @param[in] dev
8441  *   The device to configure through.
8442  * @param[in, out] matcher
8443  *   Flow matcher.
8444  * @param[in, out] key
8445  *   Flow matcher value.
8446  * @param[in] item
8447  *   Flow pattern to translate.
8448  */
8449 static void
8450 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
8451                             void *matcher, void *key,
8452                             const struct rte_flow_item *item)
8453 {
8454         struct mlx5_priv *priv = dev->data->dev_private;
8455         const struct rte_flow_item_mark *mark;
8456         uint32_t value;
8457         uint32_t mask;
8458
8459         mark = item->mask ? (const void *)item->mask :
8460                             &rte_flow_item_mark_mask;
8461         mask = mark->id & priv->sh->dv_mark_mask;
8462         mark = (const void *)item->spec;
8463         MLX5_ASSERT(mark);
8464         value = mark->id & priv->sh->dv_mark_mask & mask;
8465         if (mask) {
8466                 enum modify_reg reg;
8467
8468                 /* Get the metadata register index for the mark. */
8469                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
8470                 MLX5_ASSERT(reg > 0);
8471                 if (reg == REG_C_0) {
8473                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8474                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8475
8476                         mask &= msk_c0;
8477                         mask <<= shl_c0;
8478                         value <<= shl_c0;
8479                 }
8480                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8481         }
8482 }
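/*
 * Note: rte_bsf32() returns the index of the least significant set bit,
 * i.e. the offset of the C0 sub-field reserved for MARK, which is why the
 * value and mask are shifted by shl_c0 before being programmed.
 */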
8483
8484 /**
8485  * Add META item to matcher
8486  *
8487  * @param[in] dev
8488  *   The device to configure through.
8489  * @param[in, out] matcher
8490  *   Flow matcher.
8491  * @param[in, out] key
8492  *   Flow matcher value.
8493  * @param[in] attr
8494  *   Attributes of flow that includes this item.
8495  * @param[in] item
8496  *   Flow pattern to translate.
8497  */
8498 static void
8499 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
8500                             void *matcher, void *key,
8501                             const struct rte_flow_attr *attr,
8502                             const struct rte_flow_item *item)
8503 {
8504         const struct rte_flow_item_meta *meta_m;
8505         const struct rte_flow_item_meta *meta_v;
8506
8507         meta_m = (const void *)item->mask;
8508         if (!meta_m)
8509                 meta_m = &rte_flow_item_meta_mask;
8510         meta_v = (const void *)item->spec;
8511         if (meta_v) {
8512                 int reg;
8513                 uint32_t value = meta_v->data;
8514                 uint32_t mask = meta_m->data;
8515
8516                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
8517                 if (reg < 0)
8518                         return;
8519                 MLX5_ASSERT(reg != REG_NON);
8520                 /*
8521                  * In datapath code there are no endianness
8522                  * conversions for performance reasons, all
8523                  * pattern conversions are done in rte_flow.
8524                  */
8525                 value = rte_cpu_to_be_32(value);
8526                 mask = rte_cpu_to_be_32(mask);
8527                 if (reg == REG_C_0) {
8528                         struct mlx5_priv *priv = dev->data->dev_private;
8529                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8530                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8531 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
8532                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
8533
8534                         value >>= shr_c0;
8535                         mask >>= shr_c0;
8536 #endif
8537                         value <<= shl_c0;
8538                         mask <<= shl_c0;
8539                         MLX5_ASSERT(msk_c0);
8540                         MLX5_ASSERT(!(~msk_c0 & mask));
8541                 }
8542                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8543         }
8544 }
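/*
 * The conversions above keep the matcher consistent with the datapath,
 * which reads the metadata register in network order without per-packet
 * byte swapping; the extra shifts for REG_C_0 realign the META bits to the
 * sub-field of C0 that is not occupied by the vport metadata.
 */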
8545
8546 /**
8547  * Add vport metadata Reg C0 item to matcher
8548  *
8549  * @param[in, out] matcher
8550  *   Flow matcher.
8551  * @param[in, out] key
8552  *   Flow matcher value.
8553  * @param[in] value
8554  *   Register value to match.
 * @param[in] mask
 *   Register mask.
8555  */
8556 static void
8557 flow_dv_translate_item_meta_vport(void *matcher, void *key,
8558                                   uint32_t value, uint32_t mask)
8559 {
8560         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
8561 }
8562
8563 /**
8564  * Add tag item to matcher
8565  *
8566  * @param[in] dev
8567  *   The device to configure through.
8568  * @param[in, out] matcher
8569  *   Flow matcher.
8570  * @param[in, out] key
8571  *   Flow matcher value.
8572  * @param[in] item
8573  *   Flow pattern to translate.
8574  */
8575 static void
8576 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
8577                                 void *matcher, void *key,
8578                                 const struct rte_flow_item *item)
8579 {
8580         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
8581         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
8582         uint32_t mask, value;
8583
8584         MLX5_ASSERT(tag_v);
8585         value = tag_v->data;
8586         mask = tag_m ? tag_m->data : UINT32_MAX;
8587         if (tag_v->id == REG_C_0) {
8588                 struct mlx5_priv *priv = dev->data->dev_private;
8589                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8590                 uint32_t shl_c0 = rte_bsf32(msk_c0);
8591
8592                 mask &= msk_c0;
8593                 mask <<= shl_c0;
8594                 value <<= shl_c0;
8595         }
8596         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
8597 }
8598
8599 /**
8600  * Add TAG item to matcher
8601  *
8602  * @param[in] dev
8603  *   The device to configure through.
8604  * @param[in, out] matcher
8605  *   Flow matcher.
8606  * @param[in, out] key
8607  *   Flow matcher value.
8608  * @param[in] item
8609  *   Flow pattern to translate.
8610  */
8611 static void
8612 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
8613                            void *matcher, void *key,
8614                            const struct rte_flow_item *item)
8615 {
8616         const struct rte_flow_item_tag *tag_v = item->spec;
8617         const struct rte_flow_item_tag *tag_m = item->mask;
8618         enum modify_reg reg;
8619
8620         MLX5_ASSERT(tag_v);
8621         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
8622         /* Get the metadata register index for the tag. */
8623         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
8624         MLX5_ASSERT(reg > 0);
8625         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
8626 }
8627
8628 /**
8629  * Add source vport match to the specified matcher.
8630  *
8631  * @param[in, out] matcher
8632  *   Flow matcher.
8633  * @param[in, out] key
8634  *   Flow matcher value.
8635  * @param[in] port
8636  *   Source vport value to match.
8637  * @param[in] mask
8638  *   Mask.
8639  */
8640 static void
8641 flow_dv_translate_item_source_vport(void *matcher, void *key,
8642                                     int16_t port, uint16_t mask)
8643 {
8644         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8645         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8646
8647         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
8648         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
8649 }
8650
8651 /**
8652  * Translate port-id item to eswitch match on port-id.
8653  *
8654  * @param[in] dev
8655  *   The device to configure through.
8656  * @param[in, out] matcher
8657  *   Flow matcher.
8658  * @param[in, out] key
8659  *   Flow matcher value.
8660  * @param[in] item
8661  *   Flow pattern to translate.
8662  * @param[in] attr
8663  *   Flow attributes.
8664  *
8665  * @return
8666  *   0 on success, a negative errno value otherwise.
8667  */
8668 static int
8669 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
8670                                void *key, const struct rte_flow_item *item,
8671                                const struct rte_flow_attr *attr)
8672 {
8673         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
8674         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
8675         struct mlx5_priv *priv;
8676         uint16_t mask, id;
8677
8678         mask = pid_m ? pid_m->id : 0xffff;
8679         id = pid_v ? pid_v->id : dev->data->port_id;
8680         priv = mlx5_port_to_eswitch_info(id, item == NULL);
8681         if (!priv)
8682                 return -rte_errno;
8683         /*
8684          * Translate to vport field or to metadata, depending on mode.
8685          * Kernel can use either misc.source_port or half of C0 metadata
8686          * register.
8687          */
8688         if (priv->vport_meta_mask) {
8689                 /*
8690                  * Provide the hint for SW steering library
8691                  * to insert the flow into ingress domain and
8692                  * save the extra vport match.
8693                  */
8694                 if (mask == 0xffff && priv->vport_id == 0xffff &&
8695                     priv->pf_bond < 0 && attr->transfer)
8696                         flow_dv_translate_item_source_vport
8697                                 (matcher, key, priv->vport_id, mask);
8698                 /*
8699                  * We should always set the vport metadata register,
8700                  * otherwise the SW steering library can drop
8701                  * the rule if wire vport metadata value is not zero,
8702                  * it depends on kernel configuration.
8703                  */
8704                 flow_dv_translate_item_meta_vport(matcher, key,
8705                                                   priv->vport_meta_tag,
8706                                                   priv->vport_meta_mask);
8707         } else {
8708                 flow_dv_translate_item_source_vport(matcher, key,
8709                                                     priv->vport_id, mask);
8710         }
8711         return 0;
8712 }
8713
8714 /**
8715  * Add ICMP6 item to matcher and to the value.
8716  *
8717  * @param[in, out] matcher
8718  *   Flow matcher.
8719  * @param[in, out] key
8720  *   Flow matcher value.
8721  * @param[in] item
8722  *   Flow pattern to translate.
8723  * @param[in] inner
8724  *   Item is inner pattern.
8725  */
8726 static void
8727 flow_dv_translate_item_icmp6(void *matcher, void *key,
8728                               const struct rte_flow_item *item,
8729                               int inner)
8730 {
8731         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
8732         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
8733         void *headers_m;
8734         void *headers_v;
8735         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8736                                      misc_parameters_3);
8737         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8738         if (inner) {
8739                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8740                                          inner_headers);
8741                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8742         } else {
8743                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8744                                          outer_headers);
8745                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8746         }
8747         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8748         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
8749         if (!icmp6_v)
8750                 return;
8751         if (!icmp6_m)
8752                 icmp6_m = &rte_flow_item_icmp6_mask;
8753         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
8754         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
8755                  icmp6_v->type & icmp6_m->type);
8756         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
8757         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
8758                  icmp6_v->code & icmp6_m->code);
8759 }
8760
8761 /**
8762  * Add ICMP item to matcher and to the value.
8763  *
8764  * @param[in, out] matcher
8765  *   Flow matcher.
8766  * @param[in, out] key
8767  *   Flow matcher value.
8768  * @param[in] item
8769  *   Flow pattern to translate.
8770  * @param[in] inner
8771  *   Item is inner pattern.
8772  */
8773 static void
8774 flow_dv_translate_item_icmp(void *matcher, void *key,
8775                             const struct rte_flow_item *item,
8776                             int inner)
8777 {
8778         const struct rte_flow_item_icmp *icmp_m = item->mask;
8779         const struct rte_flow_item_icmp *icmp_v = item->spec;
8780         uint32_t icmp_header_data_m = 0;
8781         uint32_t icmp_header_data_v = 0;
8782         void *headers_m;
8783         void *headers_v;
8784         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8785                                      misc_parameters_3);
8786         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8787         if (inner) {
8788                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8789                                          inner_headers);
8790                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8791         } else {
8792                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8793                                          outer_headers);
8794                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8795         }
8796         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8797         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
8798         if (!icmp_v)
8799                 return;
8800         if (!icmp_m)
8801                 icmp_m = &rte_flow_item_icmp_mask;
8802         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
8803                  icmp_m->hdr.icmp_type);
8804         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
8805                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
8806         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
8807                  icmp_m->hdr.icmp_code);
8808         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
8809                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
8810         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
8811         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
8812         if (icmp_header_data_m) {
8813                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
8814                 icmp_header_data_v |=
8815                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
8816                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
8817                          icmp_header_data_m);
8818                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
8819                          icmp_header_data_v & icmp_header_data_m);
8820         }
8821 }
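/*
 * Layout note: icmp_header_data covers ICMP header bytes 4-7; it is
 * assembled above with the identifier in the upper 16 bits and the
 * sequence number in the lower 16 bits, so both are matched via one field.
 */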
8822
8823 /**
8824  * Add GTP item to matcher and to the value.
8825  *
8826  * @param[in, out] matcher
8827  *   Flow matcher.
8828  * @param[in, out] key
8829  *   Flow matcher value.
8830  * @param[in] item
8831  *   Flow pattern to translate.
8832  * @param[in] inner
8833  *   Item is inner pattern.
8834  */
8835 static void
8836 flow_dv_translate_item_gtp(void *matcher, void *key,
8837                            const struct rte_flow_item *item, int inner)
8838 {
8839         const struct rte_flow_item_gtp *gtp_m = item->mask;
8840         const struct rte_flow_item_gtp *gtp_v = item->spec;
8841         void *headers_m;
8842         void *headers_v;
8843         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8844                                      misc_parameters_3);
8845         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8846         uint16_t dport = RTE_GTPU_UDP_PORT;
8847
8848         if (inner) {
8849                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8850                                          inner_headers);
8851                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8852         } else {
8853                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8854                                          outer_headers);
8855                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8856         }
8857         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8858                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8859                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8860         }
8861         if (!gtp_v)
8862                 return;
8863         if (!gtp_m)
8864                 gtp_m = &rte_flow_item_gtp_mask;
8865         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
8866                  gtp_m->v_pt_rsv_flags);
8867         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
8868                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
8869         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
8870         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
8871                  gtp_v->msg_type & gtp_m->msg_type);
8872         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
8873                  rte_be_to_cpu_32(gtp_m->teid));
8874         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
8875                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
8876 }
8877
8878 /**
8879  * Add GTP PSC item to matcher.
8880  *
8881  * @param[in, out] matcher
8882  *   Flow matcher.
8883  * @param[in, out] key
8884  *   Flow matcher value.
8885  * @param[in] item
8886  *   Flow pattern to translate.
8887  */
8888 static int
8889 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
8890                                const struct rte_flow_item *item)
8891 {
8892         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
8893         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
8894         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8895                         misc_parameters_3);
8896         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8897         union {
8898                 uint32_t w32;
8899                 struct {
8900                         uint16_t seq_num;
8901                         uint8_t npdu_num;
8902                         uint8_t next_ext_header_type;
8903                 };
8904         } dw_2;
8905         uint8_t gtp_flags;
8906
8907         /* Always set E-flag match on one, regardless of GTP item settings. */
8908         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
8909         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8910         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
8911         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
8912         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8913         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
8914         /* Set next extension header type. */
8915         dw_2.seq_num = 0;
8916         dw_2.npdu_num = 0;
8917         dw_2.next_ext_header_type = 0xff;
8918         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
8919                  rte_cpu_to_be_32(dw_2.w32));
8920         dw_2.seq_num = 0;
8921         dw_2.npdu_num = 0;
8922         dw_2.next_ext_header_type = 0x85;
8923         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
8924                  rte_cpu_to_be_32(dw_2.w32));
8925         if (gtp_psc_v) {
8926                 union {
8927                         uint32_t w32;
8928                         struct {
8929                                 uint8_t len;
8930                                 uint8_t type_flags;
8931                                 uint8_t qfi;
8932                                 uint8_t reserved;
8933                         };
8934                 } dw_0;
8935
8936                 /* Set extension header PDU type and QoS. */
8937                 if (!gtp_psc_m)
8938                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
8939                 dw_0.w32 = 0;
8940                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
8941                 dw_0.qfi = gtp_psc_m->qfi;
8942                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
8943                          rte_cpu_to_be_32(dw_0.w32));
8944                 dw_0.w32 = 0;
8945                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
8946                                                         gtp_psc_m->pdu_type);
8947                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
8948                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
8949                          rte_cpu_to_be_32(dw_0.w32));
8950         }
8951         return 0;
8952 }
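/*
 * Note: the 0x85 programmed above is the GTP-U "PDU Session Container"
 * extension header type, so the value side of gtpu_dw_2 requires that
 * extension to be present, while the 0xff mask makes the type byte exact.
 */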
8953
8954 /**
8955  * Add eCPRI item to matcher and to the value.
8956  *
8957  * @param[in] dev
8958  *   The device to configure through.
8959  * @param[in, out] matcher
8960  *   Flow matcher.
8961  * @param[in, out] key
8962  *   Flow matcher value.
8963  * @param[in] item
8964  *   Flow pattern to translate.
8967  */
8968 static void
8969 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
8970                              void *key, const struct rte_flow_item *item)
8971 {
8972         struct mlx5_priv *priv = dev->data->dev_private;
8973         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
8974         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
8975         struct rte_ecpri_common_hdr common;
8976         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
8977                                      misc_parameters_4);
8978         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
8979         uint32_t *samples;
8980         void *dw_m;
8981         void *dw_v;
8982
8983         if (!ecpri_v)
8984                 return;
8985         if (!ecpri_m)
8986                 ecpri_m = &rte_flow_item_ecpri_mask;
8987         /*
8988          * At most four DW samples are supported in a single matcher now.
8989          * Two are used for eCPRI matching:
8990          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
8991          * 2. ID of a message: one or two bytes, mask 0xffff0000 or
8992          *    0xff000000, if any.
8993          */
8994         if (!ecpri_m->hdr.common.u32)
8995                 return;
8996         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
8997         /* Need to take the whole DW as the mask to fill the entry. */
8998         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
8999                             prog_sample_field_value_0);
9000         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9001                             prog_sample_field_value_0);
9002         /* Already big endian (network order) in the header. */
9003         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9004         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9005         /* Sample#0, used for matching type, offset 0. */
9006         MLX5_SET(fte_match_set_misc4, misc4_m,
9007                  prog_sample_field_id_0, samples[0]);
9008         /* It makes no sense to set the sample ID in the mask field. */
9009         MLX5_SET(fte_match_set_misc4, misc4_v,
9010                  prog_sample_field_id_0, samples[0]);
9011         /*
9012          * Check whether the message body part needs to be matched.
9013          * Wildcard rules matching only the type field should be supported.
9014          */
9015         if (ecpri_m->hdr.dummy[0]) {
9016                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9017                 switch (common.type) {
9018                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9019                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9020                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9021                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9022                                             prog_sample_field_value_1);
9023                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9024                                             prog_sample_field_value_1);
9025                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9026                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9027                                             ecpri_m->hdr.dummy[0];
9028                         /* Sample#1, to match message body, offset 4. */
9029                         MLX5_SET(fte_match_set_misc4, misc4_m,
9030                                  prog_sample_field_id_1, samples[1]);
9031                         MLX5_SET(fte_match_set_misc4, misc4_v,
9032                                  prog_sample_field_id_1, samples[1]);
9033                         break;
9034                 default:
9035                         /* Others, do not match any sample ID. */
9036                         break;
9037                 }
9038         }
9039 }
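/*
 * Resulting layout (sketch): sample #0 always matches the first header
 * dword carrying the type byte; sample #1 is armed only when the rule also
 * constrains the message body, so type-only wildcard rules leave it unset.
 */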
9040
9041 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9042
9043 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9044         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9045                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
9046
9047 /**
9048  * Calculate flow matcher enable bitmap.
9049  *
9050  * @param match_criteria
9051  *   Pointer to flow matcher criteria.
9052  *
9053  * @return
9054  *   Bitmap of enabled fields.
9055  */
9056 static uint8_t
9057 flow_dv_matcher_enable(uint32_t *match_criteria)
9058 {
9059         uint8_t match_criteria_enable;
9060
9061         match_criteria_enable =
9062                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9063                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9064         match_criteria_enable |=
9065                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9066                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9067         match_criteria_enable |=
9068                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9069                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9070         match_criteria_enable |=
9071                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9072                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9073         match_criteria_enable |=
9074                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9075                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9076         match_criteria_enable |=
9077                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9078                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9079         return match_criteria_enable;
9080 }
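/*
 * Example (illustrative): a matcher using only outer headers and metadata
 * registers yields (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT).
 */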
9081
9082 struct mlx5_hlist_entry *
9083 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9084 {
9085         struct mlx5_dev_ctx_shared *sh = list->ctx;
9086         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9087         struct rte_eth_dev *dev = ctx->dev;
9088         struct mlx5_flow_tbl_data_entry *tbl_data;
9089         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9090         struct rte_flow_error *error = ctx->error;
9091         union mlx5_flow_tbl_key key = { .v64 = key64 };
9092         struct mlx5_flow_tbl_resource *tbl;
9093         void *domain;
9094         uint32_t idx = 0;
9095         int ret;
9096
9097         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9098         if (!tbl_data) {
9099                 rte_flow_error_set(error, ENOMEM,
9100                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9101                                    NULL,
9102                                    "cannot allocate flow table data entry");
9103                 return NULL;
9104         }
9105         tbl_data->idx = idx;
9106         tbl_data->tunnel = tt_prm->tunnel;
9107         tbl_data->group_id = tt_prm->group_id;
9108         tbl_data->external = !!tt_prm->external;
9109         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9110         tbl_data->is_egress = !!key.direction;
9111         tbl_data->is_transfer = !!key.domain;
9112         tbl_data->dummy = !!key.dummy;
9113         tbl_data->table_id = key.table_id;
9114         tbl = &tbl_data->tbl;
9115         if (key.dummy)
9116                 return &tbl_data->entry;
9117         if (key.domain)
9118                 domain = sh->fdb_domain;
9119         else if (key.direction)
9120                 domain = sh->tx_domain;
9121         else
9122                 domain = sh->rx_domain;
9123         ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
9124         if (ret) {
9125                 rte_flow_error_set(error, ENOMEM,
9126                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9127                                    NULL, "cannot create flow table object");
9128                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9129                 return NULL;
9130         }
9131         if (key.table_id) {
9132                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9133                                         (tbl->obj, &tbl_data->jump.action);
9134                 if (ret) {
9135                         rte_flow_error_set(error, ENOMEM,
9136                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9137                                            NULL,
9138                                            "cannot create flow jump action");
9139                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9140                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9141                         return NULL;
9142                 }
9143         }
9144         MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
9145               key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
9146               key.table_id);
9147         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9148                              flow_dv_matcher_create_cb,
9149                              flow_dv_matcher_match_cb,
9150                              flow_dv_matcher_remove_cb);
9151         return &tbl_data->entry;
9152 }
9153
9154 int
9155 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9156                      struct mlx5_hlist_entry *entry, uint64_t key64,
9157                      void *cb_ctx __rte_unused)
9158 {
9159         struct mlx5_flow_tbl_data_entry *tbl_data =
9160                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9161         union mlx5_flow_tbl_key key = { .v64 = key64 };
9162
9163         return tbl_data->table_id != key.table_id ||
9164                tbl_data->dummy != key.dummy ||
9165                tbl_data->is_transfer != key.domain ||
9166                tbl_data->is_egress != key.direction;
9167 }
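/*
 * Note: as with the other cache match callbacks, the return value follows
 * memcmp() semantics: 0 means the entry matches the key.
 */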
9168
9169 /**
9170  * Get a flow table.
9171  *
9172  * @param[in, out] dev
9173  *   Pointer to rte_eth_dev structure.
9174  * @param[in] table_id
9175  *   Table id to use.
9176  * @param[in] egress
9177  *   Direction of the table.
9178  * @param[in] transfer
9179  *   E-Switch or NIC flow.
9180  * @param[in] dummy
9181  *   Dummy entry for dv API.
9182  * @param[out] error
9183  *   pointer to error structure.
9184  *
9185  * @return
9186  *   Returns the table resource based on the key, NULL in case of failure.
9187  */
9188 struct mlx5_flow_tbl_resource *
9189 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
9190                          uint32_t table_id, uint8_t egress,
9191                          uint8_t transfer,
9192                          bool external,
9193                          const struct mlx5_flow_tunnel *tunnel,
9194                          uint32_t group_id, uint8_t dummy,
9195                          struct rte_flow_error *error)
9196 {
9197         struct mlx5_priv *priv = dev->data->dev_private;
9198         union mlx5_flow_tbl_key table_key = {
9199                 {
9200                         .table_id = table_id,
9201                         .dummy = dummy,
9202                         .domain = !!transfer,
9203                         .direction = !!egress,
9204                 }
9205         };
9206         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
9207                 .tunnel = tunnel,
9208                 .group_id = group_id,
9209                 .external = external,
9210         };
9211         struct mlx5_flow_cb_ctx ctx = {
9212                 .dev = dev,
9213                 .error = error,
9214                 .data = &tt_prm,
9215         };
9216         struct mlx5_hlist_entry *entry;
9217         struct mlx5_flow_tbl_data_entry *tbl_data;
9218
9219         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
9220         if (!entry) {
9221                 rte_flow_error_set(error, ENOMEM,
9222                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9223                                    "cannot get table");
9224                 return NULL;
9225         }
9226         DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
9227                 table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
9228         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9229         return &tbl_data->tbl;
9230 }
9231
9232 void
9233 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
9234                       struct mlx5_hlist_entry *entry)
9235 {
9236         struct mlx5_dev_ctx_shared *sh = list->ctx;
9237         struct mlx5_flow_tbl_data_entry *tbl_data =
9238                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9239
9240         MLX5_ASSERT(entry && sh);
9241         if (tbl_data->jump.action)
9242                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
9243         if (tbl_data->tbl.obj)
9244                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
9245         if (tbl_data->tunnel_offload && tbl_data->external) {
9246                 struct mlx5_hlist_entry *he;
9247                 struct mlx5_hlist *tunnel_grp_hash;
9248                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
9249                 union tunnel_tbl_key tunnel_key = {
9250                         .tunnel_id = tbl_data->tunnel ?
9251                                         tbl_data->tunnel->tunnel_id : 0,
9252                         .group = tbl_data->group_id
9253                 };
9254                 uint32_t table_id = tbl_data->table_id;
9255
9256                 tunnel_grp_hash = tbl_data->tunnel ?
9257                                         tbl_data->tunnel->groups :
9258                                         thub->groups;
9259                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
9260                 if (he)
9261                         mlx5_hlist_unregister(tunnel_grp_hash, he);
9262                 DRV_LOG(DEBUG,
9263                         "Table_id %u tunnel %u group %u released.",
9264                         table_id,
9265                         tbl_data->tunnel ?
9266                         tbl_data->tunnel->tunnel_id : 0,
9267                         tbl_data->group_id);
9268         }
9269         mlx5_cache_list_destroy(&tbl_data->matchers);
9270         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
9271 }
9272
9273 /**
9274  * Release a flow table.
9275  *
9276  * @param[in] sh
9277  *   Pointer to device shared structure.
9278  * @param[in] tbl
9279  *   Table resource to be released.
9280  *
9281  * @return
9282  *   Returns 0 if the table was released, 1 otherwise.
9283  */
9284 static int
9285 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
9286                              struct mlx5_flow_tbl_resource *tbl)
9287 {
9288         struct mlx5_flow_tbl_data_entry *tbl_data;
9289
9290         if (!tbl)
9291                 return 0;
9292         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9293         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
9294 }
9295
9296 int
9297 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
9298                          struct mlx5_cache_entry *entry, void *cb_ctx)
9299 {
9300         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9301         struct mlx5_flow_dv_matcher *ref = ctx->data;
9302         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
9303                                                         entry);
9304
9305         return cur->crc != ref->crc ||
9306                cur->priority != ref->priority ||
9307                memcmp((const void *)cur->mask.buf,
9308                       (const void *)ref->mask.buf, ref->mask.size);
9309 }
9310
9311 struct mlx5_cache_entry *
9312 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
9313                           struct mlx5_cache_entry *entry __rte_unused,
9314                           void *cb_ctx)
9315 {
9316         struct mlx5_dev_ctx_shared *sh = list->ctx;
9317         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9318         struct mlx5_flow_dv_matcher *ref = ctx->data;
9319         struct mlx5_flow_dv_matcher *cache;
9320         struct mlx5dv_flow_matcher_attr dv_attr = {
9321                 .type = IBV_FLOW_ATTR_NORMAL,
9322                 .match_mask = (void *)&ref->mask,
9323         };
9324         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
9325                                                             typeof(*tbl), tbl);
9326         int ret;
9327
9328         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
9329         if (!cache) {
9330                 rte_flow_error_set(ctx->error, ENOMEM,
9331                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9332                                    "cannot create matcher");
9333                 return NULL;
9334         }
9335         *cache = *ref;
9336         dv_attr.match_criteria_enable =
9337                 flow_dv_matcher_enable(cache->mask.buf);
9338         dv_attr.priority = ref->priority;
9339         if (tbl->is_egress)
9340                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
9341         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
9342                                                &cache->matcher_object);
9343         if (ret) {
9344                 mlx5_free(cache);
9345                 rte_flow_error_set(ctx->error, ENOMEM,
9346                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9347                                    "cannot create matcher");
9348                 return NULL;
9349         }
9350         return &cache->entry;
9351 }
9352
9353 /**
9354  * Register the flow matcher.
9355  *
9356  * @param[in, out] dev
9357  *   Pointer to rte_eth_dev structure.
9358  * @param[in, out] ref
9359  *   Pointer to the flow matcher reference.
9360  * @param[in, out] key
9361  *   Pointer to flow table key.
9362  * @param[in, out] dev_flow
9363  *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Pointer to the flow tunnel, or NULL.
 * @param[in] group_id
 *   Flow table group id.
9364  * @param[out] error
9365  *   pointer to error structure.
9366  *
9367  * @return
9368  *   0 on success, a negative errno value otherwise and rte_errno is set.
9369  */
9370 static int
9371 flow_dv_matcher_register(struct rte_eth_dev *dev,
9372                          struct mlx5_flow_dv_matcher *ref,
9373                          union mlx5_flow_tbl_key *key,
9374                          struct mlx5_flow *dev_flow,
9375                          const struct mlx5_flow_tunnel *tunnel,
9376                          uint32_t group_id,
9377                          struct rte_flow_error *error)
9378 {
9379         struct mlx5_cache_entry *entry;
9380         struct mlx5_flow_dv_matcher *cache;
9381         struct mlx5_flow_tbl_resource *tbl;
9382         struct mlx5_flow_tbl_data_entry *tbl_data;
9383         struct mlx5_flow_cb_ctx ctx = {
9384                 .error = error,
9385                 .data = ref,
9386         };
9387
9388         /*
9389          * Tunnel offload API requires this registration for cases when
9390          * a tunnel match rule was inserted before the tunnel set rule.
9391          */
9392         tbl = flow_dv_tbl_resource_get(dev, key->table_id,
9393                                        key->direction, key->domain,
9394                                        dev_flow->external, tunnel,
9395                                        group_id, 0, error);
9396         if (!tbl)
9397                 return -rte_errno;      /* No need to refill the error info */
9398         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9399         ref->tbl = tbl;
9400         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
9401         if (!entry) {
9402                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9403                 return rte_flow_error_set(error, ENOMEM,
9404                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9405                                           "cannot allocate ref memory");
9406         }
9407         cache = container_of(entry, typeof(*cache), entry);
9408         dev_flow->handle->dvh.matcher = cache;
9409         return 0;
9410 }
9411
9412 struct mlx5_hlist_entry *
9413 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
9414 {
9415         struct mlx5_dev_ctx_shared *sh = list->ctx;
9416         struct rte_flow_error *error = ctx;
9417         struct mlx5_flow_dv_tag_resource *entry;
9418         uint32_t idx = 0;
9419         int ret;
9420
9421         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
9422         if (!entry) {
9423                 rte_flow_error_set(error, ENOMEM,
9424                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9425                                    "cannot allocate resource memory");
9426                 return NULL;
9427         }
9428         entry->idx = idx;
9429         entry->tag_id = key;
9430         ret = mlx5_flow_os_create_flow_action_tag(key,
9431                                                   &entry->action);
9432         if (ret) {
9433                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
9434                 rte_flow_error_set(error, ENOMEM,
9435                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9436                                    NULL, "cannot create action");
9437                 return NULL;
9438         }
9439         return &entry->entry;
9440 }
9441
9442 int
9443 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
9444                      struct mlx5_hlist_entry *entry, uint64_t key,
9445                      void *cb_ctx __rte_unused)
9446 {
9447         struct mlx5_flow_dv_tag_resource *tag =
9448                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9449
9450         return key != tag->tag_id;
9451 }
9452
9453 /**
9454  * Find existing tag resource or create and register a new one.
9455  *
9456  * @param[in, out] dev
9457  *   Pointer to rte_eth_dev structure.
9458  * @param[in, out] tag_be24
9459  *   Tag value in big endian, right-shifted by 8 bits.
9460  * @param[in, out] dev_flow
9461  *   Pointer to the dev_flow.
9462  * @param[out] error
9463  *   pointer to error structure.
9464  *
9465  * @return
9466  *   0 on success, a negative errno value otherwise and rte_errno is set.
9467  */
9468 static int
9469 flow_dv_tag_resource_register
9470                         (struct rte_eth_dev *dev,
9471                          uint32_t tag_be24,
9472                          struct mlx5_flow *dev_flow,
9473                          struct rte_flow_error *error)
9474 {
9475         struct mlx5_priv *priv = dev->data->dev_private;
9476         struct mlx5_flow_dv_tag_resource *cache_resource;
9477         struct mlx5_hlist_entry *entry;
9478
9479         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
9480         if (entry) {
9481                 cache_resource = container_of
9482                         (entry, struct mlx5_flow_dv_tag_resource, entry);
9483                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
9484                 dev_flow->dv.tag_resource = cache_resource;
9485                 return 0;
9486         }
9487         return -rte_errno;
9488 }
9489
9490 void
9491 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
9492                       struct mlx5_hlist_entry *entry)
9493 {
9494         struct mlx5_dev_ctx_shared *sh = list->ctx;
9495         struct mlx5_flow_dv_tag_resource *tag =
9496                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9497
9498         MLX5_ASSERT(tag && sh && tag->action);
9499         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
9500         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
9501         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
9502 }
9503
9504 /**
9505  * Release the tag.
9506  *
9507  * @param dev
9508  *   Pointer to Ethernet device.
9509  * @param tag_idx
9510  *   Tag index.
9511  *
9512  * @return
9513  *   1 while a reference on it exists, 0 when freed.
9514  */
9515 static int
9516 flow_dv_tag_release(struct rte_eth_dev *dev,
9517                     uint32_t tag_idx)
9518 {
9519         struct mlx5_priv *priv = dev->data->dev_private;
9520         struct mlx5_flow_dv_tag_resource *tag;
9521
9522         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
9523         if (!tag)
9524                 return 0;
9525         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
9526                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
9527         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
9528 }
9529
9530 /**
9531  * Translate port ID action to vport.
9532  *
9533  * @param[in] dev
9534  *   Pointer to rte_eth_dev structure.
9535  * @param[in] action
9536  *   Pointer to the port ID action.
9537  * @param[out] dst_port_id
9538  *   The target port ID.
9539  * @param[out] error
9540  *   Pointer to the error structure.
9541  *
9542  * @return
9543  *   0 on success, a negative errno value otherwise and rte_errno is set.
9544  */
9545 static int
9546 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
9547                                  const struct rte_flow_action *action,
9548                                  uint32_t *dst_port_id,
9549                                  struct rte_flow_error *error)
9550 {
9551         uint32_t port;
9552         struct mlx5_priv *priv;
9553         const struct rte_flow_action_port_id *conf =
9554                         (const struct rte_flow_action_port_id *)action->conf;
9555
9556         port = conf->original ? dev->data->port_id : conf->id;
9557         priv = mlx5_port_to_eswitch_info(port, false);
9558         if (!priv)
9559                 return rte_flow_error_set(error, -rte_errno,
9560                                           RTE_FLOW_ERROR_TYPE_ACTION,
9561                                           NULL,
9562                                           "No eswitch info was found for port");
9563 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
9564         /*
9565          * This parameter is transferred to
9566          * mlx5dv_dr_action_create_dest_ib_port().
9567          */
9568         *dst_port_id = priv->dev_port;
9569 #else
9570         /*
9571          * Legacy mode, no LAG configuration is supported.
9572          * This parameter is transferred to
9573          * mlx5dv_dr_action_create_dest_vport().
9574          */
9575         *dst_port_id = priv->vport_id;
9576 #endif
9577         return 0;
9578 }
9579
9580 /**
9581  * Create a counter with aging configuration.
9582  *
9583  * @param[in] dev
9584  *   Pointer to rte_eth_dev structure.
9585  * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] count
9586  *   Pointer to the counter action configuration.
9587  * @param[in] age
9588  *   Pointer to the aging action configuration.
9589  *
9590  * @return
9591  *   Index to flow counter on success, 0 otherwise.
9592  */
9593 static uint32_t
9594 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
9595                                 struct mlx5_flow *dev_flow,
9596                                 const struct rte_flow_action_count *count,
9597                                 const struct rte_flow_action_age *age)
9598 {
9599         uint32_t counter;
9600         struct mlx5_age_param *age_param;
9601
9602         if (count && count->shared)
9603                 counter = flow_dv_counter_get_shared(dev, count->id);
9604         else
9605                 counter = flow_dv_counter_alloc(dev, !!age);
9606         if (!counter || age == NULL)
9607                 return counter;
9608         age_param = flow_dv_counter_idx_get_age(dev, counter);
9609         age_param->context = age->context ? age->context :
9610                 (void *)(uintptr_t)(dev_flow->flow_idx);
9611         age_param->timeout = age->timeout;
9612         age_param->port_id = dev->data->port_id;
9613         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
9614         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
9615         return counter;
9616 }
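/*
 * Usage note: when an AGE action is present the counter comes from the
 * aging pools and its parameters are armed here; the aging mechanism is
 * then expected to compare sec_since_last_hit against the configured
 * timeout and report the context as aged out.
 */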
9617
9618 /**
9619  * Add Tx queue matcher
9620  *
9621  * @param[in] dev
9622  *   Pointer to the dev struct.
9623  * @param[in, out] matcher
9624  *   Flow matcher.
9625  * @param[in, out] key
9626  *   Flow matcher value.
9627  * @param[in] item
9628  *   Flow pattern to translate.
9631  */
9632 static void
9633 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
9634                                 void *matcher, void *key,
9635                                 const struct rte_flow_item *item)
9636 {
9637         const struct mlx5_rte_flow_item_tx_queue *queue_m;
9638         const struct mlx5_rte_flow_item_tx_queue *queue_v;
9639         void *misc_m =
9640                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9641         void *misc_v =
9642                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9643         struct mlx5_txq_ctrl *txq;
9644         uint32_t queue;
9645
9647         queue_m = (const void *)item->mask;
9648         if (!queue_m)
9649                 return;
9650         queue_v = (const void *)item->spec;
9651         if (!queue_v)
9652                 return;
9653         txq = mlx5_txq_get(dev, queue_v->queue);
9654         if (!txq)
9655                 return;
9656         queue = txq->obj->sq->id;
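             /*
              * Match the send queue number (SQN) backing this Tx queue via
              * the source_sqn field of the misc parameters.
              */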
9657         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
9658         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
9659                  queue & queue_m->queue);
9660         mlx5_txq_release(dev, queue_v->queue);
9661 }
9662
9663 /**
9664  * Set the hash fields according to the @p flow information.
9665  *
9666  * @param[in] dev_flow
9667  *   Pointer to the mlx5_flow.
9668  * @param[in] rss_desc
9669  *   Pointer to the mlx5_flow_rss_desc.
9670  */
9671 static void
9672 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
9673                        struct mlx5_flow_rss_desc *rss_desc)
9674 {
9675         uint64_t items = dev_flow->handle->layers;
9676         int rss_inner = 0;
9677         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
9678
9679         dev_flow->hash_fields = 0;
9680 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
9681         if (rss_desc->level >= 2) {
9682                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
9683                 rss_inner = 1;
9684         }
9685 #endif
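             /*
              * Only the outermost L3/L4 layers present in the flow, or the
              * innermost ones when inner RSS is requested, contribute to
              * the hash fields below.
              */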
9686         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
9687             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
9688                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
9689                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
9690                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
9691                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
9692                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
9693                         else
9694                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
9695                 }
9696         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
9697                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
9698                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
9699                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
9700                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
9701                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
9702                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
9703                         else
9704                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
9705                 }
9706         }
9707         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
9708             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
9709                 if (rss_types & ETH_RSS_UDP) {
9710                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
9711                                 dev_flow->hash_fields |=
9712                                                 IBV_RX_HASH_SRC_PORT_UDP;
9713                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
9714                                 dev_flow->hash_fields |=
9715                                                 IBV_RX_HASH_DST_PORT_UDP;
9716                         else
9717                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
9718                 }
9719         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
9720                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
9721                 if (rss_types & ETH_RSS_TCP) {
9722                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
9723                                 dev_flow->hash_fields |=
9724                                                 IBV_RX_HASH_SRC_PORT_TCP;
9725                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
9726                                 dev_flow->hash_fields |=
9727                                                 IBV_RX_HASH_DST_PORT_TCP;
9728                         else
9729                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
9730                 }
9731         }
9732 }
9733
9734 /**
9735  * Prepare an Rx Hash queue.
9736  *
9737  * @param dev
9738  *   Pointer to Ethernet device.
9739  * @param[in] dev_flow
9740  *   Pointer to the mlx5_flow.
9741  * @param[in] rss_desc
9742  *   Pointer to the mlx5_flow_rss_desc.
9743  * @param[out] hrxq_idx
9744  *   Hash Rx queue index.
9745  *
9746  * @return
9747  *   Pointer to the hash Rx queue object on success, NULL otherwise and
 *   rte_errno is set.
9748  */
9749 static struct mlx5_hrxq *
9750 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
9751                      struct mlx5_flow *dev_flow,
9752                      struct mlx5_flow_rss_desc *rss_desc,
9753                      uint32_t *hrxq_idx)
9754 {
9755         struct mlx5_priv *priv = dev->data->dev_private;
9756         struct mlx5_flow_handle *dh = dev_flow->handle;
9757         struct mlx5_hrxq *hrxq;
9758
9759         MLX5_ASSERT(rss_desc->queue_num);
9760         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
9761         rss_desc->hash_fields = dev_flow->hash_fields;
9762         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
9763         rss_desc->shared_rss = 0;
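             /*
              * mlx5_hrxq_get() returns the index of a matching cached hash
              * Rx queue or creates a new one; the object itself lives in
              * the HRXQ indexed pool.
              */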
9764         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
9765         if (!*hrxq_idx)
9766                 return NULL;
9767         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
9768                               *hrxq_idx);
9769         return hrxq;
9770 }
9771
9772 /**
9773  * Release sample sub action resource.
9774  *
9775  * @param[in, out] dev
9776  *   Pointer to rte_eth_dev structure.
9777  * @param[in] act_res
9778  *   Pointer to sample sub action resource.
9779  */
9780 static void
9781 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
9782                                    struct mlx5_flow_sub_actions_idx *act_res)
9783 {
9784         if (act_res->rix_hrxq) {
9785                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
9786                 act_res->rix_hrxq = 0;
9787         }
9788         if (act_res->rix_encap_decap) {
9789                 flow_dv_encap_decap_resource_release(dev,
9790                                                      act_res->rix_encap_decap);
9791                 act_res->rix_encap_decap = 0;
9792         }
9793         if (act_res->rix_port_id_action) {
9794                 flow_dv_port_id_action_resource_release(dev,
9795                                                 act_res->rix_port_id_action);
9796                 act_res->rix_port_id_action = 0;
9797         }
9798         if (act_res->rix_tag) {
9799                 flow_dv_tag_release(dev, act_res->rix_tag);
9800                 act_res->rix_tag = 0;
9801         }
9802         if (act_res->rix_jump) {
9803                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
9804                 act_res->rix_jump = 0;
9805         }
9806 }
9807
9808 int
9809 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
9810                         struct mlx5_cache_entry *entry, void *cb_ctx)
9811 {
9812         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9813         struct rte_eth_dev *dev = ctx->dev;
9814         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9815         struct mlx5_flow_dv_sample_resource *cache_resource =
9816                         container_of(entry, typeof(*cache_resource), entry);
9817
9818         if (resource->ratio == cache_resource->ratio &&
9819             resource->ft_type == cache_resource->ft_type &&
9820             resource->ft_id == cache_resource->ft_id &&
9821             resource->set_action == cache_resource->set_action &&
9822             !memcmp((void *)&resource->sample_act,
9823                     (void *)&cache_resource->sample_act,
9824                     sizeof(struct mlx5_flow_sub_actions_list))) {
9825                 /*
9826                  * Existing sample action should release the prepared
9827                  * sub-actions reference counter.
9828                  */
9829                 flow_dv_sample_sub_actions_release(dev,
9830                                                 &resource->sample_idx);
9831                 return 0;
9832         }
9833         return 1;
9834 }
9835
9836 struct mlx5_cache_entry *
9837 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
9838                          struct mlx5_cache_entry *entry __rte_unused,
9839                          void *cb_ctx)
9840 {
9841         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9842         struct rte_eth_dev *dev = ctx->dev;
9843         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9844         void **sample_dv_actions = resource->sub_actions;
9845         struct mlx5_flow_dv_sample_resource *cache_resource;
9846         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
9847         struct mlx5_priv *priv = dev->data->dev_private;
9848         struct mlx5_dev_ctx_shared *sh = priv->sh;
9849         struct mlx5_flow_tbl_resource *tbl;
9850         uint32_t idx = 0;
9851         const uint32_t next_ft_step = 1;
9852         uint32_t next_ft_id = resource->ft_id + next_ft_step;
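             /* Non-sampled traffic continues to the next flow table level. */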
9853         uint8_t is_egress = 0;
9854         uint8_t is_transfer = 0;
9855         struct rte_flow_error *error = ctx->error;
9856
9857         /* Register new sample resource. */
9858         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
9859         if (!cache_resource) {
9860                 rte_flow_error_set(error, ENOMEM,
9861                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9862                                           NULL,
9863                                           "cannot allocate resource memory");
9864                 return NULL;
9865         }
9866         *cache_resource = *resource;
9867         /* Create normal path table level */
9868         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
9869                 is_transfer = 1;
9870         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
9871                 is_egress = 1;
9872         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
9873                                         is_egress, is_transfer,
9874                                         true, NULL, 0, 0, error);
9875         if (!tbl) {
9876                 rte_flow_error_set(error, ENOMEM,
9877                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9878                                           NULL,
9879                                           "failed to create normal path table "
9880                                           "for sample");
9881                 goto error;
9882         }
9883         cache_resource->normal_path_tbl = tbl;
9884         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
9885                 if (!sh->default_miss_action) {
9886                         rte_flow_error_set(error, ENOMEM,
9887                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9888                                                 NULL,
9889                                                 "default miss action was not "
9890                                                 "created");
9891                         goto error;
9892                 }
9893                 sample_dv_actions[resource->sample_act.actions_num++] =
9894                                                 sh->default_miss_action;
9895         }
9896         /* Create a DR sample action */
9897         sampler_attr.sample_ratio = cache_resource->ratio;
9898         sampler_attr.default_next_table = tbl->obj;
9899         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
9900         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
9901                                                         &sample_dv_actions[0];
9902         sampler_attr.action = cache_resource->set_action;
9903         if (mlx5_os_flow_dr_create_flow_action_sampler
9904                         (&sampler_attr, &cache_resource->verbs_action)) {
9905                 rte_flow_error_set(error, ENOMEM,
9906                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9907                                         NULL, "cannot create sample action");
9908                 goto error;
9909         }
9910         cache_resource->idx = idx;
9911         cache_resource->dev = dev;
9912         return &cache_resource->entry;
9913 error:
9914         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
9915                 flow_dv_sample_sub_actions_release(dev,
9916                                                    &cache_resource->sample_idx);
9917         if (cache_resource->normal_path_tbl)
9918                 flow_dv_tbl_resource_release(MLX5_SH(dev),
9919                                 cache_resource->normal_path_tbl);
9920         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
9921         return NULL;
9923 }
9924
9925 /**
9926  * Find existing sample resource or create and register a new one.
9927  *
9928  * @param[in, out] dev
9929  *   Pointer to rte_eth_dev structure.
9930  * @param[in] resource
9931  *   Pointer to sample resource.
9932  * @param[in, out] dev_flow
9933  *   Pointer to the dev_flow.
9934  * @param[out] error
9935  *   Pointer to the error structure.
9936  *
9937  * @return
9938  *   0 on success, a negative errno value otherwise and rte_errno is set.
9939  */
9940 static int
9941 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
9942                          struct mlx5_flow_dv_sample_resource *resource,
9943                          struct mlx5_flow *dev_flow,
9944                          struct rte_flow_error *error)
9945 {
9946         struct mlx5_flow_dv_sample_resource *cache_resource;
9947         struct mlx5_cache_entry *entry;
9948         struct mlx5_priv *priv = dev->data->dev_private;
9949         struct mlx5_flow_cb_ctx ctx = {
9950                 .dev = dev,
9951                 .error = error,
9952                 .data = resource,
9953         };
9954
9955         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
9956         if (!entry)
9957                 return -rte_errno;
9958         cache_resource = container_of(entry, typeof(*cache_resource), entry);
9959         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
9960         dev_flow->dv.sample_res = cache_resource;
9961         return 0;
9962 }
9963
9964 int
9965 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
9966                             struct mlx5_cache_entry *entry, void *cb_ctx)
9967 {
9968         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9969         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
9970         struct rte_eth_dev *dev = ctx->dev;
9971         struct mlx5_flow_dv_dest_array_resource *cache_resource =
9972                         container_of(entry, typeof(*cache_resource), entry);
9973         uint32_t idx = 0;
9974
9975         if (resource->num_of_dest == cache_resource->num_of_dest &&
9976             resource->ft_type == cache_resource->ft_type &&
9977             !memcmp((void *)cache_resource->sample_act,
9978                     (void *)resource->sample_act,
9979                    (resource->num_of_dest *
9980                    sizeof(struct mlx5_flow_sub_actions_list)))) {
9981                 /*
9982                  * Existing sample action should release the prepared
9983                  * sub-actions reference counter.
9984                  */
9985                 for (idx = 0; idx < resource->num_of_dest; idx++)
9986                         flow_dv_sample_sub_actions_release(dev,
9987                                         &resource->sample_idx[idx]);
9988                 return 0;
9989         }
9990         return 1;
9991 }
9992
9993 struct mlx5_cache_entry *
9994 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
9995                          struct mlx5_cache_entry *entry __rte_unused,
9996                          void *cb_ctx)
9997 {
9998         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9999         struct rte_eth_dev *dev = ctx->dev;
10000         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10001         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10002         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10003         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10004         struct mlx5_priv *priv = dev->data->dev_private;
10005         struct mlx5_dev_ctx_shared *sh = priv->sh;
10006         struct mlx5_flow_sub_actions_list *sample_act;
10007         struct mlx5dv_dr_domain *domain;
10008         uint32_t idx = 0, res_idx = 0;
10009         struct rte_flow_error *error = ctx->error;
10010         uint64_t action_flags;
10011         int ret;
10012
10013         /* Register new destination array resource. */
10014         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10015                                             &res_idx);
10016         if (!cache_resource) {
10017                 rte_flow_error_set(error, ENOMEM,
10018                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10019                                           NULL,
10020                                           "cannot allocate resource memory");
10021                 return NULL;
10022         }
10023         *cache_resource = *resource;
10024         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10025                 domain = sh->fdb_domain;
10026         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10027                 domain = sh->rx_domain;
10028         else
10029                 domain = sh->tx_domain;
10030         for (idx = 0; idx < resource->num_of_dest; idx++) {
10031                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10032                                  mlx5_malloc(MLX5_MEM_ZERO,
10033                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10034                                  0, SOCKET_ID_ANY);
10035                 if (!dest_attr[idx]) {
10036                         rte_flow_error_set(error, ENOMEM,
10037                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10038                                            NULL,
10039                                            "cannot allocate resource memory");
10040                         goto error;
10041                 }
10042                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10043                 sample_act = &resource->sample_act[idx];
10044                 action_flags = sample_act->action_flags;
10045                 switch (action_flags) {
10046                 case MLX5_FLOW_ACTION_QUEUE:
10047                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10048                         break;
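                      /*
                       * A port destination combined with encapsulation uses a
                       * reformat destination: the packet is re-encapsulated
                       * before being forwarded to the destination vport.
                       */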
10049                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10050                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10051                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10052                         dest_attr[idx]->dest_reformat->reformat =
10053                                         sample_act->dr_encap_action;
10054                         dest_attr[idx]->dest_reformat->dest =
10055                                         sample_act->dr_port_id_action;
10056                         break;
10057                 case MLX5_FLOW_ACTION_PORT_ID:
10058                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10059                         break;
10060                 case MLX5_FLOW_ACTION_JUMP:
10061                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10062                         break;
10063                 default:
10064                         rte_flow_error_set(error, EINVAL,
10065                                            RTE_FLOW_ERROR_TYPE_ACTION,
10066                                            NULL,
10067                                            "unsupported actions type");
10068                         goto error;
10069                 }
10070         }
10071         /* Create a dest array action. */
10072         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10073                                                 (domain,
10074                                                  cache_resource->num_of_dest,
10075                                                  dest_attr,
10076                                                  &cache_resource->action);
10077         if (ret) {
10078                 rte_flow_error_set(error, ENOMEM,
10079                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10080                                    NULL,
10081                                    "cannot create destination array action");
10082                 goto error;
10083         }
10084         cache_resource->idx = res_idx;
10085         cache_resource->dev = dev;
10086         for (idx = 0; idx < resource->num_of_dest; idx++)
10087                 mlx5_free(dest_attr[idx]);
10088         return &cache_resource->entry;
10089 error:
10090         for (idx = 0; idx < resource->num_of_dest; idx++) {
10091                 struct mlx5_flow_sub_actions_idx *act_res =
10092                                         &cache_resource->sample_idx[idx];
10093                 if (act_res->rix_hrxq &&
10094                     !mlx5_hrxq_release(dev,
10095                                 act_res->rix_hrxq))
10096                         act_res->rix_hrxq = 0;
10097                 if (act_res->rix_encap_decap &&
10098                         !flow_dv_encap_decap_resource_release(dev,
10099                                 act_res->rix_encap_decap))
10100                         act_res->rix_encap_decap = 0;
10101                 if (act_res->rix_port_id_action &&
10102                         !flow_dv_port_id_action_resource_release(dev,
10103                                 act_res->rix_port_id_action))
10104                         act_res->rix_port_id_action = 0;
10105                 if (act_res->rix_jump &&
10106                         !flow_dv_jump_tbl_resource_release(dev,
10107                                 act_res->rix_jump))
10108                         act_res->rix_jump = 0;
10109                 if (dest_attr[idx])
10110                         mlx5_free(dest_attr[idx]);
10111         }
10112
10113         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10114         return NULL;
10115 }
10116
10117 /**
10118  * Find existing destination array resource or create and register a new one.
10119  *
10120  * @param[in, out] dev
10121  *   Pointer to rte_eth_dev structure.
10122  * @param[in] resource
10123  *   Pointer to destination array resource.
10124  * @param[in, out] dev_flow
10125  *   Pointer to the dev_flow.
10126  * @param[out] error
10127  *   Pointer to the error structure.
10128  *
10129  * @return
10130  *   0 on success, a negative errno value otherwise and rte_errno is set.
10131  */
10132 static int
10133 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10134                          struct mlx5_flow_dv_dest_array_resource *resource,
10135                          struct mlx5_flow *dev_flow,
10136                          struct rte_flow_error *error)
10137 {
10138         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10139         struct mlx5_priv *priv = dev->data->dev_private;
10140         struct mlx5_cache_entry *entry;
10141         struct mlx5_flow_cb_ctx ctx = {
10142                 .dev = dev,
10143                 .error = error,
10144                 .data = resource,
10145         };
10146
10147         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10148         if (!entry)
10149                 return -rte_errno;
10150         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10151         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10152         dev_flow->dv.dest_array_res = cache_resource;
10153         return 0;
10154 }
10155
10156 /**
10157  * Convert Sample action to DV specification.
10158  *
10159  * @param[in] dev
10160  *   Pointer to rte_eth_dev structure.
10161  * @param[in] action
10162  *   Pointer to sample action structure.
10163  * @param[in, out] dev_flow
10164  *   Pointer to the mlx5_flow.
10165  * @param[in] attr
10166  *   Pointer to the flow attributes.
10167  * @param[in, out] num_of_dest
10168  *   Pointer to the num of destination.
10169  * @param[in, out] sample_actions
10170  *   Pointer to sample actions list.
10171  * @param[in, out] res
10172  *   Pointer to sample resource.
10173  * @param[out] error
10174  *   Pointer to the error structure.
10175  *
10176  * @return
10177  *   0 on success, a negative errno value otherwise and rte_errno is set.
10178  */
10179 static int
10180 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10181                                 const struct rte_flow_action_sample *action,
10182                                 struct mlx5_flow *dev_flow,
10183                                 const struct rte_flow_attr *attr,
10184                                 uint32_t *num_of_dest,
10185                                 void **sample_actions,
10186                                 struct mlx5_flow_dv_sample_resource *res,
10187                                 struct rte_flow_error *error)
10188 {
10189         struct mlx5_priv *priv = dev->data->dev_private;
10190         const struct rte_flow_action *sub_actions;
10191         struct mlx5_flow_sub_actions_list *sample_act;
10192         struct mlx5_flow_sub_actions_idx *sample_idx;
10193         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10194         struct rte_flow *flow = dev_flow->flow;
10195         struct mlx5_flow_rss_desc *rss_desc;
10196         uint64_t action_flags = 0;
10197
10198         MLX5_ASSERT(wks);
10199         rss_desc = &wks->rss_desc;
10200         sample_act = &res->sample_act;
10201         sample_idx = &res->sample_idx;
10202         res->ratio = action->ratio;
10203         sub_actions = action->actions;
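              /*
               * Translate each sub-action in the sample list into a DR
               * action; fate sub-actions (queue, RSS, port ID) also count
               * as mirror destinations through num_of_dest.
               */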
10204         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
10205                 int type = sub_actions->type;
10206                 uint32_t pre_rix = 0;
10207                 void *pre_r;
10208                 switch (type) {
10209                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10210                 {
10211                         const struct rte_flow_action_queue *queue;
10212                         struct mlx5_hrxq *hrxq;
10213                         uint32_t hrxq_idx;
10214
10215                         queue = sub_actions->conf;
10216                         rss_desc->queue_num = 1;
10217                         rss_desc->queue[0] = queue->index;
10218                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10219                                                     rss_desc, &hrxq_idx);
10220                         if (!hrxq)
10221                                 return rte_flow_error_set
10222                                         (error, rte_errno,
10223                                          RTE_FLOW_ERROR_TYPE_ACTION,
10224                                          NULL,
10225                                          "cannot create fate queue");
10226                         sample_act->dr_queue_action = hrxq->action;
10227                         sample_idx->rix_hrxq = hrxq_idx;
10228                         sample_actions[sample_act->actions_num++] =
10229                                                 hrxq->action;
10230                         (*num_of_dest)++;
10231                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
10232                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10233                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10234                         dev_flow->handle->fate_action =
10235                                         MLX5_FLOW_FATE_QUEUE;
10236                         break;
10237                 }
10238                 case RTE_FLOW_ACTION_TYPE_RSS:
10239                 {
10240                         struct mlx5_hrxq *hrxq;
10241                         uint32_t hrxq_idx;
10242                         const struct rte_flow_action_rss *rss;
10243                         const uint8_t *rss_key;
10244
10245                         rss = sub_actions->conf;
10246                         memcpy(rss_desc->queue, rss->queue,
10247                                rss->queue_num * sizeof(uint16_t));
10248                         rss_desc->queue_num = rss->queue_num;
10249                         /* NULL RSS key indicates default RSS key. */
10250                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
10251                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
10252                         /*
10253                          * rss->level and rss->types should be set in advance
10254                          * when expanding items for RSS.
10255                          */
10256                         flow_dv_hashfields_set(dev_flow, rss_desc);
10257                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10258                                                     rss_desc, &hrxq_idx);
10259                         if (!hrxq)
10260                                 return rte_flow_error_set
10261                                         (error, rte_errno,
10262                                          RTE_FLOW_ERROR_TYPE_ACTION,
10263                                          NULL,
10264                                          "cannot create fate queue");
10265                         sample_act->dr_queue_action = hrxq->action;
10266                         sample_idx->rix_hrxq = hrxq_idx;
10267                         sample_actions[sample_act->actions_num++] =
10268                                                 hrxq->action;
10269                         (*num_of_dest)++;
10270                         action_flags |= MLX5_FLOW_ACTION_RSS;
10271                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10272                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10273                         dev_flow->handle->fate_action =
10274                                         MLX5_FLOW_FATE_QUEUE;
10275                         break;
10276                 }
10277                 case RTE_FLOW_ACTION_TYPE_MARK:
10278                 {
10279                         uint32_t tag_be = mlx5_flow_mark_set
10280                                 (((const struct rte_flow_action_mark *)
10281                                 (sub_actions->conf))->id);
10282
10283                         dev_flow->handle->mark = 1;
10284                         pre_rix = dev_flow->handle->dvh.rix_tag;
10285                         /* Save the mark resource before sample */
10286                         pre_r = dev_flow->dv.tag_resource;
10287                         if (flow_dv_tag_resource_register(dev, tag_be,
10288                                                   dev_flow, error))
10289                                 return -rte_errno;
10290                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10291                         sample_act->dr_tag_action =
10292                                 dev_flow->dv.tag_resource->action;
10293                         sample_idx->rix_tag =
10294                                 dev_flow->handle->dvh.rix_tag;
10295                         sample_actions[sample_act->actions_num++] =
10296                                                 sample_act->dr_tag_action;
10297                         /* Recover the mark resource after sample */
10298                         dev_flow->dv.tag_resource = pre_r;
10299                         dev_flow->handle->dvh.rix_tag = pre_rix;
10300                         action_flags |= MLX5_FLOW_ACTION_MARK;
10301                         break;
10302                 }
10303                 case RTE_FLOW_ACTION_TYPE_COUNT:
10304                 {
10305                         if (!flow->counter) {
10306                                 flow->counter =
10307                                         flow_dv_translate_create_counter(dev,
10308                                                 dev_flow, sub_actions->conf,
10309                                                 NULL);
10310                                 if (!flow->counter)
10311                                         return rte_flow_error_set
10312                                                 (error, rte_errno,
10313                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10314                                                 NULL,
10315                                                 "cannot create counter"
10316                                                 " object.");
10317                         }
10318                         sample_act->dr_cnt_action =
10319                                   (flow_dv_counter_get_by_idx(dev,
10320                                   flow->counter, NULL))->action;
10321                         sample_actions[sample_act->actions_num++] =
10322                                                 sample_act->dr_cnt_action;
10323                         action_flags |= MLX5_FLOW_ACTION_COUNT;
10324                         break;
10325                 }
10326                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
10327                 {
10328                         struct mlx5_flow_dv_port_id_action_resource
10329                                         port_id_resource;
10330                         uint32_t port_id = 0;
10331
10332                         memset(&port_id_resource, 0, sizeof(port_id_resource));
10333                         /* Save the port id resource before sample */
10334                         pre_rix = dev_flow->handle->rix_port_id_action;
10335                         pre_r = dev_flow->dv.port_id_action;
10336                         if (flow_dv_translate_action_port_id(dev, sub_actions,
10337                                                              &port_id, error))
10338                                 return -rte_errno;
10339                         port_id_resource.port_id = port_id;
10340                         if (flow_dv_port_id_action_resource_register
10341                             (dev, &port_id_resource, dev_flow, error))
10342                                 return -rte_errno;
10343                         sample_act->dr_port_id_action =
10344                                 dev_flow->dv.port_id_action->action;
10345                         sample_idx->rix_port_id_action =
10346                                 dev_flow->handle->rix_port_id_action;
10347                         sample_actions[sample_act->actions_num++] =
10348                                                 sample_act->dr_port_id_action;
10349                         /* Recover the port id resource after sample */
10350                         dev_flow->dv.port_id_action = pre_r;
10351                         dev_flow->handle->rix_port_id_action = pre_rix;
10352                         (*num_of_dest)++;
10353                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10354                         break;
10355                 }
10356                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10357                         /* Save the encap resource before sample */
10358                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
10359                         pre_r = dev_flow->dv.encap_decap;
10360                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
10361                                                            dev_flow,
10362                                                            attr->transfer,
10363                                                            error))
10364                                 return -rte_errno;
10365                         sample_act->dr_encap_action =
10366                                 dev_flow->dv.encap_decap->action;
10367                         sample_idx->rix_encap_decap =
10368                                 dev_flow->handle->dvh.rix_encap_decap;
10369                         sample_actions[sample_act->actions_num++] =
10370                                                 sample_act->dr_encap_action;
10371                         /* Recover the encap resource after sample */
10372                         dev_flow->dv.encap_decap = pre_r;
10373                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
10374                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10375                         break;
10376                 default:
10377                         return rte_flow_error_set(error, EINVAL,
10378                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10379                                 NULL,
10380                                 "unsupported action in sample actions");
10381                 }
10382         }
10383         sample_act->action_flags = action_flags;
10384         res->ft_id = dev_flow->dv.group;
10385         if (attr->transfer) {
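                      /*
                       * For transfer (E-Switch) flows, prepare a "set" modify
                       * action writing the vport metadata tag into REG_C_0,
                       * so sampled traffic can be attributed to its source
                       * vport.
                       */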
10386                 union {
10387                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
10388                         uint64_t set_action;
10389                 } action_ctx = { .set_action = 0 };
10390
10391                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10392                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
10393                          MLX5_MODIFICATION_TYPE_SET);
10394                 MLX5_SET(set_action_in, action_ctx.action_in, field,
10395                          MLX5_MODI_META_REG_C_0);
10396                 MLX5_SET(set_action_in, action_ctx.action_in, data,
10397                          priv->vport_meta_tag);
10398                 res->set_action = action_ctx.set_action;
10399         } else if (attr->ingress) {
10400                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10401         } else {
10402                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
10403         }
10404         return 0;
10405 }
10406
10407 /**
10408  * Create and register the sample or destination array action resource.
10409  *
10410  * @param[in] dev
10411  *   Pointer to rte_eth_dev structure.
10412  * @param[in, out] dev_flow
10413  *   Pointer to the mlx5_flow.
10414  * @param[in] num_of_dest
10415  *   The num of destination.
10416  * @param[in, out] res
10417  *   Pointer to sample resource.
10418  * @param[in, out] mdest_res
10419  *   Pointer to destination array resource.
10420  * @param[in] sample_actions
10421  *   Pointer to sample path actions list.
10422  * @param[in] action_flags
10423  *   Holds the actions detected until now.
10424  * @param[out] error
10425  *   Pointer to the error structure.
10426  *
10427  * @return
10428  *   0 on success, a negative errno value otherwise and rte_errno is set.
10429  */
10430 static int
10431 flow_dv_create_action_sample(struct rte_eth_dev *dev,
10432                              struct mlx5_flow *dev_flow,
10433                              uint32_t num_of_dest,
10434                              struct mlx5_flow_dv_sample_resource *res,
10435                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
10436                              void **sample_actions,
10437                              uint64_t action_flags,
10438                              struct rte_flow_error *error)
10439 {
10440         /* update normal path action resource into last index of array */
10441         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
10442         struct mlx5_flow_sub_actions_list *sample_act =
10443                                         &mdest_res->sample_act[dest_index];
10444         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10445         struct mlx5_flow_rss_desc *rss_desc;
10446         uint32_t normal_idx = 0;
10447         struct mlx5_hrxq *hrxq;
10448         uint32_t hrxq_idx;
10449
10450         MLX5_ASSERT(wks);
10451         rss_desc = &wks->rss_desc;
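              /*
               * More than one destination means mirroring: the normal path
               * action is packed into the last slot of the destination
               * array and a dest-array action is registered instead of a
               * plain sampler.
               */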
10452         if (num_of_dest > 1) {
10453                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
10454                         /* Handle QP action for mirroring */
10455                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10456                                                     rss_desc, &hrxq_idx);
10457                         if (!hrxq)
10458                                 return rte_flow_error_set
10459                                      (error, rte_errno,
10460                                       RTE_FLOW_ERROR_TYPE_ACTION,
10461                                       NULL,
10462                                       "cannot create rx queue");
10463                         normal_idx++;
10464                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
10465                         sample_act->dr_queue_action = hrxq->action;
10466                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10467                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10468                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
10469                 }
10470                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
10471                         normal_idx++;
10472                         mdest_res->sample_idx[dest_index].rix_encap_decap =
10473                                 dev_flow->handle->dvh.rix_encap_decap;
10474                         sample_act->dr_encap_action =
10475                                 dev_flow->dv.encap_decap->action;
10476                 }
10477                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
10478                         normal_idx++;
10479                         mdest_res->sample_idx[dest_index].rix_port_id_action =
10480                                 dev_flow->handle->rix_port_id_action;
10481                         sample_act->dr_port_id_action =
10482                                 dev_flow->dv.port_id_action->action;
10483                 }
10484                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
10485                         normal_idx++;
10486                         mdest_res->sample_idx[dest_index].rix_jump =
10487                                 dev_flow->handle->rix_jump;
10488                         sample_act->dr_jump_action =
10489                                 dev_flow->dv.jump->action;
10490                         dev_flow->handle->rix_jump = 0;
10491                 }
10492                 sample_act->actions_num = normal_idx;
10493                 /* update sample action resource into first index of array */
10494                 mdest_res->ft_type = res->ft_type;
10495                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
10496                                 sizeof(struct mlx5_flow_sub_actions_idx));
10497                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
10498                                 sizeof(struct mlx5_flow_sub_actions_list));
10499                 mdest_res->num_of_dest = num_of_dest;
10500                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
10501                                                          dev_flow, error))
10502                         return rte_flow_error_set(error, EINVAL,
10503                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10504                                                   NULL, "can't create sample "
10505                                                   "action");
10506         } else {
10507                 res->sub_actions = sample_actions;
10508                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
10509                         return rte_flow_error_set(error, EINVAL,
10510                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10511                                                   NULL,
10512                                                   "can't create sample action");
10513         }
10514         return 0;
10515 }
10516
10517 /**
10518  * Remove an ASO age action from age actions list.
10519  *
10520  * @param[in] dev
10521  *   Pointer to the Ethernet device structure.
10522  * @param[in] age
10523  *   Pointer to the aso age action handler.
10524  */
10525 static void
10526 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
10527                                 struct mlx5_aso_age_action *age)
10528 {
10529         struct mlx5_age_info *age_info;
10530         struct mlx5_age_param *age_param = &age->age_params;
10531         struct mlx5_priv *priv = dev->data->dev_private;
10532         uint16_t expected = AGE_CANDIDATE;
10533
10534         age_info = GET_PORT_AGE_INFO(priv);
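              /*
               * Try to move the action from AGE_CANDIDATE directly to
               * AGE_FREE. If the state changed meanwhile (the action aged
               * out), it is linked on the aged list and must be removed
               * under the list lock first.
               */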
10535         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
10536                                          AGE_FREE, false, __ATOMIC_RELAXED,
10537                                          __ATOMIC_RELAXED)) {
10538                 /*
10539                  * We need the lock even on age timeout, since the
10540                  * age action may still be in process.
10541                  */
10542                 rte_spinlock_lock(&age_info->aged_sl);
10543                 LIST_REMOVE(age, next);
10544                 rte_spinlock_unlock(&age_info->aged_sl);
10545                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
10546         }
10547 }
10548
10549 /**
10550  * Release an ASO age action.
10551  *
10552  * @param[in] dev
10553  *   Pointer to the Ethernet device structure.
10554  * @param[in] age_idx
10555  *   Index of ASO age action to release.
10559  *
10560  * @return
10561  *   0 when age action was removed, otherwise the number of references.
10562  */
10563 static int
10564 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
10565 {
10566         struct mlx5_priv *priv = dev->data->dev_private;
10567         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10568         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
10569         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
10570
10571         if (!ret) {
10572                 flow_dv_aso_age_remove_from_age(dev, age);
10573                 rte_spinlock_lock(&mng->free_sl);
10574                 LIST_INSERT_HEAD(&mng->free, age, next);
10575                 rte_spinlock_unlock(&mng->free_sl);
10576         }
10577         return ret;
10578 }
10579
10580 /**
10581  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
10582  *
10583  * @param[in] dev
10584  *   Pointer to the Ethernet device structure.
10585  *
10586  * @return
10587  *   0 on success, otherwise negative errno value and rte_errno is set.
10588  */
10589 static int
10590 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
10591 {
10592         struct mlx5_priv *priv = dev->data->dev_private;
10593         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10594         void *old_pools = mng->pools;
10595         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
10596         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
10597         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
10598
10599         if (!pools) {
10600                 rte_errno = ENOMEM;
10601                 return -ENOMEM;
10602         }
10603         if (old_pools) {
10604                 memcpy(pools, old_pools,
10605                        mng->n * sizeof(struct mlx5_aso_age_pool *));
10606                 mlx5_free(old_pools);
10607         } else {
10608                 /* First ASO flow hit allocation - starting ASO data-path. */
10609                 int ret = mlx5_aso_queue_start(priv->sh);
10610
10611                 if (ret) {
10612                         mlx5_free(pools);
10613                         return ret;
10614                 }
10615         }
10616         mng->n = resize;
10617         mng->pools = pools;
10618         return 0;
10619 }
10620
10621 /**
10622  * Create and initialize a new ASO aging pool.
10623  *
10624  * @param[in] dev
10625  *   Pointer to the Ethernet device structure.
10626  * @param[out] age_free
10627  *   Where to put the pointer of a new age action.
10628  *
10629  * @return
10630  *   The age actions pool pointer and @p age_free is set on success,
10631  *   NULL otherwise and rte_errno is set.
10632  */
10633 static struct mlx5_aso_age_pool *
10634 flow_dv_age_pool_create(struct rte_eth_dev *dev,
10635                         struct mlx5_aso_age_action **age_free)
10636 {
10637         struct mlx5_priv *priv = dev->data->dev_private;
10638         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10639         struct mlx5_aso_age_pool *pool = NULL;
10640         struct mlx5_devx_obj *obj = NULL;
10641         uint32_t i;
10642
10643         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
10644                                                     priv->sh->pdn);
10645         if (!obj) {
10646                 rte_errno = ENODATA;
10647                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
10648                 return NULL;
10649         }
10650         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
10651         if (!pool) {
10652                 claim_zero(mlx5_devx_cmd_destroy(obj));
10653                 rte_errno = ENOMEM;
10654                 return NULL;
10655         }
10656         pool->flow_hit_aso_obj = obj;
10657         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
10658         rte_spinlock_lock(&mng->resize_sl);
10659         pool->index = mng->next;
10660         /* Resize pools array if there is no room for the new pool in it. */
10661         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
10662                 claim_zero(mlx5_devx_cmd_destroy(obj));
10663                 mlx5_free(pool);
10664                 rte_spinlock_unlock(&mng->resize_sl);
10665                 return NULL;
10666         }
10667         mng->pools[pool->index] = pool;
10668         mng->next++;
10669         rte_spinlock_unlock(&mng->resize_sl);
10670         /* Assign the first action in the new pool, the rest go to free list. */
10671         *age_free = &pool->actions[0];
10672         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
10673                 pool->actions[i].offset = i;
10674                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
10675         }
10676         return pool;
10677 }
10678
10679 /**
10680  * Allocate an ASO aging bit.
10681  *
10682  * @param[in] dev
10683  *   Pointer to the Ethernet device structure.
10684  * @param[out] error
10685  *   Pointer to the error structure.
10686  *
10687  * @return
10688  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
10689  */
10690 static uint32_t
10691 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
10692 {
10693         struct mlx5_priv *priv = dev->data->dev_private;
10694         const struct mlx5_aso_age_pool *pool;
10695         struct mlx5_aso_age_action *age_free = NULL;
10696         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10697
10698         MLX5_ASSERT(mng);
10699         /* Try to get the next free age action bit. */
10700         rte_spinlock_lock(&mng->free_sl);
10701         age_free = LIST_FIRST(&mng->free);
10702         if (age_free) {
10703                 LIST_REMOVE(age_free, next);
10704         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
10705                 rte_spinlock_unlock(&mng->free_sl);
10706                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
10707                                    NULL, "failed to create ASO age pool");
10708                 return 0; /* 0 is an error. */
10709         }
10710         rte_spinlock_unlock(&mng->free_sl);
10711         pool = container_of
10712           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
10713                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
10714                                                                        actions);
10715         if (!age_free->dr_action) {
10716                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
10717                                                  error);
10718
10719                 if (reg_c < 0) {
10720                         rte_flow_error_set(error, rte_errno,
10721                                            RTE_FLOW_ERROR_TYPE_ACTION,
10722                                            NULL, "failed to get reg_c "
10723                                            "for ASO flow hit");
10724                         return 0; /* 0 is an error. */
10725                 }
10726 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
10727                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
10728                                 (priv->sh->rx_domain,
10729                                  pool->flow_hit_aso_obj->obj, age_free->offset,
10730                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
10731                                  (reg_c - REG_C_0));
10732 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
10733                 if (!age_free->dr_action) {
10734                         rte_errno = errno;
10735                         rte_spinlock_lock(&mng->free_sl);
10736                         LIST_INSERT_HEAD(&mng->free, age_free, next);
10737                         rte_spinlock_unlock(&mng->free_sl);
10738                         rte_flow_error_set(error, rte_errno,
10739                                            RTE_FLOW_ERROR_TYPE_ACTION,
10740                                            NULL, "failed to create ASO "
10741                                            "flow hit action");
10742                         return 0; /* 0 is an error. */
10743                 }
10744         }
10745         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
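              /*
               * The returned index packs the pool index in the lower 16 bits
               * and (offset + 1) in the upper bits; the +1 bias keeps 0 free
               * to signal failure.
               */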
10746         return pool->index | ((age_free->offset + 1) << 16);
10747 }
10748
10749 /**
10750  * Create an age action using the ASO mechanism.
10751  *
10752  * @param[in] dev
10753  *   Pointer to rte_eth_dev structure.
10754  * @param[in] age
10755  *   Pointer to the aging action configuration.
10756  * @param[out] error
10757  *   Pointer to the error structure.
10758  *
10759  * @return
10760  *   Index to ASO age action on success, 0 otherwise.
10761  */
10762 static uint32_t
10763 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
10764                                  const struct rte_flow_action_age *age,
10765                                  struct rte_flow_error *error)
10766 {
10767         uint32_t age_idx = 0;
10768         struct mlx5_aso_age_action *aso_age;
10769
10770         age_idx = flow_dv_aso_age_alloc(dev, error);
10771         if (!age_idx)
10772                 return 0;
10773         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
10774         aso_age->age_params.context = age->context;
10775         aso_age->age_params.timeout = age->timeout;
10776         aso_age->age_params.port_id = dev->data->port_id;
10777         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
10778                          __ATOMIC_RELAXED);
10779         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
10780                          __ATOMIC_RELAXED);
10781         return age_idx;
10782 }
10783
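/*
 * Usage sketch, mirroring the AGE action handling in flow_dv_translate()
 * below: the shared ASO age action is created once per rte_flow and its
 * index is then reused by every sub-flow.
 *
 *      if (!flow->age)
 *              flow->age = flow_dv_translate_create_aso_age
 *                                      (dev, action->conf, error);
 */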
10784 /**
10785  * Fill the flow with DV spec, lock free
10786  * (mutex should be acquired by caller).
10787  *
10788  * @param[in] dev
10789  *   Pointer to rte_eth_dev structure.
10790  * @param[in, out] dev_flow
10791  *   Pointer to the sub flow.
10792  * @param[in] attr
10793  *   Pointer to the flow attributes.
10794  * @param[in] items
10795  *   Pointer to the list of items.
10796  * @param[in] actions
10797  *   Pointer to the list of actions.
10798  * @param[out] error
10799  *   Pointer to the error structure.
10800  *
10801  * @return
10802  *   0 on success, a negative errno value otherwise and rte_errno is set.
10803  */
10804 static int
10805 flow_dv_translate(struct rte_eth_dev *dev,
10806                   struct mlx5_flow *dev_flow,
10807                   const struct rte_flow_attr *attr,
10808                   const struct rte_flow_item items[],
10809                   const struct rte_flow_action actions[],
10810                   struct rte_flow_error *error)
10811 {
10812         struct mlx5_priv *priv = dev->data->dev_private;
10813         struct mlx5_dev_config *dev_conf = &priv->config;
10814         struct rte_flow *flow = dev_flow->flow;
10815         struct mlx5_flow_handle *handle = dev_flow->handle;
10816         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10817         struct mlx5_flow_rss_desc *rss_desc;
10818         uint64_t item_flags = 0;
10819         uint64_t last_item = 0;
10820         uint64_t action_flags = 0;
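        /*
         * misc4 is left out of the default mask size; the eCPRI item case
         * below grows both the mask and the value to the full
         * fte_match_param size when needed.
         */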
10821         struct mlx5_flow_dv_matcher matcher = {
10822                 .mask = {
10823                         .size = sizeof(matcher.mask.buf) -
10824                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
10825                 },
10826         };
10827         int actions_n = 0;
10828         bool actions_end = false;
10829         union {
10830                 struct mlx5_flow_dv_modify_hdr_resource res;
10831                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
10832                             sizeof(struct mlx5_modification_cmd) *
10833                             (MLX5_MAX_MODIFY_NUM + 1)];
10834         } mhdr_dummy;
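        /*
         * The union above reserves stack space for the modify header
         * resource plus room for up to MLX5_MAX_MODIFY_NUM modification
         * commands (and one spare entry), so no allocation is needed
         * while the actions are being accumulated.
         */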
10835         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
10836         const struct rte_flow_action_count *count = NULL;
10837         const struct rte_flow_action_age *age = NULL;
10838         union flow_dv_attr flow_attr = { .attr = 0 };
10839         uint32_t tag_be;
10840         union mlx5_flow_tbl_key tbl_key;
10841         uint32_t modify_action_position = UINT32_MAX;
10842         void *match_mask = matcher.mask.buf;
10843         void *match_value = dev_flow->dv.value.buf;
10844         uint8_t next_protocol = 0xff;
10845         struct rte_vlan_hdr vlan = { 0 };
10846         struct mlx5_flow_dv_dest_array_resource mdest_res;
10847         struct mlx5_flow_dv_sample_resource sample_res;
10848         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10849         const struct rte_flow_action_sample *sample = NULL;
10850         struct mlx5_flow_sub_actions_list *sample_act;
10851         uint32_t sample_act_pos = UINT32_MAX;
10852         uint32_t num_of_dest = 0;
10853         int tmp_actions_n = 0;
10854         uint32_t table;
10855         int ret = 0;
10856         const struct mlx5_flow_tunnel *tunnel;
10857         struct flow_grp_info grp_info = {
10858                 .external = !!dev_flow->external,
10859                 .transfer = !!attr->transfer,
10860                 .fdb_def_rule = !!priv->fdb_def_rule,
10861                 .skip_scale = dev_flow->skip_scale &
10862                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
10863         };
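        /*
         * Only the table-scale bit of skip_scale matters here; the
         * jump-group bit is re-evaluated per JUMP action in the loop
         * below.
         */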
10864
10865         if (!wks)
10866                 return rte_flow_error_set(error, ENOMEM,
10867                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10868                                           NULL,
10869                                           "failed to push flow workspace");
10870         rss_desc = &wks->rss_desc;
10871         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
10872         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
10873         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
10874                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10875         /* Update the normal path action resource in the last index of the array. */
10876         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
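        /*
         * Tunnel resolution order: a tunnel match rule takes its tunnel
         * from the items, a tunnel steer rule from the actions, and any
         * other flow falls back to the tunnel attached to the sub-flow.
         */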
10877         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
10878                  flow_items_to_tunnel(items) :
10879                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
10880                  flow_actions_to_tunnel(actions) :
10881                  dev_flow->tunnel;
10884         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
10885                                 (dev, tunnel, attr, items, actions);
10886         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
10887                                        &grp_info, error);
10888         if (ret)
10889                 return ret;
10890         dev_flow->dv.group = table;
10891         if (attr->transfer)
10892                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10893         /* The number of actions must be reset to 0 in case of a dirty stack. */
10894         mhdr_res->actions_num = 0;
10895         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
10896                 /*
10897                  * Do not add a decap action if the match rule drops the
10898                  * packet: HW rejects rules combining decap & drop.
10899                  *
10900                  * If a tunnel match rule was inserted before the matching
10901                  * tunnel set rule, the flow table used in the match rule
10902                  * must be registered. The current implementation handles
10903                  * that at the function end, in flow_dv_match_register().
10904                  */
10905                 bool add_decap = true;
10906                 const struct rte_flow_action *ptr = actions;
10907
10908                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
10909                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
10910                                 add_decap = false;
10911                                 break;
10912                         }
10913                 }
10914                 if (add_decap) {
10915                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
10916                                                            attr->transfer,
10917                                                            error))
10918                                 return -rte_errno;
10919                         dev_flow->dv.actions[actions_n++] =
10920                                         dev_flow->dv.encap_decap->action;
10921                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10922                 }
10923         }
10924         for (; !actions_end ; actions++) {
10925                 const struct rte_flow_action_queue *queue;
10926                 const struct rte_flow_action_rss *rss;
10927                 const struct rte_flow_action *action = actions;
10928                 const uint8_t *rss_key;
10929                 const struct rte_flow_action_meter *mtr;
10930                 struct mlx5_flow_tbl_resource *tbl;
10931                 struct mlx5_aso_age_action *age_act;
10932                 uint32_t port_id = 0;
10933                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
10934                 int action_type = actions->type;
10935                 const struct rte_flow_action *found_action = NULL;
10936                 struct mlx5_flow_meter *fm = NULL;
10937                 uint32_t jump_group = 0;
10938
10939                 if (!mlx5_flow_os_action_supported(action_type))
10940                         return rte_flow_error_set(error, ENOTSUP,
10941                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10942                                                   actions,
10943                                                   "action not supported");
10944                 switch (action_type) {
10945                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
10946                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
10947                         break;
10948                 case RTE_FLOW_ACTION_TYPE_VOID:
10949                         break;
10950                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
10951                         if (flow_dv_translate_action_port_id(dev, action,
10952                                                              &port_id, error))
10953                                 return -rte_errno;
10954                         port_id_resource.port_id = port_id;
10955                         MLX5_ASSERT(!handle->rix_port_id_action);
10956                         if (flow_dv_port_id_action_resource_register
10957                             (dev, &port_id_resource, dev_flow, error))
10958                                 return -rte_errno;
10959                         dev_flow->dv.actions[actions_n++] =
10960                                         dev_flow->dv.port_id_action->action;
10961                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10962                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
10963                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10964                         num_of_dest++;
10965                         break;
10966                 case RTE_FLOW_ACTION_TYPE_FLAG:
10967                         action_flags |= MLX5_FLOW_ACTION_FLAG;
10968                         dev_flow->handle->mark = 1;
10969                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
10970                                 struct rte_flow_action_mark mark = {
10971                                         .id = MLX5_FLOW_MARK_DEFAULT,
10972                                 };
10973
10974                                 if (flow_dv_convert_action_mark(dev, &mark,
10975                                                                 mhdr_res,
10976                                                                 error))
10977                                         return -rte_errno;
10978                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
10979                                 break;
10980                         }
10981                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
10982                         /*
10983                          * Only one FLAG or MARK is supported per device flow
10984                          * right now. So the pointer to the tag resource must be
10985                          * zero before the register process.
10986                          */
10987                         MLX5_ASSERT(!handle->dvh.rix_tag);
10988                         if (flow_dv_tag_resource_register(dev, tag_be,
10989                                                           dev_flow, error))
10990                                 return -rte_errno;
10991                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10992                         dev_flow->dv.actions[actions_n++] =
10993                                         dev_flow->dv.tag_resource->action;
10994                         break;
10995                 case RTE_FLOW_ACTION_TYPE_MARK:
10996                         action_flags |= MLX5_FLOW_ACTION_MARK;
10997                         dev_flow->handle->mark = 1;
10998                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
10999                                 const struct rte_flow_action_mark *mark =
11000                                         (const struct rte_flow_action_mark *)
11001                                                 actions->conf;
11002
11003                                 if (flow_dv_convert_action_mark(dev, mark,
11004                                                                 mhdr_res,
11005                                                                 error))
11006                                         return -rte_errno;
11007                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11008                                 break;
11009                         }
11010                         /* Fall-through */
11011                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
11012                         /* Legacy (non-extensive) MARK action. */
11013                         tag_be = mlx5_flow_mark_set
11014                               (((const struct rte_flow_action_mark *)
11015                                (actions->conf))->id);
11016                         MLX5_ASSERT(!handle->dvh.rix_tag);
11017                         if (flow_dv_tag_resource_register(dev, tag_be,
11018                                                           dev_flow, error))
11019                                 return -rte_errno;
11020                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11021                         dev_flow->dv.actions[actions_n++] =
11022                                         dev_flow->dv.tag_resource->action;
11023                         break;
11024                 case RTE_FLOW_ACTION_TYPE_SET_META:
11025                         if (flow_dv_convert_action_set_meta
11026                                 (dev, mhdr_res, attr,
11027                                  (const struct rte_flow_action_set_meta *)
11028                                   actions->conf, error))
11029                                 return -rte_errno;
11030                         action_flags |= MLX5_FLOW_ACTION_SET_META;
11031                         break;
11032                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
11033                         if (flow_dv_convert_action_set_tag
11034                                 (dev, mhdr_res,
11035                                  (const struct rte_flow_action_set_tag *)
11036                                   actions->conf, error))
11037                                 return -rte_errno;
11038                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11039                         break;
11040                 case RTE_FLOW_ACTION_TYPE_DROP:
11041                         action_flags |= MLX5_FLOW_ACTION_DROP;
11042                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
11043                         break;
11044                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11045                         queue = actions->conf;
11046                         rss_desc->queue_num = 1;
11047                         rss_desc->queue[0] = queue->index;
11048                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11049                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11050                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
11051                         num_of_dest++;
11052                         break;
11053                 case RTE_FLOW_ACTION_TYPE_RSS:
11054                         rss = actions->conf;
11055                         memcpy(rss_desc->queue, rss->queue,
11056                                rss->queue_num * sizeof(uint16_t));
11057                         rss_desc->queue_num = rss->queue_num;
11058                         /* NULL RSS key indicates default RSS key. */
11059                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11060                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11061                         /*
11062                          * rss->level and rss->types should be set in advance
11063                          * when expanding items for RSS.
11064                          */
11065                         action_flags |= MLX5_FLOW_ACTION_RSS;
11066                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
11067                                 MLX5_FLOW_FATE_SHARED_RSS :
11068                                 MLX5_FLOW_FATE_QUEUE;
11069                         break;
11070                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
11071                         flow->age = (uint32_t)(uintptr_t)(action->conf);
11072                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
11073                         __atomic_fetch_add(&age_act->refcnt, 1,
11074                                            __ATOMIC_RELAXED);
11075                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
11076                         action_flags |= MLX5_FLOW_ACTION_AGE;
11077                         break;
11078                 case RTE_FLOW_ACTION_TYPE_AGE:
11079                         if (priv->sh->flow_hit_aso_en && attr->group) {
11080                                 /*
11081                                  * Create one shared age action, to be used
11082                                  * by all sub-flows.
11083                                  */
11084                                 if (!flow->age) {
11085                                         flow->age =
11086                                                 flow_dv_translate_create_aso_age
11087                                                         (dev, action->conf,
11088                                                          error);
11089                                         if (!flow->age)
11090                                                 return rte_flow_error_set
11091                                                 (error, rte_errno,
11092                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11093                                                  NULL,
11094                                                  "can't create ASO age action");
11095                                 }
11096                                 dev_flow->dv.actions[actions_n++] =
11097                                           (flow_aso_age_get_by_idx
11098                                                 (dev, flow->age))->dr_action;
11099                                 action_flags |= MLX5_FLOW_ACTION_AGE;
11100                                 break;
11101                         }
11102                         /* Fall-through */
11103                 case RTE_FLOW_ACTION_TYPE_COUNT:
11104                         if (!dev_conf->devx) {
11105                                 return rte_flow_error_set
11106                                               (error, ENOTSUP,
11107                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11108                                                NULL,
11109                                                "count action not supported");
11110                         }
11111                         /* Save the information first; the counter object is created at ACTION_TYPE_END and shared by all sub-flows. */
11112                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
11113                                 count = action->conf;
11114                         else
11115                                 age = action->conf;
11116                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11117                         break;
11118                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
11119                         dev_flow->dv.actions[actions_n++] =
11120                                                 priv->sh->pop_vlan_action;
11121                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
11122                         break;
11123                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
11124                         if (!(action_flags &
11125                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
11126                                 flow_dev_get_vlan_info_from_items(items, &vlan);
11127                         vlan.eth_proto = rte_be_to_cpu_16
11128                              ((((const struct rte_flow_action_of_push_vlan *)
11129                                                    actions->conf)->ethertype));
11130                         found_action = mlx5_flow_find_action
11131                                         (actions + 1,
11132                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
11133                         if (found_action)
11134                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11135                         found_action = mlx5_flow_find_action
11136                                         (actions + 1,
11137                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
11138                         if (found_action)
11139                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11140                         if (flow_dv_create_action_push_vlan
11141                                             (dev, attr, &vlan, dev_flow, error))
11142                                 return -rte_errno;
11143                         dev_flow->dv.actions[actions_n++] =
11144                                         dev_flow->dv.push_vlan_res->action;
11145                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
11146                         break;
11147                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
11148                         /* The of_push_vlan action has already handled this action. */
11149                         MLX5_ASSERT(action_flags &
11150                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
11151                         break;
11152                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
11153                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
11154                                 break;
11155                         flow_dev_get_vlan_info_from_items(items, &vlan);
11156                         mlx5_update_vlan_vid_pcp(actions, &vlan);
11157                         /* If there is no VLAN push, this is a modify header action. */
11158                         if (flow_dv_convert_action_modify_vlan_vid
11159                                                 (mhdr_res, actions, error))
11160                                 return -rte_errno;
11161                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
11162                         break;
11163                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11164                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11165                         if (flow_dv_create_action_l2_encap(dev, actions,
11166                                                            dev_flow,
11167                                                            attr->transfer,
11168                                                            error))
11169                                 return -rte_errno;
11170                         dev_flow->dv.actions[actions_n++] =
11171                                         dev_flow->dv.encap_decap->action;
11172                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11173                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11174                                 sample_act->action_flags |=
11175                                                         MLX5_FLOW_ACTION_ENCAP;
11176                         break;
11177                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
11178                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
11179                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11180                                                            attr->transfer,
11181                                                            error))
11182                                 return -rte_errno;
11183                         dev_flow->dv.actions[actions_n++] =
11184                                         dev_flow->dv.encap_decap->action;
11185                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11186                         break;
11187                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11188                         /* Handle encap with preceding decap. */
11189                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
11190                                 if (flow_dv_create_action_raw_encap
11191                                         (dev, actions, dev_flow, attr, error))
11192                                         return -rte_errno;
11193                                 dev_flow->dv.actions[actions_n++] =
11194                                         dev_flow->dv.encap_decap->action;
11195                         } else {
11196                                 /* Handle encap without preceding decap. */
11197                                 if (flow_dv_create_action_l2_encap
11198                                     (dev, actions, dev_flow, attr->transfer,
11199                                      error))
11200                                         return -rte_errno;
11201                                 dev_flow->dv.actions[actions_n++] =
11202                                         dev_flow->dv.encap_decap->action;
11203                         }
11204                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11205                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11206                                 sample_act->action_flags |=
11207                                                         MLX5_FLOW_ACTION_ENCAP;
11208                         break;
11209                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11210                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
11211                                 ;
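                        /*
                         * The VOID skip above leaves `action` pointing at
                         * the first meaningful action after RAW_DECAP; a
                         * following RAW_ENCAP pair is handled in the encap
                         * case instead.
                         */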
11212                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
11213                                 if (flow_dv_create_action_l2_decap
11214                                     (dev, dev_flow, attr->transfer, error))
11215                                         return -rte_errno;
11216                                 dev_flow->dv.actions[actions_n++] =
11217                                         dev_flow->dv.encap_decap->action;
11218                         }
11219                         /* If decap is followed by encap, handle it at encap. */
11220                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11221                         break;
11222                 case RTE_FLOW_ACTION_TYPE_JUMP:
11223                         jump_group = ((const struct rte_flow_action_jump *)
11224                                                         action->conf)->group;
11225                         grp_info.std_tbl_fix = 0;
11226                         if (dev_flow->skip_scale &
11227                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
11228                                 grp_info.skip_scale = 1;
11229                         else
11230                                 grp_info.skip_scale = 0;
11231                         ret = mlx5_flow_group_to_table(dev, tunnel,
11232                                                        jump_group,
11233                                                        &table,
11234                                                        &grp_info, error);
11235                         if (ret)
11236                                 return ret;
11237                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
11238                                                        attr->transfer,
11239                                                        !!dev_flow->external,
11240                                                        tunnel, jump_group, 0,
11241                                                        error);
11242                         if (!tbl)
11243                                 return rte_flow_error_set
11244                                                 (error, errno,
11245                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11246                                                  NULL,
11247                                                  "cannot create jump action.");
11248                         if (flow_dv_jump_tbl_resource_register
11249                             (dev, tbl, dev_flow, error)) {
11250                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11251                                 return rte_flow_error_set
11252                                                 (error, errno,
11253                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11254                                                  NULL,
11255                                                  "cannot create jump action.");
11256                         }
11257                         dev_flow->dv.actions[actions_n++] =
11258                                         dev_flow->dv.jump->action;
11259                         action_flags |= MLX5_FLOW_ACTION_JUMP;
11260                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
11261                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
11262                         num_of_dest++;
11263                         break;
11264                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
11265                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
11266                         if (flow_dv_convert_action_modify_mac
11267                                         (mhdr_res, actions, error))
11268                                 return -rte_errno;
11269                         action_flags |= actions->type ==
11270                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
11271                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
11272                                         MLX5_FLOW_ACTION_SET_MAC_DST;
11273                         break;
11274                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
11275                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
11276                         if (flow_dv_convert_action_modify_ipv4
11277                                         (mhdr_res, actions, error))
11278                                 return -rte_errno;
11279                         action_flags |= actions->type ==
11280                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
11281                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
11282                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
11283                         break;
11284                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
11285                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
11286                         if (flow_dv_convert_action_modify_ipv6
11287                                         (mhdr_res, actions, error))
11288                                 return -rte_errno;
11289                         action_flags |= actions->type ==
11290                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
11291                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
11292                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
11293                         break;
11294                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
11295                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
11296                         if (flow_dv_convert_action_modify_tp
11297                                         (mhdr_res, actions, items,
11298                                          &flow_attr, dev_flow, !!(action_flags &
11299                                          MLX5_FLOW_ACTION_DECAP), error))
11300                                 return -rte_errno;
11301                         action_flags |= actions->type ==
11302                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
11303                                         MLX5_FLOW_ACTION_SET_TP_SRC :
11304                                         MLX5_FLOW_ACTION_SET_TP_DST;
11305                         break;
11306                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
11307                         if (flow_dv_convert_action_modify_dec_ttl
11308                                         (mhdr_res, items, &flow_attr, dev_flow,
11309                                          !!(action_flags &
11310                                          MLX5_FLOW_ACTION_DECAP), error))
11311                                 return -rte_errno;
11312                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
11313                         break;
11314                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
11315                         if (flow_dv_convert_action_modify_ttl
11316                                         (mhdr_res, actions, items, &flow_attr,
11317                                          dev_flow, !!(action_flags &
11318                                          MLX5_FLOW_ACTION_DECAP), error))
11319                                 return -rte_errno;
11320                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
11321                         break;
11322                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
11323                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
11324                         if (flow_dv_convert_action_modify_tcp_seq
11325                                         (mhdr_res, actions, error))
11326                                 return -rte_errno;
11327                         action_flags |= actions->type ==
11328                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
11329                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
11330                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
11331                         break;
11332
11333                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
11334                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
11335                         if (flow_dv_convert_action_modify_tcp_ack
11336                                         (mhdr_res, actions, error))
11337                                 return -rte_errno;
11338                         action_flags |= actions->type ==
11339                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
11340                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
11341                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
11342                         break;
11343                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
11344                         if (flow_dv_convert_action_set_reg
11345                                         (mhdr_res, actions, error))
11346                                 return -rte_errno;
11347                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11348                         break;
11349                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
11350                         if (flow_dv_convert_action_copy_mreg
11351                                         (dev, mhdr_res, actions, error))
11352                                 return -rte_errno;
11353                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11354                         break;
11355                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
11356                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
11357                         dev_flow->handle->fate_action =
11358                                         MLX5_FLOW_FATE_DEFAULT_MISS;
11359                         break;
11360                 case RTE_FLOW_ACTION_TYPE_METER:
11361                         mtr = actions->conf;
11362                         if (!flow->meter) {
11363                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
11364                                                             attr, error);
11365                                 if (!fm)
11366                                         return rte_flow_error_set(error,
11367                                                 rte_errno,
11368                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11369                                                 NULL,
11370                                                 "meter not found "
11371                                                 "or invalid parameters");
11372                                 flow->meter = fm->idx;
11373                         }
11374                         /* Set the meter action. */
11375                         if (!fm) {
11376                                 fm = mlx5_ipool_get(priv->sh->ipool
11377                                                 [MLX5_IPOOL_MTR], flow->meter);
11378                                 if (!fm)
11379                                         return rte_flow_error_set(error,
11380                                                 rte_errno,
11381                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11382                                                 NULL,
11383                                                 "meter not found "
11384                                                 "or invalid parameters");
11385                         }
11386                         dev_flow->dv.actions[actions_n++] =
11387                                 fm->mfts->meter_action;
11388                         action_flags |= MLX5_FLOW_ACTION_METER;
11389                         break;
11390                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
11391                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
11392                                                               actions, error))
11393                                 return -rte_errno;
11394                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
11395                         break;
11396                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
11397                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
11398                                                               actions, error))
11399                                 return -rte_errno;
11400                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
11401                         break;
11402                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
11403                         sample_act_pos = actions_n;
11404                         sample = (const struct rte_flow_action_sample *)
11405                                  action->conf;
11406                         actions_n++;
11407                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
11408                         /* Put the encap action into the sample group when it works with a port_id action. */
11409                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
11410                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
11411                                 sample_act->action_flags |=
11412                                                         MLX5_FLOW_ACTION_ENCAP;
11413                         break;
11414                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
11415                         if (flow_dv_convert_action_modify_field
11416                                         (dev, mhdr_res, actions, attr, error))
11417                                 return -rte_errno;
11418                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
11419                         break;
11420                 case RTE_FLOW_ACTION_TYPE_END:
11421                         actions_end = true;
11422                         if (mhdr_res->actions_num) {
11423                                 /* Create the modify header action if needed. */
11424                                 if (flow_dv_modify_hdr_resource_register
11425                                         (dev, mhdr_res, dev_flow, error))
11426                                         return -rte_errno;
11427                                 dev_flow->dv.actions[modify_action_position] =
11428                                         handle->dvh.modify_hdr->action;
11429                         }
11430                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
11431                                 /*
11432                                  * Create one count action, to be used
11433                                  * by all sub-flows.
11434                                  */
11435                                 if (!flow->counter) {
11436                                         flow->counter =
11437                                                 flow_dv_translate_create_counter
11438                                                         (dev, dev_flow, count,
11439                                                          age);
11440                                         if (!flow->counter)
11441                                                 return rte_flow_error_set
11442                                                 (error, rte_errno,
11443                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11444                                                  NULL, "cannot create counter"
11445                                                  " object.");
11446                                 }
11447                                 dev_flow->dv.actions[actions_n] =
11448                                           (flow_dv_counter_get_by_idx(dev,
11449                                           flow->counter, NULL))->action;
11450                                 actions_n++;
11451                         }
11452                 default:
11453                         break;
11454                 }
11455                 if (mhdr_res->actions_num &&
11456                     modify_action_position == UINT32_MAX)
11457                         modify_action_position = actions_n++;
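                /*
                 * The first header-modifying action reserves one slot in
                 * dv.actions; the slot is filled with the registered
                 * modify header action once ACTION_TYPE_END is reached.
                 */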
11458         }
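        /*
         * Translate the pattern items into the matcher mask and value
         * buffers. Note that the per-item `tunnel` flag below shadows the
         * tunnel pointer above; here it only marks inner-layer matching.
         */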
11459         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
11460                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
11461                 int item_type = items->type;
11462
11463                 if (!mlx5_flow_os_item_supported(item_type))
11464                         return rte_flow_error_set(error, ENOTSUP,
11465                                                   RTE_FLOW_ERROR_TYPE_ITEM,
11466                                                   NULL, "item not supported");
11467                 switch (item_type) {
11468                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
11469                         flow_dv_translate_item_port_id
11470                                 (dev, match_mask, match_value, items, attr);
11471                         last_item = MLX5_FLOW_ITEM_PORT_ID;
11472                         break;
11473                 case RTE_FLOW_ITEM_TYPE_ETH:
11474                         flow_dv_translate_item_eth(match_mask, match_value,
11475                                                    items, tunnel,
11476                                                    dev_flow->dv.group);
11477                         matcher.priority = action_flags &
11478                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
11479                                         !dev_flow->external ?
11480                                         MLX5_PRIORITY_MAP_L3 :
11481                                         MLX5_PRIORITY_MAP_L2;
11482                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
11483                                              MLX5_FLOW_LAYER_OUTER_L2;
11484                         break;
11485                 case RTE_FLOW_ITEM_TYPE_VLAN:
11486                         flow_dv_translate_item_vlan(dev_flow,
11487                                                     match_mask, match_value,
11488                                                     items, tunnel,
11489                                                     dev_flow->dv.group);
11490                         matcher.priority = MLX5_PRIORITY_MAP_L2;
11491                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
11492                                               MLX5_FLOW_LAYER_INNER_VLAN) :
11493                                              (MLX5_FLOW_LAYER_OUTER_L2 |
11494                                               MLX5_FLOW_LAYER_OUTER_VLAN);
11495                         break;
11496                 case RTE_FLOW_ITEM_TYPE_IPV4:
11497                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11498                                                   &item_flags, &tunnel);
11499                         flow_dv_translate_item_ipv4(match_mask, match_value,
11500                                                     items, tunnel,
11501                                                     dev_flow->dv.group);
11502                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11503                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
11504                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
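                        /*
                         * The next protocol is trusted only where the mask
                         * covers it: AND the spec with the mask so partially
                         * masked values cannot steer tunnel detection.
                         */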
11505                         if (items->mask != NULL &&
11506                             ((const struct rte_flow_item_ipv4 *)
11507                              items->mask)->hdr.next_proto_id) {
11508                                 next_protocol =
11509                                         ((const struct rte_flow_item_ipv4 *)
11510                                          (items->spec))->hdr.next_proto_id;
11511                                 next_protocol &=
11512                                         ((const struct rte_flow_item_ipv4 *)
11513                                          (items->mask))->hdr.next_proto_id;
11514                         } else {
11515                                 /* Reset for inner layer. */
11516                                 next_protocol = 0xff;
11517                         }
11518                         break;
11519                 case RTE_FLOW_ITEM_TYPE_IPV6:
11520                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11521                                                   &item_flags, &tunnel);
11522                         flow_dv_translate_item_ipv6(match_mask, match_value,
11523                                                     items, tunnel,
11524                                                     dev_flow->dv.group);
11525                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11526                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
11527                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
11528                         if (items->mask != NULL &&
11529                             ((const struct rte_flow_item_ipv6 *)
11530                              items->mask)->hdr.proto) {
11531                                 next_protocol =
11532                                         ((const struct rte_flow_item_ipv6 *)
11533                                          items->spec)->hdr.proto;
11534                                 next_protocol &=
11535                                         ((const struct rte_flow_item_ipv6 *)
11536                                          items->mask)->hdr.proto;
11537                         } else {
11538                                 /* Reset for inner layer. */
11539                                 next_protocol = 0xff;
11540                         }
11541                         break;
11542                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
11543                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
11544                                                              match_value,
11545                                                              items, tunnel);
11546                         last_item = tunnel ?
11547                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
11548                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
11549                         if (items->mask != NULL &&
11550                             ((const struct rte_flow_item_ipv6_frag_ext *)
11551                              items->mask)->hdr.next_header) {
11552                                 next_protocol =
11553                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11554                                  items->spec)->hdr.next_header;
11555                                 next_protocol &=
11556                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11557                                  items->mask)->hdr.next_header;
11558                         } else {
11559                                 /* Reset for inner layer. */
11560                                 next_protocol = 0xff;
11561                         }
11562                         break;
11563                 case RTE_FLOW_ITEM_TYPE_TCP:
11564                         flow_dv_translate_item_tcp(match_mask, match_value,
11565                                                    items, tunnel);
11566                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11567                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
11568                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
11569                         break;
11570                 case RTE_FLOW_ITEM_TYPE_UDP:
11571                         flow_dv_translate_item_udp(match_mask, match_value,
11572                                                    items, tunnel);
11573                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11574                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
11575                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
11576                         break;
11577                 case RTE_FLOW_ITEM_TYPE_GRE:
11578                         flow_dv_translate_item_gre(match_mask, match_value,
11579                                                    items, tunnel);
11580                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11581                         last_item = MLX5_FLOW_LAYER_GRE;
11582                         break;
11583                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
11584                         flow_dv_translate_item_gre_key(match_mask,
11585                                                        match_value, items);
11586                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
11587                         break;
11588                 case RTE_FLOW_ITEM_TYPE_NVGRE:
11589                         flow_dv_translate_item_nvgre(match_mask, match_value,
11590                                                      items, tunnel);
11591                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11592                         last_item = MLX5_FLOW_LAYER_GRE;
11593                         break;
11594                 case RTE_FLOW_ITEM_TYPE_VXLAN:
11595                         flow_dv_translate_item_vxlan(match_mask, match_value,
11596                                                      items, tunnel);
11597                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11598                         last_item = MLX5_FLOW_LAYER_VXLAN;
11599                         break;
11600                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
11601                         flow_dv_translate_item_vxlan_gpe(match_mask,
11602                                                          match_value, items,
11603                                                          tunnel);
11604                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11605                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
11606                         break;
11607                 case RTE_FLOW_ITEM_TYPE_GENEVE:
11608                         flow_dv_translate_item_geneve(match_mask, match_value,
11609                                                       items, tunnel);
11610                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11611                         last_item = MLX5_FLOW_LAYER_GENEVE;
11612                         break;
11613                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
11614                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
11615                                                           match_value,
11616                                                           items, error);
11617                         if (ret)
11618                                 return rte_flow_error_set(error, -ret,
11619                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11620                                         "cannot create GENEVE TLV option");
11621                         flow->geneve_tlv_option = 1;
11622                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
11623                         break;
11624                 case RTE_FLOW_ITEM_TYPE_MPLS:
11625                         flow_dv_translate_item_mpls(match_mask, match_value,
11626                                                     items, last_item, tunnel);
11627                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11628                         last_item = MLX5_FLOW_LAYER_MPLS;
11629                         break;
11630                 case RTE_FLOW_ITEM_TYPE_MARK:
11631                         flow_dv_translate_item_mark(dev, match_mask,
11632                                                     match_value, items);
11633                         last_item = MLX5_FLOW_ITEM_MARK;
11634                         break;
11635                 case RTE_FLOW_ITEM_TYPE_META:
11636                         flow_dv_translate_item_meta(dev, match_mask,
11637                                                     match_value, attr, items);
11638                         last_item = MLX5_FLOW_ITEM_METADATA;
11639                         break;
11640                 case RTE_FLOW_ITEM_TYPE_ICMP:
11641                         flow_dv_translate_item_icmp(match_mask, match_value,
11642                                                     items, tunnel);
11643                         last_item = MLX5_FLOW_LAYER_ICMP;
11644                         break;
11645                 case RTE_FLOW_ITEM_TYPE_ICMP6:
11646                         flow_dv_translate_item_icmp6(match_mask, match_value,
11647                                                       items, tunnel);
11648                         last_item = MLX5_FLOW_LAYER_ICMP6;
11649                         break;
11650                 case RTE_FLOW_ITEM_TYPE_TAG:
11651                         flow_dv_translate_item_tag(dev, match_mask,
11652                                                    match_value, items);
11653                         last_item = MLX5_FLOW_ITEM_TAG;
11654                         break;
11655                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
11656                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
11657                                                         match_value, items);
11658                         last_item = MLX5_FLOW_ITEM_TAG;
11659                         break;
11660                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
11661                         flow_dv_translate_item_tx_queue(dev, match_mask,
11662                                                         match_value,
11663                                                         items);
11664                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
11665                         break;
11666                 case RTE_FLOW_ITEM_TYPE_GTP:
11667                         flow_dv_translate_item_gtp(match_mask, match_value,
11668                                                    items, tunnel);
11669                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11670                         last_item = MLX5_FLOW_LAYER_GTP;
11671                         break;
11672                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
11673                         ret = flow_dv_translate_item_gtp_psc(match_mask,
11674                                                           match_value,
11675                                                           items);
11676                         if (ret)
11677                                 return rte_flow_error_set(error, -ret,
11678                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11679                                         "cannot create GTP PSC item");
11680                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
11681                         break;
11682                 case RTE_FLOW_ITEM_TYPE_ECPRI:
11683                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
11684                                 /* Create it only the first time it is used. */
11685                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
11686                                 if (ret)
11687                                         return rte_flow_error_set
11688                                                 (error, -ret,
11689                                                 RTE_FLOW_ERROR_TYPE_ITEM,
11690                                                 NULL,
11691                                                 "cannot create eCPRI parser");
11692                         }
11693                         /* Adjust the matcher mask and device flow value sizes. */
11694                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
11695                         dev_flow->dv.value.size =
11696                                         MLX5_ST_SZ_BYTES(fte_match_param);
11697                         flow_dv_translate_item_ecpri(dev, match_mask,
11698                                                      match_value, items);
11699                         /* No other protocol should follow eCPRI layer. */
11700                         last_item = MLX5_FLOW_LAYER_ECPRI;
11701                         break;
11702                 default:
11703                         break;
11704                 }
11705                 item_flags |= last_item;
11706         }
11707         /*
11708          * When E-Switch mode is enabled, there are two cases where the
11709          * source port must be set manually.
11710          * The first is a NIC steering rule, and the second is an E-Switch
11711          * rule where no port_id item was found. In both cases
11712          * the source port is set according to the current port in use.
11713          */
11714         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
11715             (priv->representor || priv->master)) {
11716                 if (flow_dv_translate_item_port_id(dev, match_mask,
11717                                                    match_value, NULL, attr))
11718                         return -rte_errno;
11719         }
11720 #ifdef RTE_LIBRTE_MLX5_DEBUG
11721         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
11722                                               dev_flow->dv.value.buf));
11723 #endif
11724         /*
11725          * Layers may be already initialized from prefix flow if this dev_flow
11726          * is the suffix flow.
11727          */
11728         handle->layers |= item_flags;
11729         if (action_flags & MLX5_FLOW_ACTION_RSS)
11730                 flow_dv_hashfields_set(dev_flow, rss_desc);
11731         /* If the sample action includes an RSS action, the Sample/Mirror
11732          * resource should be registered after the hash fields are updated.
11733          */
11734         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
11735                 ret = flow_dv_translate_action_sample(dev,
11736                                                       sample,
11737                                                       dev_flow, attr,
11738                                                       &num_of_dest,
11739                                                       sample_actions,
11740                                                       &sample_res,
11741                                                       error);
11742                 if (ret < 0)
11743                         return ret;
11744                 ret = flow_dv_create_action_sample(dev,
11745                                                    dev_flow,
11746                                                    num_of_dest,
11747                                                    &sample_res,
11748                                                    &mdest_res,
11749                                                    sample_actions,
11750                                                    action_flags,
11751                                                    error);
11752                 if (ret < 0)
11753                         return rte_flow_error_set
11754                                                 (error, rte_errno,
11755                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11756                                                 NULL,
11757                                                 "cannot create sample action");
11758                 if (num_of_dest > 1) {
11759                         dev_flow->dv.actions[sample_act_pos] =
11760                         dev_flow->dv.dest_array_res->action;
11761                 } else {
11762                         dev_flow->dv.actions[sample_act_pos] =
11763                         dev_flow->dv.sample_res->verbs_action;
11764                 }
11765         }
11766         /*
11767          * For multiple destinations (sample action with ratio=1), the encap
11768          * action and port ID action will be combined into a group action,
11769          * so the original actions must be removed from the flow and only
11770          * the sample action used instead.
11771          */
11772         if (num_of_dest > 1 &&
11773             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
11774                 int i;
11775                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11776
11777                 for (i = 0; i < actions_n; i++) {
11778                         if ((sample_act->dr_encap_action &&
11779                                 sample_act->dr_encap_action ==
11780                                 dev_flow->dv.actions[i]) ||
11781                                 (sample_act->dr_port_id_action &&
11782                                 sample_act->dr_port_id_action ==
11783                                 dev_flow->dv.actions[i]) ||
11784                                 (sample_act->dr_jump_action &&
11785                                 sample_act->dr_jump_action ==
11786                                 dev_flow->dv.actions[i]))
11787                                 continue;
11788                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
11789                 }
11790                 memcpy((void *)dev_flow->dv.actions,
11791                                 (void *)temp_actions,
11792                                 tmp_actions_n * sizeof(void *));
11793                 actions_n = tmp_actions_n;
11794         }
11795         dev_flow->dv.actions_n = actions_n;
11796         dev_flow->act_flags = action_flags;
11797         /* Register matcher. */
11798         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
11799                                     matcher.mask.size);
11800         matcher.priority = mlx5_get_matcher_priority(dev, attr,
11801                                         matcher.priority);
11802         /* The reserved field does not need to be set to 0 here. */
11803         tbl_key.domain = attr->transfer;
11804         tbl_key.direction = attr->egress;
11805         tbl_key.table_id = dev_flow->dv.group;
11806         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
11807                                      tunnel, attr->group, error))
11808                 return -rte_errno;
11809         return 0;
11810 }
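
/*
 * Matcher reuse sketch (illustrative, not in the original source): the
 * registration above keys the matcher cache on the CRC of the final mask
 * together with the (domain, direction, table_id) tuple in tbl_key, so
 * two rules with identical masks in the same table are expected to share
 * a single DR matcher object rather than create a new one.
 */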
11811
11812 /**
11813  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11814  * and tunnel.
11815  *
11816  * @param[in, out] action
11817  *   Shared RSS action holding hash RX queue objects.
11818  * @param[in] hash_fields
11819  *   Defines combination of packet fields to participate in RX hash.
11820  * @param[in] tunnel
11821  *   Tunnel type
11822  * @param[in] hrxq_idx
11823  *   Hash RX queue index to set.
11824  *
11825  * @return
11826  *   0 on success, -1 if the hash fields combination is not supported.
11827  */
11828 static int
11829 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
11830                               const uint64_t hash_fields,
11831                               const int tunnel,
11832                               uint32_t hrxq_idx)
11833 {
11834         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
11835
11836         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11837         case MLX5_RSS_HASH_IPV4:
11838                 hrxqs[0] = hrxq_idx;
11839                 return 0;
11840         case MLX5_RSS_HASH_IPV4_TCP:
11841                 hrxqs[1] = hrxq_idx;
11842                 return 0;
11843         case MLX5_RSS_HASH_IPV4_UDP:
11844                 hrxqs[2] = hrxq_idx;
11845                 return 0;
11846         case MLX5_RSS_HASH_IPV6:
11847                 hrxqs[3] = hrxq_idx;
11848                 return 0;
11849         case MLX5_RSS_HASH_IPV6_TCP:
11850                 hrxqs[4] = hrxq_idx;
11851                 return 0;
11852         case MLX5_RSS_HASH_IPV6_UDP:
11853                 hrxqs[5] = hrxq_idx;
11854                 return 0;
11855         case MLX5_RSS_HASH_NONE:
11856                 hrxqs[6] = hrxq_idx;
11857                 return 0;
11858         default:
11859                 return -1;
11860         }
11861 }
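
/*
 * Usage sketch (illustrative only): storing the non-tunnel IPv4-TCP hash
 * RX queue of a shared RSS action:
 *
 *     __flow_dv_action_rss_hrxq_set(shared_rss, MLX5_RSS_HASH_IPV4_TCP,
 *                                   0, hrxq_idx);
 *
 * fills slot [1] of the selected hrxqs array; the lookup helper below
 * reads back the same fixed slot mapping.
 */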
11862
11863 /**
11864  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11865  * and tunnel.
11866  *
11867  * @param[in] dev
11868  *   Pointer to the Ethernet device structure.
11869  * @param[in] idx
11870  *   Shared RSS action ID holding hash RX queue objects.
11871  * @param[in] hash_fields
11872  *   Defines combination of packet fields to participate in RX hash.
11873  * @param[in] tunnel
11874  *   Tunnel type
11875  *
11876  * @return
11877  *   Valid hash RX queue index, otherwise 0.
11878  */
11879 static uint32_t
11880 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
11881                                  const uint64_t hash_fields,
11882                                  const int tunnel)
11883 {
11884         struct mlx5_priv *priv = dev->data->dev_private;
11885         struct mlx5_shared_action_rss *shared_rss =
11886             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11887         const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
11888                                                         shared_rss->hrxq_tunnel;
11889
11890         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11891         case MLX5_RSS_HASH_IPV4:
11892                 return hrxqs[0];
11893         case MLX5_RSS_HASH_IPV4_TCP:
11894                 return hrxqs[1];
11895         case MLX5_RSS_HASH_IPV4_UDP:
11896                 return hrxqs[2];
11897         case MLX5_RSS_HASH_IPV6:
11898                 return hrxqs[3];
11899         case MLX5_RSS_HASH_IPV6_TCP:
11900                 return hrxqs[4];
11901         case MLX5_RSS_HASH_IPV6_UDP:
11902                 return hrxqs[5];
11903         case MLX5_RSS_HASH_NONE:
11904                 return hrxqs[6];
11905         default:
11906                 return 0;
11907         }
11908 }
11909
11910 /**
11911  * Apply the flow to the NIC, lock free,
11912  * (mutex should be acquired by caller).
11913  *
11914  * @param[in] dev
11915  *   Pointer to the Ethernet device structure.
11916  * @param[in, out] flow
11917  *   Pointer to flow structure.
11918  * @param[out] error
11919  *   Pointer to error structure.
11920  *
11921  * @return
11922  *   0 on success, a negative errno value otherwise and rte_errno is set.
11923  */
11924 static int
11925 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
11926               struct rte_flow_error *error)
11927 {
11928         struct mlx5_flow_dv_workspace *dv;
11929         struct mlx5_flow_handle *dh;
11930         struct mlx5_flow_handle_dv *dv_h;
11931         struct mlx5_flow *dev_flow;
11932         struct mlx5_priv *priv = dev->data->dev_private;
11933         uint32_t handle_idx;
11934         int n;
11935         int err;
11936         int idx;
11937         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11938         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
11939
11940         MLX5_ASSERT(wks);
11941         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
11942                 dev_flow = &wks->flows[idx];
11943                 dv = &dev_flow->dv;
11944                 dh = dev_flow->handle;
11945                 dv_h = &dh->dvh;
11946                 n = dv->actions_n;
11947                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
11948                         if (dv->transfer) {
11949                                 dv->actions[n++] = priv->sh->esw_drop_action;
11950                         } else {
11951                                 MLX5_ASSERT(priv->drop_queue.hrxq);
11952                                 dv->actions[n++] =
11953                                                 priv->drop_queue.hrxq->action;
11954                         }
11955                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
11956                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
11957                         struct mlx5_hrxq *hrxq;
11958                         uint32_t hrxq_idx;
11959
11960                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
11961                                                     &hrxq_idx);
11962                         if (!hrxq) {
11963                                 rte_flow_error_set
11964                                         (error, rte_errno,
11965                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11966                                          "cannot get hash queue");
11967                                 goto error;
11968                         }
11969                         dh->rix_hrxq = hrxq_idx;
11970                         dv->actions[n++] = hrxq->action;
11971                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
11972                         struct mlx5_hrxq *hrxq = NULL;
11973                         uint32_t hrxq_idx;
11974
11975                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
11976                                                 rss_desc->shared_rss,
11977                                                 dev_flow->hash_fields,
11978                                                 !!(dh->layers &
11979                                                 MLX5_FLOW_LAYER_TUNNEL));
11980                         if (hrxq_idx)
11981                                 hrxq = mlx5_ipool_get
11982                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
11983                                          hrxq_idx);
11984                         if (!hrxq) {
11985                                 rte_flow_error_set
11986                                         (error, rte_errno,
11987                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11988                                          "cannot get hash queue");
11989                                 goto error;
11990                         }
11991                         dh->rix_srss = rss_desc->shared_rss;
11992                         dv->actions[n++] = hrxq->action;
11993                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
11994                         if (!priv->sh->default_miss_action) {
11995                                 rte_flow_error_set
11996                                         (error, rte_errno,
11997                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11998                                          "default miss action was not created.");
11999                                 goto error;
12000                         }
12001                         dv->actions[n++] = priv->sh->default_miss_action;
12002                 }
12003                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
12004                                                (void *)&dv->value, n,
12005                                                dv->actions, &dh->drv_flow);
12006                 if (err) {
12007                         rte_flow_error_set(error, errno,
12008                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12009                                            NULL,
12010                                            "hardware refuses to create flow");
12011                         goto error;
12012                 }
12013                 if (priv->vmwa_context &&
12014                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
12015                         /*
12016                          * The rule contains the VLAN pattern.
12017                          * For a VF we create a VLAN
12018                          * interface to make the hypervisor set the
12019                          * correct e-Switch vport context.
12020                          */
12021                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
12022                 }
12023         }
12024         return 0;
12025 error:
12026         err = rte_errno; /* Save rte_errno before cleanup. */
12027         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
12028                        handle_idx, dh, next) {
12029                 /* hrxq is a union; don't clear it if the flag is not set. */
12030                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
12031                         mlx5_hrxq_release(dev, dh->rix_hrxq);
12032                         dh->rix_hrxq = 0;
12033                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12034                         dh->rix_srss = 0;
12035                 }
12036                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12037                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12038         }
12039         rte_errno = err; /* Restore rte_errno. */
12040         return -rte_errno;
12041 }
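
/*
 * Summary of the fate-to-DR-action mapping applied above (illustrative):
 *   MLX5_FLOW_FATE_DROP         -> esw_drop_action or drop queue action
 *   MLX5_FLOW_FATE_QUEUE        -> per-flow hash RX queue action
 *   MLX5_FLOW_FATE_SHARED_RSS   -> hash RX queue owned by the shared action
 *   MLX5_FLOW_FATE_DEFAULT_MISS -> sh->default_miss_action
 */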
12042
12043 void
12044 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
12045                           struct mlx5_cache_entry *entry)
12046 {
12047         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
12048                                                           entry);
12049
12050         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
12051         mlx5_free(cache);
12052 }
12053
12054 /**
12055  * Release the flow matcher.
12056  *
12057  * @param dev
12058  *   Pointer to Ethernet device.
12059  * @param handle
12060  *   Pointer to mlx5_flow_handle.
12061  *
12062  * @return
12063  *   1 while a reference on it exists, 0 when freed.
12064  */
12065 static int
12066 flow_dv_matcher_release(struct rte_eth_dev *dev,
12067                         struct mlx5_flow_handle *handle)
12068 {
12069         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
12070         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
12071                                                             typeof(*tbl), tbl);
12072         int ret;
12073
12074         MLX5_ASSERT(matcher->matcher_object);
12075         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
12076         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
12077         return ret;
12078 }
12079
12080 /**
12081  * Release encap_decap resource.
12082  *
12083  * @param list
12084  *   Pointer to the hash list.
12085  * @param entry
12086  *   Pointer to exist resource entry object.
12087  */
12088 void
12089 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
12090                               struct mlx5_hlist_entry *entry)
12091 {
12092         struct mlx5_dev_ctx_shared *sh = list->ctx;
12093         struct mlx5_flow_dv_encap_decap_resource *res =
12094                 container_of(entry, typeof(*res), entry);
12095
12096         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12097         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
12098 }
12099
12100 /**
12101  * Release an encap/decap resource.
12102  *
12103  * @param dev
12104  *   Pointer to Ethernet device.
12105  * @param encap_decap_idx
12106  *   Index of encap decap resource.
12107  *
12108  * @return
12109  *   1 while a reference on it exists, 0 when freed.
12110  */
12111 static int
12112 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
12113                                      uint32_t encap_decap_idx)
12114 {
12115         struct mlx5_priv *priv = dev->data->dev_private;
12116         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
12117
12118         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
12119                                         encap_decap_idx);
12120         if (!cache_resource)
12121                 return 0;
12122         MLX5_ASSERT(cache_resource->action);
12123         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
12124                                      &cache_resource->entry);
12125 }
12126
12127 /**
12128  * Release a jump to table action resource.
12129  *
12130  * @param dev
12131  *   Pointer to Ethernet device.
12132  * @param rix_jump
12133  *   Index to the jump action resource.
12134  *
12135  * @return
12136  *   1 while a reference on it exists, 0 when freed.
12137  */
12138 static int
12139 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
12140                                   uint32_t rix_jump)
12141 {
12142         struct mlx5_priv *priv = dev->data->dev_private;
12143         struct mlx5_flow_tbl_data_entry *tbl_data;
12144
12145         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
12146                                   rix_jump);
12147         if (!tbl_data)
12148                 return 0;
12149         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
12150 }
12151
12152 void
12153 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
12154                          struct mlx5_hlist_entry *entry)
12155 {
12156         struct mlx5_flow_dv_modify_hdr_resource *res =
12157                 container_of(entry, typeof(*res), entry);
12158
12159         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12160         mlx5_free(entry);
12161 }
12162
12163 /**
12164  * Release a modify-header resource.
12165  *
12166  * @param dev
12167  *   Pointer to Ethernet device.
12168  * @param handle
12169  *   Pointer to mlx5_flow_handle.
12170  *
12171  * @return
12172  *   1 while a reference on it exists, 0 when freed.
12173  */
12174 static int
12175 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
12176                                     struct mlx5_flow_handle *handle)
12177 {
12178         struct mlx5_priv *priv = dev->data->dev_private;
12179         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
12180
12181         MLX5_ASSERT(entry->action);
12182         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
12183 }
12184
12185 void
12186 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
12187                           struct mlx5_cache_entry *entry)
12188 {
12189         struct mlx5_dev_ctx_shared *sh = list->ctx;
12190         struct mlx5_flow_dv_port_id_action_resource *cache =
12191                         container_of(entry, typeof(*cache), entry);
12192
12193         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12194         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
12195 }
12196
12197 /**
12198  * Release port ID action resource.
12199  *
12200  * @param dev
12201  *   Pointer to Ethernet device.
12202  * @param handle
12203  *   Pointer to mlx5_flow_handle.
12204  *
12205  * @return
12206  *   1 while a reference on it exists, 0 when freed.
12207  */
12208 static int
12209 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
12210                                         uint32_t port_id)
12211 {
12212         struct mlx5_priv *priv = dev->data->dev_private;
12213         struct mlx5_flow_dv_port_id_action_resource *cache;
12214
12215         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
12216         if (!cache)
12217                 return 0;
12218         MLX5_ASSERT(cache->action);
12219         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
12220                                      &cache->entry);
12221 }
12222
12223 /**
12224  * Release shared RSS action resource.
12225  *
12226  * @param dev
12227  *   Pointer to Ethernet device.
12228  * @param srss
12229  *   Shared RSS action index.
12230  */
12231 static void
12232 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
12233 {
12234         struct mlx5_priv *priv = dev->data->dev_private;
12235         struct mlx5_shared_action_rss *shared_rss;
12236
12237         shared_rss = mlx5_ipool_get
12238                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
12239         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
12240 }
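
/*
 * Note (illustrative): the helper above only drops the per-flow reference
 * taken at rule creation; the shared RSS object itself is reclaimed in
 * __flow_dv_action_rss_release() once the reference count allows it.
 */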
12241
12242 void
12243 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
12244                             struct mlx5_cache_entry *entry)
12245 {
12246         struct mlx5_dev_ctx_shared *sh = list->ctx;
12247         struct mlx5_flow_dv_push_vlan_action_resource *cache =
12248                         container_of(entry, typeof(*cache), entry);
12249
12250         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12251         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
12252 }
12253
12254 /**
12255  * Release push vlan action resource.
12256  *
12257  * @param dev
12258  *   Pointer to Ethernet device.
12259  * @param handle
12260  *   Pointer to mlx5_flow_handle.
12261  *
12262  * @return
12263  *   1 while a reference on it exists, 0 when freed.
12264  */
12265 static int
12266 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
12267                                           struct mlx5_flow_handle *handle)
12268 {
12269         struct mlx5_priv *priv = dev->data->dev_private;
12270         struct mlx5_flow_dv_push_vlan_action_resource *cache;
12271         uint32_t idx = handle->dvh.rix_push_vlan;
12272
12273         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
12274         if (!cache)
12275                 return 0;
12276         MLX5_ASSERT(cache->action);
12277         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
12278                                      &cache->entry);
12279 }
12280
12281 /**
12282  * Release the fate resource.
12283  *
12284  * @param dev
12285  *   Pointer to Ethernet device.
12286  * @param handle
12287  *   Pointer to mlx5_flow_handle.
12288  */
12289 static void
12290 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
12291                                struct mlx5_flow_handle *handle)
12292 {
12293         if (!handle->rix_fate)
12294                 return;
12295         switch (handle->fate_action) {
12296         case MLX5_FLOW_FATE_QUEUE:
12297                 mlx5_hrxq_release(dev, handle->rix_hrxq);
12298                 break;
12299         case MLX5_FLOW_FATE_JUMP:
12300                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
12301                 break;
12302         case MLX5_FLOW_FATE_PORT_ID:
12303                 flow_dv_port_id_action_resource_release(dev,
12304                                 handle->rix_port_id_action);
12305                 break;
12306         default:
12307                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
12308                 break;
12309         }
12310         handle->rix_fate = 0;
12311 }
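
/*
 * Note (illustrative): rix_fate aliases rix_hrxq/rix_jump/rix_port_id_action
 * in a union inside the handle, which is why the switch above consults
 * fate_action before interpreting the index, and why clearing rix_fate
 * resets all the aliases at once.
 */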
12312
12313 void
12314 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
12315                          struct mlx5_cache_entry *entry)
12316 {
12317         struct mlx5_flow_dv_sample_resource *cache_resource =
12318                         container_of(entry, typeof(*cache_resource), entry);
12319         struct rte_eth_dev *dev = cache_resource->dev;
12320         struct mlx5_priv *priv = dev->data->dev_private;
12321
12322         if (cache_resource->verbs_action)
12323                 claim_zero(mlx5_flow_os_destroy_flow_action
12324                                 (cache_resource->verbs_action));
12325         if (cache_resource->normal_path_tbl)
12326                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12327                         cache_resource->normal_path_tbl);
12328         flow_dv_sample_sub_actions_release(dev,
12329                                 &cache_resource->sample_idx);
12330         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12331                         cache_resource->idx);
12332         DRV_LOG(DEBUG, "sample resource %p: removed",
12333                 (void *)cache_resource);
12334 }
12335
12336 /**
12337  * Release a sample resource.
12338  *
12339  * @param dev
12340  *   Pointer to Ethernet device.
12341  * @param handle
12342  *   Pointer to mlx5_flow_handle.
12343  *
12344  * @return
12345  *   1 while a reference on it exists, 0 when freed.
12346  */
12347 static int
12348 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
12349                                      struct mlx5_flow_handle *handle)
12350 {
12351         struct mlx5_priv *priv = dev->data->dev_private;
12352         struct mlx5_flow_dv_sample_resource *cache_resource;
12353
12354         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12355                          handle->dvh.rix_sample);
12356         if (!cache_resource)
12357                 return 0;
12358         MLX5_ASSERT(cache_resource->verbs_action);
12359         return mlx5_cache_unregister(&priv->sh->sample_action_list,
12360                                      &cache_resource->entry);
12361 }
12362
12363 void
12364 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
12365                              struct mlx5_cache_entry *entry)
12366 {
12367         struct mlx5_flow_dv_dest_array_resource *cache_resource =
12368                         container_of(entry, typeof(*cache_resource), entry);
12369         struct rte_eth_dev *dev = cache_resource->dev;
12370         struct mlx5_priv *priv = dev->data->dev_private;
12371         uint32_t i = 0;
12372
12373         MLX5_ASSERT(cache_resource->action);
12374         if (cache_resource->action)
12375                 claim_zero(mlx5_flow_os_destroy_flow_action
12376                                         (cache_resource->action));
12377         for (; i < cache_resource->num_of_dest; i++)
12378                 flow_dv_sample_sub_actions_release(dev,
12379                                 &cache_resource->sample_idx[i]);
12380         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12381                         cache_resource->idx);
12382         DRV_LOG(DEBUG, "destination array resource %p: removed",
12383                 (void *)cache_resource);
12384 }
12385
12386 /**
12387  * Release a destination array resource.
12388  *
12389  * @param dev
12390  *   Pointer to Ethernet device.
12391  * @param handle
12392  *   Pointer to mlx5_flow_handle.
12393  *
12394  * @return
12395  *   1 while a reference on it exists, 0 when freed.
12396  */
12397 static int
12398 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
12399                                     struct mlx5_flow_handle *handle)
12400 {
12401         struct mlx5_priv *priv = dev->data->dev_private;
12402         struct mlx5_flow_dv_dest_array_resource *cache;
12403
12404         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12405                                handle->dvh.rix_dest_array);
12406         if (!cache)
12407                 return 0;
12408         MLX5_ASSERT(cache->action);
12409         return mlx5_cache_unregister(&priv->sh->dest_array_list,
12410                                      &cache->entry);
12411 }
12412
12413 static void
12414 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
12415 {
12416         struct mlx5_priv *priv = dev->data->dev_private;
12417         struct mlx5_dev_ctx_shared *sh = priv->sh;
12418         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
12419                                 sh->geneve_tlv_option_resource;
12420         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
12421         if (geneve_opt_resource) {
12422                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
12423                                          __ATOMIC_RELAXED))) {
12424                         claim_zero(mlx5_devx_cmd_destroy
12425                                         (geneve_opt_resource->obj));
12426                         mlx5_free(sh->geneve_tlv_option_resource);
12427                         sh->geneve_tlv_option_resource = NULL;
12428                 }
12429         }
12430         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
12431 }
12432
12433 /**
12434  * Remove the flow from the NIC but keep it in memory.
12435  * Lock free, (mutex should be acquired by caller).
12436  *
12437  * @param[in] dev
12438  *   Pointer to Ethernet device.
12439  * @param[in, out] flow
12440  *   Pointer to flow structure.
12441  */
12442 static void
12443 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12444 {
12445         struct mlx5_flow_handle *dh;
12446         uint32_t handle_idx;
12447         struct mlx5_priv *priv = dev->data->dev_private;
12448
12449         if (!flow)
12450                 return;
12451         handle_idx = flow->dev_handles;
12452         while (handle_idx) {
12453                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12454                                     handle_idx);
12455                 if (!dh)
12456                         return;
12457                 if (dh->drv_flow) {
12458                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
12459                         dh->drv_flow = NULL;
12460                 }
12461                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
12462                         flow_dv_fate_resource_release(dev, dh);
12463                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12464                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12465                 handle_idx = dh->next.next;
12466         }
12467 }
12468
12469 /**
12470  * Remove the flow from the NIC and the memory.
12471  * Lock free, (mutex should be acquired by caller).
12472  *
12473  * @param[in] dev
12474  *   Pointer to the Ethernet device structure.
12475  * @param[in, out] flow
12476  *   Pointer to flow structure.
12477  */
12478 static void
12479 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
12480 {
12481         struct mlx5_flow_handle *dev_handle;
12482         struct mlx5_priv *priv = dev->data->dev_private;
12483         uint32_t srss = 0;
12484
12485         if (!flow)
12486                 return;
12487         flow_dv_remove(dev, flow);
12488         if (flow->counter) {
12489                 flow_dv_counter_free(dev, flow->counter);
12490                 flow->counter = 0;
12491         }
12492         if (flow->meter) {
12493                 struct mlx5_flow_meter *fm;
12494
12495                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
12496                                     flow->meter);
12497                 if (fm)
12498                         mlx5_flow_meter_detach(fm);
12499                 flow->meter = 0;
12500         }
12501         if (flow->age)
12502                 flow_dv_aso_age_release(dev, flow->age);
12503         if (flow->geneve_tlv_option) {
12504                 flow_dv_geneve_tlv_option_resource_release(dev);
12505                 flow->geneve_tlv_option = 0;
12506         }
12507         while (flow->dev_handles) {
12508                 uint32_t tmp_idx = flow->dev_handles;
12509
12510                 dev_handle = mlx5_ipool_get(priv->sh->ipool
12511                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
12512                 if (!dev_handle)
12513                         return;
12514                 flow->dev_handles = dev_handle->next.next;
12515                 if (dev_handle->dvh.matcher)
12516                         flow_dv_matcher_release(dev, dev_handle);
12517                 if (dev_handle->dvh.rix_sample)
12518                         flow_dv_sample_resource_release(dev, dev_handle);
12519                 if (dev_handle->dvh.rix_dest_array)
12520                         flow_dv_dest_array_resource_release(dev, dev_handle);
12521                 if (dev_handle->dvh.rix_encap_decap)
12522                         flow_dv_encap_decap_resource_release(dev,
12523                                 dev_handle->dvh.rix_encap_decap);
12524                 if (dev_handle->dvh.modify_hdr)
12525                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
12526                 if (dev_handle->dvh.rix_push_vlan)
12527                         flow_dv_push_vlan_action_resource_release(dev,
12528                                                                   dev_handle);
12529                 if (dev_handle->dvh.rix_tag)
12530                         flow_dv_tag_release(dev,
12531                                             dev_handle->dvh.rix_tag);
12532                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
12533                         flow_dv_fate_resource_release(dev, dev_handle);
12534                 else if (!srss)
12535                         srss = dev_handle->rix_srss;
12536                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12537                            tmp_idx);
12538         }
12539         if (srss)
12540                 flow_dv_shared_rss_action_release(dev, srss);
12541 }
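
/*
 * Note (illustrative): the shared RSS reference (srss) is captured from
 * the first handle using it and released only once, after the handle
 * loop, since several sub-flow handles of the same flow may point at
 * the same shared action.
 */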
12542
12543 /**
12544  * Release array of hash RX queue objects.
12545  * Helper function.
12546  *
12547  * @param[in] dev
12548  *   Pointer to the Ethernet device structure.
12549  * @param[in, out] hrxqs
12550  *   Array of hash RX queue objects.
12551  *
12552  * @return
12553  *   Total number of references to hash RX queue objects in *hrxqs* array
12554  *   after this operation.
12555  */
12556 static int
12557 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
12558                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
12559 {
12560         size_t i;
12561         int remaining = 0;
12562
12563         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
12564                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
12565
12566                 if (!ret)
12567                         (*hrxqs)[i] = 0;
12568                 remaining += ret;
12569         }
12570         return remaining;
12571 }
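
/*
 * Return-value sketch (illustrative): any non-zero sum returned above
 * means at least one hash RX queue in the array is still referenced,
 * and the caller treats the owning shared action as still busy.
 */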
12572
12573 /**
12574  * Release all hash RX queue objects representing shared RSS action.
12575  *
12576  * @param[in] dev
12577  *   Pointer to the Ethernet device structure.
12578  * @param[in, out] action
12579  *   Shared RSS action to remove hash RX queue objects from.
12580  *
12581  * @return
12582  *   Total number of references to hash RX queue objects stored in *action*
12583  *   after this operation.
12584  *   Expected to be 0 if no external references held.
12585  */
12586 static int
12587 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
12588                                  struct mlx5_shared_action_rss *shared_rss)
12589 {
12590         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq) +
12591                 __flow_dv_hrxqs_release(dev, &shared_rss->hrxq_tunnel);
12592 }
12593
12594 /**
12595  * Setup shared RSS action.
12596  * Prepare a set of hash RX queue objects sufficient to handle all valid
12597  * hash_fields combinations (see enum ibv_rx_hash_fields).
12598  *
12599  * @param[in] dev
12600  *   Pointer to the Ethernet device structure.
12601  * @param[in] action_idx
12602  *   Shared RSS action ipool index.
12603  * @param[in, out] action
12604  *   Partially initialized shared RSS action.
12605  * @param[out] error
12606  *   Perform verbose error reporting if not NULL. Initialized in case of
12607  *   error only.
12608  *
12609  * @return
12610  *   0 on success, otherwise negative errno value.
12611  */
12612 static int
12613 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
12614                            uint32_t action_idx,
12615                            struct mlx5_shared_action_rss *shared_rss,
12616                            struct rte_flow_error *error)
12617 {
12618         struct mlx5_flow_rss_desc rss_desc = { 0 };
12619         size_t i;
12620         int err;
12621
12622         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
12623                 return rte_flow_error_set(error, rte_errno,
12624                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12625                                           "cannot setup indirection table");
12626         }
12627         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
12628         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
12629         rss_desc.const_q = shared_rss->origin.queue;
12630         rss_desc.queue_num = shared_rss->origin.queue_num;
12631         /* Set non-zero value to indicate a shared RSS. */
12632         rss_desc.shared_rss = action_idx;
12633         rss_desc.ind_tbl = shared_rss->ind_tbl;
12634         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
12635                 uint32_t hrxq_idx;
12636                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
12637                 int tunnel;
12638
12639                 for (tunnel = 0; tunnel < 2; tunnel++) {
12640                         rss_desc.tunnel = tunnel;
12641                         rss_desc.hash_fields = hash_fields;
12642                         hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
12643                         if (!hrxq_idx) {
12644                                 rte_flow_error_set
12645                                         (error, rte_errno,
12646                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12647                                          "cannot get hash queue");
12648                                 goto error_hrxq_new;
12649                         }
12650                         err = __flow_dv_action_rss_hrxq_set
12651                                 (shared_rss, hash_fields, tunnel, hrxq_idx);
12652                         MLX5_ASSERT(!err);
12653                 }
12654         }
12655         return 0;
12656 error_hrxq_new:
12657         err = rte_errno;
12658         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
12659         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
12660                 shared_rss->ind_tbl = NULL;
12661         rte_errno = err;
12662         return -rte_errno;
12663 }
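
/*
 * Note (illustrative): the setup loop above pre-creates one hash RX queue
 * per entry of mlx5_rss_hash_fields for both the tunnel and non-tunnel
 * cases, so rule insertion can later pick a ready-made queue via
 * __flow_dv_action_rss_hrxq_lookup() instead of creating one on demand.
 */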
12664
12665 /**
12666  * Create shared RSS action.
12667  *
12668  * @param[in] dev
12669  *   Pointer to the Ethernet device structure.
12670  * @param[in] conf
12671  *   Shared action configuration.
12672  * @param[in] rss
12673  *   RSS action specification used to create shared action.
12674  * @param[out] error
12675  *   Perform verbose error reporting if not NULL. Initialized in case of
12676  *   error only.
12677  *
12678  * @return
12679  *   A valid shared action ID in case of success, 0 otherwise and
12680  *   rte_errno is set.
12681  */
12682 static uint32_t
12683 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
12684                             const struct rte_flow_shared_action_conf *conf,
12685                             const struct rte_flow_action_rss *rss,
12686                             struct rte_flow_error *error)
12687 {
12688         struct mlx5_priv *priv = dev->data->dev_private;
12689         struct mlx5_shared_action_rss *shared_rss = NULL;
12690         void *queue = NULL;
12691         struct rte_flow_action_rss *origin;
12692         const uint8_t *rss_key;
12693         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
12694         uint32_t idx;
12695
12696         RTE_SET_USED(conf);
12697         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
12698                             0, SOCKET_ID_ANY);
12699         shared_rss = mlx5_ipool_zmalloc
12700                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
12701         if (!shared_rss || !queue) {
12702                 rte_flow_error_set(error, ENOMEM,
12703                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12704                                    "cannot allocate resource memory");
12705                 goto error_rss_init;
12706         }
12707         if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
12708                 rte_flow_error_set(error, E2BIG,
12709                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12710                                    "rss action number out of range");
12711                 goto error_rss_init;
12712         }
12713         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
12714                                           sizeof(*shared_rss->ind_tbl),
12715                                           0, SOCKET_ID_ANY);
12716         if (!shared_rss->ind_tbl) {
12717                 rte_flow_error_set(error, ENOMEM,
12718                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12719                                    "cannot allocate resource memory");
12720                 goto error_rss_init;
12721         }
12722         memcpy(queue, rss->queue, queue_size);
12723         shared_rss->ind_tbl->queues = queue;
12724         shared_rss->ind_tbl->queues_n = rss->queue_num;
12725         origin = &shared_rss->origin;
12726         origin->func = rss->func;
12727         origin->level = rss->level;
12728         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
12729         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
12730         /* NULL RSS key indicates default RSS key. */
12731         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12732         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12733         origin->key = &shared_rss->key[0];
12734         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
12735         origin->queue = queue;
12736         origin->queue_num = rss->queue_num;
12737         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
12738                 goto error_rss_init;
12739         rte_spinlock_init(&shared_rss->action_rss_sl);
12740         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
12741         rte_spinlock_lock(&priv->shared_act_sl);
12742         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12743                      &priv->rss_shared_actions, idx, shared_rss, next);
12744         rte_spinlock_unlock(&priv->shared_act_sl);
12745         return idx;
12746 error_rss_init:
12747         if (shared_rss) {
12748                 if (shared_rss->ind_tbl)
12749                         mlx5_free(shared_rss->ind_tbl);
12750                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12751                                 idx);
12752         }
12753         if (queue)
12754                 mlx5_free(queue);
12755         return 0;
12756 }
12757
12758 /**
12759  * Destroy the shared RSS action.
12760  * Release related hash RX queue objects.
12761  *
12762  * @param[in] dev
12763  *   Pointer to the Ethernet device structure.
12764  * @param[in] idx
12765  *   The shared RSS action object ID to be removed.
12766  * @param[out] error
12767  *   Perform verbose error reporting if not NULL. Initialized in case of
12768  *   error only.
12769  *
12770  * @return
12771  *   0 on success, otherwise negative errno value.
12772  */
12773 static int
12774 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
12775                              struct rte_flow_error *error)
12776 {
12777         struct mlx5_priv *priv = dev->data->dev_private;
12778         struct mlx5_shared_action_rss *shared_rss =
12779             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12780         uint32_t old_refcnt = 1;
12781         int remaining;
12782         uint16_t *queue = NULL;
12783
12784         if (!shared_rss)
12785                 return rte_flow_error_set(error, EINVAL,
12786                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12787                                           "invalid shared action");
12788         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
12789         if (remaining)
12790                 return rte_flow_error_set(error, EBUSY,
12791                                           RTE_FLOW_ERROR_TYPE_ACTION,
12792                                           NULL,
12793                                           "shared rss hrxq has references");
12794         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
12795                                          0, 0, __ATOMIC_ACQUIRE,
12796                                          __ATOMIC_RELAXED))
12797                 return rte_flow_error_set(error, EBUSY,
12798                                           RTE_FLOW_ERROR_TYPE_ACTION,
12799                                           NULL,
12800                                           "shared rss has references");
12801         queue = shared_rss->ind_tbl->queues;
12802         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
12803         if (remaining)
12804                 return rte_flow_error_set(error, EBUSY,
12805                                           RTE_FLOW_ERROR_TYPE_ACTION,
12806                                           NULL,
12807                                           "shared rss indirection table has"
12808                                           " references");
12809         mlx5_free(queue);
12810         rte_spinlock_lock(&priv->shared_act_sl);
12811         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12812                      &priv->rss_shared_actions, idx, shared_rss, next);
12813         rte_spinlock_unlock(&priv->shared_act_sl);
12814         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12815                         idx);
12816         return 0;
12817 }
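
/*
 * Teardown-order note (illustrative): hash RX queues are released first,
 * the refcnt CAS then guards against flows still holding the action, and
 * only afterwards are the indirection table and the ipool entry reclaimed.
 */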
12818
12819 /**
12820  * Create shared action, lock free,
12821  * (mutex should be acquired by caller).
12822  * Dispatcher for action type specific call.
12823  *
12824  * @param[in] dev
12825  *   Pointer to the Ethernet device structure.
12826  * @param[in] conf
12827  *   Shared action configuration.
12828  * @param[in] action
12829  *   Action specification used to create shared action.
12830  * @param[out] error
12831  *   Perform verbose error reporting if not NULL. Initialized in case of
12832  *   error only.
12833  *
12834  * @return
12835  *   A valid shared action handle in case of success, NULL otherwise and
12836  *   rte_errno is set.
12837  */
12838 static struct rte_flow_shared_action *
12839 flow_dv_action_create(struct rte_eth_dev *dev,
12840                       const struct rte_flow_shared_action_conf *conf,
12841                       const struct rte_flow_action *action,
12842                       struct rte_flow_error *err)
12843 {
12844         uint32_t idx = 0;
12845         uint32_t ret = 0;
12846
12847         switch (action->type) {
12848         case RTE_FLOW_ACTION_TYPE_RSS:
12849                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
12850                 idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
12851                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12852                 break;
12853         case RTE_FLOW_ACTION_TYPE_AGE:
12854                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
12855                 idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
12856                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12857                 if (ret) {
12858                         struct mlx5_aso_age_action *aso_age =
12859                                               flow_aso_age_get_by_idx(dev, ret);
12860
12861                         if (!aso_age->age_params.context)
12862                                 aso_age->age_params.context =
12863                                                          (void *)(uintptr_t)idx;
12864                 }
12865                 break;
12866         default:
12867                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12868                                    NULL, "action type not supported");
12869                 break;
12870         }
12871         return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
12872 }
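
/*
 * Handle-encoding sketch (illustrative): the returned pointer is not
 * dereferenced; it packs the action type above MLX5_SHARED_ACTION_TYPE_OFFSET
 * and the ipool index in the bits below it:
 *
 *     act_idx = (type << MLX5_SHARED_ACTION_TYPE_OFFSET) | ipool_idx;
 *
 * The destroy/update/query dispatchers below decode it the same way.
 */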
12873
12874 /**
12875  * Destroy the shared action.
12876  * Release action related resources on the NIC and the memory.
12877  * Lock free, (mutex should be acquired by caller).
12878  * Dispatcher for action type specific call.
12879  *
12880  * @param[in] dev
12881  *   Pointer to the Ethernet device structure.
12882  * @param[in] action
12883  *   The shared action object to be removed.
12884  * @param[out] error
12885  *   Perform verbose error reporting if not NULL. Initialized in case of
12886  *   error only.
12887  *
12888  * @return
12889  *   0 on success, otherwise negative errno value.
12890  */
12891 static int
12892 flow_dv_action_destroy(struct rte_eth_dev *dev,
12893                        struct rte_flow_shared_action *action,
12894                        struct rte_flow_error *error)
12895 {
12896         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12897         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12898         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12899         int ret;
12900
12901         switch (type) {
12902         case MLX5_SHARED_ACTION_TYPE_RSS:
12903                 return __flow_dv_action_rss_release(dev, idx, error);
12904         case MLX5_SHARED_ACTION_TYPE_AGE:
12905                 ret = flow_dv_aso_age_release(dev, idx);
12906                 if (ret)
12907                         /*
12908                          * In this case, the last flow holding a reference
12909                          * will actually release the age action.
12910                          */
12911                         DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
12912                                 " released with references %d.", idx, ret);
12913                 return 0;
12914         default:
12915                 return rte_flow_error_set(error, ENOTSUP,
12916                                           RTE_FLOW_ERROR_TYPE_ACTION,
12917                                           NULL,
12918                                           "action type not supported");
12919         }
12920 }
12921
12922 /**
12923  * Update the shared RSS action configuration in place.
12924  *
12925  * @param[in] dev
12926  *   Pointer to the Ethernet device structure.
12927  * @param[in] idx
12928  *   The shared RSS action object ID to be updated.
12929  * @param[in] action_conf
12930  *   RSS action specification used to modify *shared_rss*.
12931  * @param[out] error
12932  *   Perform verbose error reporting if not NULL. Initialized in case of
12933  *   error only.
12934  *
12935  * @return
12936  *   0 on success, otherwise negative errno value.
12937  * @note Currently only updating of the RSS queues is supported.
12938  */
12939 static int
12940 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
12941                             const struct rte_flow_action_rss *action_conf,
12942                             struct rte_flow_error *error)
12943 {
12944         struct mlx5_priv *priv = dev->data->dev_private;
12945         struct mlx5_shared_action_rss *shared_rss =
12946             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12947         int ret = 0;
12948         void *queue = NULL;
12949         uint16_t *queue_old = NULL;
12950         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
12951
12952         if (!shared_rss)
12953                 return rte_flow_error_set(error, EINVAL,
12954                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12955                                           "invalid shared action to update");
12956         queue = mlx5_malloc(MLX5_MEM_ZERO,
12957                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
12958                             0, SOCKET_ID_ANY);
12959         if (!queue)
12960                 return rte_flow_error_set(error, ENOMEM,
12961                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12962                                           NULL,
12963                                           "cannot allocate resource memory");
12964         memcpy(queue, action_conf->queue, queue_size);
12965         MLX5_ASSERT(shared_rss->ind_tbl);
12966         rte_spinlock_lock(&shared_rss->action_rss_sl);
12967         queue_old = shared_rss->ind_tbl->queues;
12968         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
12969                                         queue, action_conf->queue_num, true);
12970         if (ret) {
12971                 mlx5_free(queue);
12972                 ret = rte_flow_error_set(error, rte_errno,
12973                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12974                                           "cannot update indirection table");
12975         } else {
12976                 mlx5_free(queue_old);
12977                 shared_rss->origin.queue = queue;
12978                 shared_rss->origin.queue_num = action_conf->queue_num;
12979         }
12980         rte_spinlock_unlock(&shared_rss->action_rss_sl);
12981         return ret;
12982 }
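/*
 * Application-side usage sketch (illustrative; port_id, shared_action and
 * the queue values are hypothetical): updating the queue set of a shared RSS
 * action through the generic rte_flow_shared_action_update() API, which
 * dispatches down to __flow_dv_action_rss_update() above. Per the note
 * above, only the queue list is taken from the new configuration.
 */
#if 0 /* usage sketch only */
	uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_action_rss rss_conf = {
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	struct rte_flow_action update = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss_conf,
	};
	struct rte_flow_error flow_err;

	if (rte_flow_shared_action_update(port_id, shared_action, &update,
					  &flow_err) < 0)
		printf("update failed: %s\n", flow_err.message);
#endif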
12983
12984 /**
12985  * Update a shared action configuration in place, lock free
12986  * (the mutex should be acquired by the caller).
12987  *
12988  * @param[in] dev
12989  *   Pointer to the Ethernet device structure.
12990  * @param[in] action
12991  *   The shared action object to be updated.
12992  * @param[in] action_conf
12993  *   Action specification used to modify *action*.
12994  *   *action_conf* must be of a type matching the type of *action*;
12995  *   otherwise it is considered invalid.
12996  * @param[out] error
12997  *   Perform verbose error reporting if not NULL. Initialized in case of
12998  *   error only.
12999  *
13000  * @return
13001  *   0 on success, otherwise negative errno value.
13002  */
13003 static int
13004 flow_dv_action_update(struct rte_eth_dev *dev,
13005                         struct rte_flow_shared_action *action,
13006                         const void *action_conf,
13007                         struct rte_flow_error *err)
13008 {
13009         uint32_t act_idx = (uint32_t)(uintptr_t)action;
13010         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
13011         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
13012
13013         switch (type) {
13014         case MLX5_SHARED_ACTION_TYPE_RSS:
13015                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
13016         default:
13017                 return rte_flow_error_set(err, ENOTSUP,
13018                                           RTE_FLOW_ERROR_TYPE_ACTION,
13019                                           NULL,
13020                                           "action type update not supported");
13021         }
13022 }
13023
13024 static int
13025 flow_dv_action_query(struct rte_eth_dev *dev,
13026                      const struct rte_flow_shared_action *action, void *data,
13027                      struct rte_flow_error *error)
13028 {
13029         struct mlx5_age_param *age_param;
13030         struct rte_flow_query_age *resp;
13031         uint32_t act_idx = (uint32_t)(uintptr_t)action;
13032         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
13033         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
13034
13035         switch (type) {
13036         case MLX5_SHARED_ACTION_TYPE_AGE:
13037                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
13038                 resp = data;
13039                 resp->aged = __atomic_load_n(&age_param->state,
13040                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
13041                                                                           1 : 0;
13042                 resp->sec_since_last_hit_valid = !resp->aged;
13043                 if (resp->sec_since_last_hit_valid)
13044                         resp->sec_since_last_hit = __atomic_load_n
13045                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
13046                 return 0;
13047         default:
13048                 return rte_flow_error_set(error, ENOTSUP,
13049                                           RTE_FLOW_ERROR_TYPE_ACTION,
13050                                           NULL,
13051                                           "action type query not supported");
13052         }
13053 }
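/*
 * Application-side usage sketch (illustrative; port_id and shared_action are
 * hypothetical): querying a shared AGE action with
 * rte_flow_shared_action_query(), which lands in flow_dv_action_query()
 * above and fills a struct rte_flow_query_age.
 */
#if 0 /* usage sketch only */
	struct rte_flow_query_age age_resp;
	struct rte_flow_error flow_err;

	if (rte_flow_shared_action_query(port_id, shared_action, &age_resp,
					 &flow_err) == 0) {
		if (age_resp.aged)
			printf("action aged out\n");
		else if (age_resp.sec_since_last_hit_valid)
			printf("%u seconds since last hit\n",
			       age_resp.sec_since_last_hit);
	}
#endif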
13054
13055 /**
13056  * Query a DV flow rule for its statistics via DevX.
13057  *
13058  * @param[in] dev
13059  *   Pointer to Ethernet device.
13060  * @param[in] flow
13061  *   Pointer to the sub flow.
13062  * @param[out] data
13063  *   Data retrieved by the query.
13064  * @param[out] error
13065  *   Perform verbose error reporting if not NULL.
13066  *
13067  * @return
13068  *   0 on success, a negative errno value otherwise and rte_errno is set.
13069  */
13070 static int
13071 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
13072                     void *data, struct rte_flow_error *error)
13073 {
13074         struct mlx5_priv *priv = dev->data->dev_private;
13075         struct rte_flow_query_count *qc = data;
13076
13077         if (!priv->config.devx)
13078                 return rte_flow_error_set(error, ENOTSUP,
13079                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13080                                           NULL,
13081                                           "counters are not supported");
13082         if (flow->counter) {
13083                 uint64_t pkts, bytes;
13084                 struct mlx5_flow_counter *cnt;
13085                 int err;
13086
13087                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
13088                                                  NULL);
13089                 err = _flow_dv_query_count(dev, flow->counter, &pkts, &bytes);
13090
13091                 if (err)
13092                         return rte_flow_error_set(error, -err,
13093                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13094                                         NULL, "cannot read counters");
13095                 qc->hits_set = 1;
13096                 qc->bytes_set = 1;
13097                 qc->hits = pkts - cnt->hits;
13098                 qc->bytes = bytes - cnt->bytes;
13099                 if (qc->reset) {
13100                         cnt->hits = pkts;
13101                         cnt->bytes = bytes;
13102                 }
13103                 return 0;
13104         }
13105         return rte_flow_error_set(error, EINVAL,
13106                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13107                                   NULL,
13108                                   "counters are not available");
13109 }
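/*
 * Worked example of the delta logic above (illustrative numbers): the
 * hardware counter is free-running, so the driver keeps the reading taken at
 * the last reset in cnt->hits/cnt->bytes and reports the difference.
 */
#if 0 /* arithmetic sketch only */
	uint64_t baseline = 100;	/* cnt->hits stored at last reset */
	uint64_t hw_reading = 160;	/* current value read via DevX */
	uint64_t reported = hw_reading - baseline;	/* query returns 60 */
	/* With qc->reset set, the baseline becomes 160 for the next query. */
#endif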
13110
13111 /**
13112  * Query a flow rule AGE action for aging information.
13113  *
13114  * @param[in] dev
13115  *   Pointer to Ethernet device.
13116  * @param[in] flow
13117  *   Pointer to the sub flow.
13118  * @param[out] data
13119  *   Data retrieved by the query.
13120  * @param[out] error
13121  *   Perform verbose error reporting if not NULL.
13122  *
13123  * @return
13124  *   0 on success, a negative errno value otherwise and rte_errno is set.
13125  */
13126 static int
13127 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
13128                   void *data, struct rte_flow_error *error)
13129 {
13130         struct rte_flow_query_age *resp = data;
13131         struct mlx5_age_param *age_param;
13132
13133         if (flow->age) {
13134                 struct mlx5_aso_age_action *act =
13135                                      flow_aso_age_get_by_idx(dev, flow->age);
13136
13137                 age_param = &act->age_params;
13138         } else if (flow->counter) {
13139                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
13140
13141                 if (!age_param || !age_param->timeout)
13142                         return rte_flow_error_set
13143                                         (error, EINVAL,
13144                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13145                                          NULL, "cannot read age data");
13146         } else {
13147                 return rte_flow_error_set(error, EINVAL,
13148                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13149                                           NULL, "age data not available");
13150         }
13151         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
13152                                      AGE_TMOUT ? 1 : 0;
13153         resp->sec_since_last_hit_valid = !resp->aged;
13154         if (resp->sec_since_last_hit_valid)
13155                 resp->sec_since_last_hit = __atomic_load_n
13156                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
13157         return 0;
13158 }
13159
13160 /**
13161  * Query a flow.
13162  *
13163  * @see rte_flow_query()
13164  * @see rte_flow_ops
13165  */
13166 static int
13167 flow_dv_query(struct rte_eth_dev *dev,
13168               struct rte_flow *flow,
13169               const struct rte_flow_action *actions,
13170               void *data,
13171               struct rte_flow_error *error)
13172 {
13173         int ret = -EINVAL;
13174
13175         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
13176                 switch (actions->type) {
13177                 case RTE_FLOW_ACTION_TYPE_VOID:
13178                         break;
13179                 case RTE_FLOW_ACTION_TYPE_COUNT:
13180                         ret = flow_dv_query_count(dev, flow, data, error);
13181                         break;
13182                 case RTE_FLOW_ACTION_TYPE_AGE:
13183                         ret = flow_dv_query_age(dev, flow, data, error);
13184                         break;
13185                 default:
13186                         return rte_flow_error_set(error, ENOTSUP,
13187                                                   RTE_FLOW_ERROR_TYPE_ACTION,
13188                                                   actions,
13189                                                   "action not supported");
13190                 }
13191         }
13192         return ret;
13193 }
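/*
 * Application-side usage sketch (illustrative; port_id and flow are
 * hypothetical): querying the COUNT action of a rule via rte_flow_query(),
 * which reaches flow_dv_query() above; the driver walks the action list
 * until END, so the query actions are passed as an END-terminated array.
 */
#if 0 /* usage sketch only */
	struct rte_flow_action query_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_query_count qc = { .reset = 1 };
	struct rte_flow_error flow_err;

	if (rte_flow_query(port_id, flow, query_actions, &qc, &flow_err) == 0)
		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
		       qc.hits, qc.bytes);
#endif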
13194
13195 /**
13196  * Destroy the meter table set.
13197  * Lock free (the mutex should be acquired by the caller).
13198  *
13199  * @param[in] dev
13200  *   Pointer to Ethernet device.
13201  * @param[in] tbl
13202  *   Pointer to the meter table set.
13203  *
13204  * @return
13205  *   Always 0.
13206  */
13207 static int
13208 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
13209                         struct mlx5_meter_domains_infos *tbl)
13210 {
13211         struct mlx5_priv *priv = dev->data->dev_private;
13212         struct mlx5_meter_domains_infos *mtd =
13213                                 (struct mlx5_meter_domains_infos *)tbl;
13214
13215         if (!mtd || !priv->config.dv_flow_en)
13216                 return 0;
13217         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
13218                 claim_zero(mlx5_flow_os_destroy_flow
13219                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
13220         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
13221                 claim_zero(mlx5_flow_os_destroy_flow
13222                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
13223         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
13224                 claim_zero(mlx5_flow_os_destroy_flow
13225                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
13226         if (mtd->egress.color_matcher)
13227                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13228                            (mtd->egress.color_matcher));
13229         if (mtd->egress.any_matcher)
13230                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13231                            (mtd->egress.any_matcher));
13232         if (mtd->egress.tbl)
13233                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
13234         if (mtd->egress.sfx_tbl)
13235                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
13236         if (mtd->ingress.color_matcher)
13237                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13238                            (mtd->ingress.color_matcher));
13239         if (mtd->ingress.any_matcher)
13240                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13241                            (mtd->ingress.any_matcher));
13242         if (mtd->ingress.tbl)
13243                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
13244         if (mtd->ingress.sfx_tbl)
13245                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13246                                              mtd->ingress.sfx_tbl);
13247         if (mtd->transfer.color_matcher)
13248                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13249                            (mtd->transfer.color_matcher));
13250         if (mtd->transfer.any_matcher)
13251                 claim_zero(mlx5_flow_os_destroy_flow_matcher
13252                            (mtd->transfer.any_matcher));
13253         if (mtd->transfer.tbl)
13254                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
13255         if (mtd->transfer.sfx_tbl)
13256                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13257                                              mtd->transfer.sfx_tbl);
13258         if (mtd->drop_actn)
13259                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
13260         mlx5_free(mtd);
13261         return 0;
13262 }
13263
13264 /* Number of meter flow actions: count and jump, or count and drop. */
13265 #define METER_ACTIONS 2
13266
13267 /**
13268  * Create the specified domain's meter table and suffix table.
13269  *
13270  * @param[in] dev
13271  *   Pointer to Ethernet device.
13272  * @param[in,out] mtb
13273  *   Pointer to DV meter table set.
13274  * @param[in] egress
13275  *   Table attribute (nonzero selects the egress domain).
13276  * @param[in] transfer
13277  *   Table attribute (nonzero selects the FDB/transfer domain).
13278  * @param[in] color_reg_c_idx
13279  *   Reg C index for color match.
13280  *
13281  * @return
13282  *   0 on success, -1 otherwise and rte_errno is set.
13283  */
13284 static int
13285 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
13286                            struct mlx5_meter_domains_infos *mtb,
13287                            uint8_t egress, uint8_t transfer,
13288                            uint32_t color_reg_c_idx)
13289 {
13290         struct mlx5_priv *priv = dev->data->dev_private;
13291         struct mlx5_dev_ctx_shared *sh = priv->sh;
13292         struct mlx5_flow_dv_match_params mask = {
13293                 .size = sizeof(mask.buf),
13294         };
13295         struct mlx5_flow_dv_match_params value = {
13296                 .size = sizeof(value.buf),
13297         };
13298         struct mlx5dv_flow_matcher_attr dv_attr = {
13299                 .type = IBV_FLOW_ATTR_NORMAL,
13300                 .priority = 0,
13301                 .match_criteria_enable = 0,
13302                 .match_mask = (void *)&mask,
13303         };
13304         void *actions[METER_ACTIONS];
13305         struct mlx5_meter_domain_info *dtb;
13306         struct rte_flow_error error;
13307         int i = 0;
13308         int ret;
13309
13310         if (transfer)
13311                 dtb = &mtb->transfer;
13312         else if (egress)
13313                 dtb = &mtb->egress;
13314         else
13315                 dtb = &mtb->ingress;
13316         /* Create the meter table with METER level. */
13317         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
13318                                             egress, transfer, false, NULL, 0,
13319                                             0, &error);
13320         if (!dtb->tbl) {
13321                 DRV_LOG(ERR, "Failed to create meter policer table.");
13322                 return -1;
13323         }
13324         /* Create the meter suffix table with SUFFIX level. */
13325         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
13326                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
13327                                             egress, transfer, false, NULL, 0,
13328                                             0, &error);
13329         if (!dtb->sfx_tbl) {
13330                 DRV_LOG(ERR, "Failed to create meter suffix table.");
13331                 return -1;
13332         }
13333         /* Create matchers, Any and Color. */
13334         dv_attr.priority = 3;
13335         dv_attr.match_criteria_enable = 0;
13336         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
13337                                                &dtb->any_matcher);
13338         if (ret) {
13339                 DRV_LOG(ERR, "Failed to create meter"
13340                              " policer default matcher.");
13341                 goto error_exit;
13342         }
13343         dv_attr.priority = 0;
13344         dv_attr.match_criteria_enable =
13345                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
13346         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
13347                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
13348         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
13349                                                &dtb->color_matcher);
13350         if (ret) {
13351                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
13352                 goto error_exit;
13353         }
13354         if (mtb->count_actns[RTE_MTR_DROPPED])
13355                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
13356         actions[i++] = mtb->drop_actn;
13357         /* Default rule: lowest priority, match any, actions: drop. */
13358         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
13359                                        actions,
13360                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
13361         if (ret) {
13362                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
13363                 goto error_exit;
13364         }
13365         return 0;
13366 error_exit:
13367         return -1;
13368 }
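/*
 * Resulting per-domain layout (illustrative summary of the code above):
 *
 *   METER level table:
 *     priority 0: color matcher on REG_C  -> [count] + jump or drop
 *     priority 3: any matcher (catch-all) -> [count] + drop (default rule)
 *   SUFFIX level table:
 *     jump target holding the continuation of the original flow.
 *
 * The per-color rules themselves are installed later by
 * flow_dv_create_policer_forward_rule().
 */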
13369
13370 /**
13371  * Create the needed meter and suffix tables.
13372  * Lock free (the mutex should be acquired by the caller).
13373  *
13374  * @param[in] dev
13375  *   Pointer to Ethernet device.
13376  * @param[in] fm
13377  *   Pointer to the flow meter.
13378  *
13379  * @return
13380  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
13381  */
13382 static struct mlx5_meter_domains_infos *
13383 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
13384                        const struct mlx5_flow_meter *fm)
13385 {
13386         struct mlx5_priv *priv = dev->data->dev_private;
13387         struct mlx5_meter_domains_infos *mtb;
13388         int ret;
13389         int i;
13390
13391         if (!priv->mtr_en) {
13392                 rte_errno = ENOTSUP;
13393                 return NULL;
13394         }
13395         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
13396         if (!mtb) {
13397                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
13398                 return NULL;
13399         }
13400         /* Create meter count actions. */
13401         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
13402                 struct mlx5_flow_counter *cnt;
13403                 if (!fm->policer_stats.cnt[i])
13404                         continue;
13405                 cnt = flow_dv_counter_get_by_idx(dev,
13406                       fm->policer_stats.cnt[i], NULL);
13407                 mtb->count_actns[i] = cnt->action;
13408         }
13409         /* Create drop action. */
13410         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
13411         if (ret) {
13412                 DRV_LOG(ERR, "Failed to create drop action.");
13413                 goto error_exit;
13414         }
13415         /* Egress meter table. */
13416         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
13417         if (ret) {
13418                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
13419                 goto error_exit;
13420         }
13421         /* Ingress meter table. */
13422         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
13423         if (ret) {
13424                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
13425                 goto error_exit;
13426         }
13427         /* FDB meter table. */
13428         if (priv->config.dv_esw_en) {
13429                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
13430                                                  priv->mtr_color_reg);
13431                 if (ret) {
13432                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
13433                         goto error_exit;
13434                 }
13435         }
13436         return mtb;
13437 error_exit:
13438         flow_dv_destroy_mtr_tbl(dev, mtb);
13439         return NULL;
13440 }
13441
13442 /**
13443  * Destroy domain policer rule.
13444  *
13445  * @param[in] dt
13446  *   Pointer to domain table.
13447  */
13448 static void
13449 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
13450 {
13451         int i;
13452
13453         for (i = 0; i < RTE_MTR_DROPPED; i++) {
13454                 if (dt->policer_rules[i]) {
13455                         claim_zero(mlx5_flow_os_destroy_flow
13456                                    (dt->policer_rules[i]));
13457                         dt->policer_rules[i] = NULL;
13458                 }
13459         }
13460         if (dt->jump_actn) {
13461                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
13462                 dt->jump_actn = NULL;
13463         }
13464 }
13465
13466 /**
13467  * Destroy policer rules.
13468  *
13469  * @param[in] dev
13470  *   Pointer to Ethernet device.
13471  * @param[in] fm
13472  *   Pointer to flow meter structure.
13473  * @param[in] attr
13474  *   Pointer to flow attributes.
13475  *
13476  * @return
13477  *   Always 0.
13478  */
13479 static int
13480 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
13481                               const struct mlx5_flow_meter *fm,
13482                               const struct rte_flow_attr *attr)
13483 {
13484         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
13485
13486         if (!mtb)
13487                 return 0;
13488         if (attr->egress)
13489                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
13490         if (attr->ingress)
13491                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
13492         if (attr->transfer)
13493                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
13494         return 0;
13495 }
13496
13497 /**
13498  * Create the specified domain's meter policer rules.
13499  *
13500  * @param[in] fm
13501  *   Pointer to flow meter structure.
13502  * @param[in] dtb
13503  *   Pointer to the DV meter domain table set.
13504  * @param[in] mtr_reg_c
13505  *   Color match REG_C.
13506  *
13507  * @return
13508  *   0 on success, -1 otherwise.
13509  */
13510 static int
13511 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
13512                                     struct mlx5_meter_domain_info *dtb,
13513                                     uint8_t mtr_reg_c)
13514 {
13515         struct mlx5_flow_dv_match_params matcher = {
13516                 .size = sizeof(matcher.buf),
13517         };
13518         struct mlx5_flow_dv_match_params value = {
13519                 .size = sizeof(value.buf),
13520         };
13521         struct mlx5_meter_domains_infos *mtb = fm->mfts;
13522         void *actions[METER_ACTIONS];
13523         int i;
13524         int ret = 0;
13525
13526         /* Create jump action. */
13527         if (!dtb->jump_actn)
13528                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
13529                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
13530         if (ret) {
13531                 DRV_LOG(ERR, "Failed to create policer jump action.");
13532                 goto error;
13533         }
13534         for (i = 0; i < RTE_MTR_DROPPED; i++) {
13535                 int j = 0;
13536
13537                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
13538                                        rte_col_2_mlx5_col(i), UINT8_MAX);
13539                 if (mtb->count_actns[i])
13540                         actions[j++] = mtb->count_actns[i];
13541                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
13542                         actions[j++] = mtb->drop_actn;
13543                 else
13544                         actions[j++] = dtb->jump_actn;
13545                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
13546                                                (void *)&value, j, actions,
13547                                                &dtb->policer_rules[i]);
13548                 if (ret) {
13549                         DRV_LOG(ERR, "Failed to create policer rule.");
13550                         goto error;
13551                 }
13552         }
13553         return 0;
13554 error:
13555         rte_errno = errno;
13556         return -1;
13557 }
13558
13559 /**
13560  * Create policer rules.
13561  *
13562  * @param[in] dev
13563  *   Pointer to Ethernet device.
13564  * @param[in] fm
13565  *   Pointer to flow meter structure.
13566  * @param[in] attr
13567  *   Pointer to flow attributes.
13568  *
13569  * @return
13570  *   0 on success, -1 otherwise.
13571  */
13572 static int
13573 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
13574                              struct mlx5_flow_meter *fm,
13575                              const struct rte_flow_attr *attr)
13576 {
13577         struct mlx5_priv *priv = dev->data->dev_private;
13578         struct mlx5_meter_domains_infos *mtb = fm->mfts;
13579         int ret;
13580
13581         if (attr->egress) {
13582                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
13583                                                 priv->mtr_color_reg);
13584                 if (ret) {
13585                         DRV_LOG(ERR, "Failed to create egress policer.");
13586                         goto error;
13587                 }
13588         }
13589         if (attr->ingress) {
13590                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
13591                                                 priv->mtr_color_reg);
13592                 if (ret) {
13593                         DRV_LOG(ERR, "Failed to create ingress policer.");
13594                         goto error;
13595                 }
13596         }
13597         if (attr->transfer) {
13598                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
13599                                                 priv->mtr_color_reg);
13600                 if (ret) {
13601                         DRV_LOG(ERR, "Failed to create transfer policer.");
13602                         goto error;
13603                 }
13604         }
13605         return 0;
13606 error:
13607         flow_dv_destroy_policer_rules(dev, fm, attr);
13608         return -1;
13609 }
13610
13611 /**
13612  * Validate batch counter support in the root table.
13613  *
13614  * Create a simple flow with an invalid counter and a drop action on the
13615  * root table to check whether a batch counter with offset is supported there.
13616  *
13617  * @param[in] dev
13618  *   Pointer to rte_eth_dev structure.
13619  *
13620  * @return
13621  *   0 on success, a negative errno value otherwise and rte_errno is set.
13622  */
13623 int
13624 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
13625 {
13626         struct mlx5_priv *priv = dev->data->dev_private;
13627         struct mlx5_dev_ctx_shared *sh = priv->sh;
13628         struct mlx5_flow_dv_match_params mask = {
13629                 .size = sizeof(mask.buf),
13630         };
13631         struct mlx5_flow_dv_match_params value = {
13632                 .size = sizeof(value.buf),
13633         };
13634         struct mlx5dv_flow_matcher_attr dv_attr = {
13635                 .type = IBV_FLOW_ATTR_NORMAL,
13636                 .priority = 0,
13637                 .match_criteria_enable = 0,
13638                 .match_mask = (void *)&mask,
13639         };
13640         void *actions[2] = { 0 };
13641         struct mlx5_flow_tbl_resource *tbl = NULL;
13642         struct mlx5_devx_obj *dcs = NULL;
13643         void *matcher = NULL;
13644         void *flow = NULL;
13645         int ret = -1;
13646
13647         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
13648         if (!tbl)
13649                 goto err;
13650         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
13651         if (!dcs)
13652                 goto err;
13653         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
13654                                                     &actions[0]);
13655         if (ret)
13656                 goto err;
13657         actions[1] = priv->drop_queue.hrxq->action;
13658         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
13659         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
13660                                                &matcher);
13661         if (ret)
13662                 goto err;
13663         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
13664                                        actions, &flow);
13665 err:
13666         /*
13667          * If a batch counter with offset is not supported, the driver does not
13668          * validate the invalid offset value and flow creation should succeed.
13669          * In that case, batch counters are not supported in the root table.
13670          *
13671          * Otherwise, if flow creation fails, counter offset is supported.
13672          */
13673         if (flow) {
13674                 DRV_LOG(INFO, "Batch counter is not supported in root "
13675                               "table. Switch to fallback mode.");
13676                 rte_errno = ENOTSUP;
13677                 ret = -rte_errno;
13678                 claim_zero(mlx5_flow_os_destroy_flow(flow));
13679         } else {
13680                 /* Check the matcher: validation must fail at flow creation. */
13681                 if (!matcher || errno != EINVAL)
13682                         DRV_LOG(ERR, "Unexpected error in counter offset "
13683                                      "support detection");
13684                 ret = 0;
13685         }
13686         if (actions[0])
13687                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
13688         if (matcher)
13689                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
13690         if (tbl)
13691                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13692         if (dcs)
13693                 claim_zero(mlx5_devx_cmd_destroy(dcs));
13694         return ret;
13695 }
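/*
 * Caller-side sketch (illustrative; the counter_fallback flag is an assumed
 * field of the shared counter management): a single probe at device start
 * decides between batch counters with offsets and per-counter fallback mode.
 * The function returns a negative value when offsets are unsupported.
 */
#if 0 /* usage sketch only */
	if (mlx5_flow_dv_discover_counter_offset_support(dev))
		priv->sh->cmng.counter_fallback = 1;
#endif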
13696
13697 /**
13698  * Query a DevX counter.
13699  *
13700  * @param[in] dev
13701  *   Pointer to the Ethernet device structure.
13702  * @param[in] counter
13703  *   Index of the flow counter.
13704  * @param[in] clear
13705  *   Set to clear the counter statistics.
13706  * @param[out] pkts
13707  *   The statistics value of packets.
13708  * @param[out] bytes
13709  *   The statistics value of bytes.
13710  *
13711  * @return
13712  *   0 on success, otherwise return -1.
13713  */
13714 static int
13715 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
13716                       uint64_t *pkts, uint64_t *bytes)
13717 {
13718         struct mlx5_priv *priv = dev->data->dev_private;
13719         struct mlx5_flow_counter *cnt;
13720         uint64_t inn_pkts, inn_bytes;
13721         int ret;
13722
13723         if (!priv->config.devx)
13724                 return -1;
13725
13726         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
13727         if (ret)
13728                 return -1;
13729         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
13730         *pkts = inn_pkts - cnt->hits;
13731         *bytes = inn_bytes - cnt->bytes;
13732         if (clear) {
13733                 cnt->hits = inn_pkts;
13734                 cnt->bytes = inn_bytes;
13735         }
13736         return 0;
13737 }
13738
13739 /**
13740  * Get aged-out flows.
13741  *
13742  * @param[in] dev
13743  *   Pointer to the Ethernet device structure.
13744  * @param[in] context
13745  *   The address of an array of pointers to the aged-out flow contexts.
13746  * @param[in] nb_contexts
13747  *   The length of the context array.
13748  * @param[out] error
13749  *   Perform verbose error reporting if not NULL. Initialized in case of
13750  *   error only.
13751  *
13752  * @return
13753  *   The number of contexts retrieved on success, otherwise a negative
13754  *   errno value. If nb_contexts is 0, the total number of all aged
13755  *   contexts is returned. If nb_contexts is not 0, the number of aged
13756  *   flows reported in the context array is returned.
13758  */
13759 static int
13760 flow_get_aged_flows(struct rte_eth_dev *dev,
13761                     void **context,
13762                     uint32_t nb_contexts,
13763                     struct rte_flow_error *error)
13764 {
13765         struct mlx5_priv *priv = dev->data->dev_private;
13766         struct mlx5_age_info *age_info;
13767         struct mlx5_age_param *age_param;
13768         struct mlx5_flow_counter *counter;
13769         struct mlx5_aso_age_action *act;
13770         int nb_flows = 0;
13771
13772         if (nb_contexts && !context)
13773                 return rte_flow_error_set(error, EINVAL,
13774                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13775                                           NULL, "empty context");
13776         age_info = GET_PORT_AGE_INFO(priv);
13777         rte_spinlock_lock(&age_info->aged_sl);
13778         LIST_FOREACH(act, &age_info->aged_aso, next) {
13779                 nb_flows++;
13780                 if (nb_contexts) {
13781                         context[nb_flows - 1] =
13782                                                 act->age_params.context;
13783                         if (!(--nb_contexts))
13784                                 break;
13785                 }
13786         }
13787         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
13788                 nb_flows++;
13789                 if (nb_contexts) {
13790                         age_param = MLX5_CNT_TO_AGE(counter);
13791                         context[nb_flows - 1] = age_param->context;
13792                         if (!(--nb_contexts))
13793                                 break;
13794                 }
13795         }
13796         rte_spinlock_unlock(&age_info->aged_sl);
13797         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13798         return nb_flows;
13799 }
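/*
 * Application-side usage sketch (illustrative; port_id and the
 * handle_aged_context() callback are hypothetical): the two-call pattern for
 * rte_flow_get_aged_flows() — first ask for the count with nb_contexts == 0,
 * then fetch the contexts that were attached to the AGE actions.
 */
#if 0 /* usage sketch only */
	struct rte_flow_error flow_err;
	int total = rte_flow_get_aged_flows(port_id, NULL, 0, &flow_err);

	if (total > 0) {
		void **ctx = calloc(total, sizeof(*ctx));

		if (ctx != NULL) {
			int n = rte_flow_get_aged_flows(port_id, ctx, total,
							&flow_err);

			for (int i = 0; i < n; i++)
				handle_aged_context(ctx[i]);
			free(ctx);
		}
	}
#endif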
13800
13801 /*
13802  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
13803  */
13804 static uint32_t
13805 flow_dv_counter_allocate(struct rte_eth_dev *dev)
13806 {
13807         return flow_dv_counter_alloc(dev, 0);
13808 }
13809
13810 /**
13811  * Validate shared action.
13812  * Dispatcher for action type specific validation.
13813  *
13814  * @param[in] dev
13815  *   Pointer to the Ethernet device structure.
13816  * @param[in] conf
13817  *   Shared action configuration.
13818  * @param[in] action
13819  *   The shared action object to validate.
13820  * @param[out] error
13821  *   Perform verbose error reporting if not NULL. Initialized in case of
13822  *   error only.
13823  *
13824  * @return
13825  *   0 on success, otherwise negative errno value.
13826  */
13827 static int
13828 flow_dv_action_validate(struct rte_eth_dev *dev,
13829                         const struct rte_flow_shared_action_conf *conf,
13830                         const struct rte_flow_action *action,
13831                         struct rte_flow_error *err)
13832 {
13833         struct mlx5_priv *priv = dev->data->dev_private;
13834
13835         RTE_SET_USED(conf);
13836         switch (action->type) {
13837         case RTE_FLOW_ACTION_TYPE_RSS:
13838                 return mlx5_validate_action_rss(dev, action, err);
13839         case RTE_FLOW_ACTION_TYPE_AGE:
13840                 if (!priv->sh->aso_age_mng)
13841                         return rte_flow_error_set(err, ENOTSUP,
13842                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13843                                                 NULL,
13844                                              "shared age action not supported");
13845                 return flow_dv_validate_action_age(0, action, dev, err);
13846         default:
13847                 return rte_flow_error_set(err, ENOTSUP,
13848                                           RTE_FLOW_ERROR_TYPE_ACTION,
13849                                           NULL,
13850                                           "action type not supported");
13851         }
13852 }
13853
13854 static int
13855 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
13856 {
13857         struct mlx5_priv *priv = dev->data->dev_private;
13858         int ret = 0;
13859
13860         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
13861                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
13862                                                 flags);
13863                 if (ret != 0)
13864                         return ret;
13865         }
13866         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
13867                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
13868                 if (ret != 0)
13869                         return ret;
13870         }
13871         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
13872                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
13873                 if (ret != 0)
13874                         return ret;
13875         }
13876         return 0;
13877 }
13878
13879 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
13880         .validate = flow_dv_validate,
13881         .prepare = flow_dv_prepare,
13882         .translate = flow_dv_translate,
13883         .apply = flow_dv_apply,
13884         .remove = flow_dv_remove,
13885         .destroy = flow_dv_destroy,
13886         .query = flow_dv_query,
13887         .create_mtr_tbls = flow_dv_create_mtr_tbl,
13888         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
13889         .create_policer_rules = flow_dv_create_policer_rules,
13890         .destroy_policer_rules = flow_dv_destroy_policer_rules,
13891         .counter_alloc = flow_dv_counter_allocate,
13892         .counter_free = flow_dv_counter_free,
13893         .counter_query = flow_dv_counter_query,
13894         .get_aged_flows = flow_get_aged_flows,
13895         .action_validate = flow_dv_action_validate,
13896         .action_create = flow_dv_action_create,
13897         .action_destroy = flow_dv_action_destroy,
13898         .action_update = flow_dv_action_update,
13899         .action_query = flow_dv_action_query,
13900         .sync_domain = flow_dv_sync_domain,
13901 };
13902
13903 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
13904