net/mlx5: connect meter policy to created flows
drivers/net/mlx5/mlx5_flow_dv.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};
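
/*
 * Illustrative sketch (not part of the driver): the anonymous struct and
 * the plain "attr" word overlay each other, so one store can clear every
 * protocol flag at once when a tunnel boundary is crossed:
 *
 *     union flow_dv_attr attr = { .attr = 0 };
 *     attr.ipv4 = 1;
 *     attr.udp = 1;
 *     attr.attr = 0;  // wipes ipv4/udp/valid in a single write
 */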

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than in tunnel
 * mode. In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. The layer
         * flags from the prefix flow must be used, as the suffix flow may
         * not carry the user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};
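
/*
 * Illustrative note on the tables above: each modify_* array walks its
 * protocol header as {size, offset, id} records, in bytes (except for
 * modify_vlan_out_first_vid), and is terminated by a zero-sized entry.
 * For example, a TCP destination port rewrite resolves to the second
 * record of modify_tcp:
 *
 *     {2, 2, MLX5_MODI_OUT_TCP_DPORT}  // 2 bytes at header offset 2
 */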

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
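
/*
 * Worked example (illustrative only): the TCI keeps PCP in bits 15..13
 * and VID in bits 11..0. Starting from a zero TCI, applying
 * OF_SET_VLAN_PCP with vlan_pcp = 5 and then OF_SET_VLAN_VID with
 * vlan_vid = 100 yields:
 *
 *     vlan->vlan_tci = (5 << 13) | 100 = 0xA064
 */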

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
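
/*
 * Illustrative sketch (hypothetical values): fetching a 3-byte field,
 * e.g. a VXLAN VNI, combines a 16-bit load with one trailing byte:
 *
 *     const uint8_t vni[3] = {0x12, 0x34, 0x56};
 *     uint32_t v = flow_dv_fetch_field(vni, 3);  // v == 0x123456
 */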

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should likewise be presented in big-endian format.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
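
/*
 * Worked example (illustrative only): for a 4-byte field whose fetched
 * host-endian mask is 0x00ffff00:
 *
 *     off_b  = rte_bsf32(0x00ffff00) = 8
 *     size_b = 32 - off_b - __builtin_clz(0x00ffff00) = 32 - 8 - 8 = 16
 *
 * so the command rewrites 16 bits starting at bit offset 8, and the data
 * is aligned accordingly by (data & mask) >> off_b. A full 32-bit width
 * is encoded as length 0.
 */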

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}
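
/*
 * Illustrative note: unlike the converters above, this routine builds its
 * single modification command by hand. The modify_vlan_out_first_vid
 * table stores size/offset in bits (see the "Size in bits !!!" comment),
 * so it cannot be fed through flow_dv_convert_modify_action(), which
 * treats these fields as byte counts.
 */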

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
779  *   Pointer to action specification.
780  * @param[in] items
781  *   Pointer to rte_flow_item objects list.
782  * @param[in] attr
783  *   Pointer to flow attributes structure.
784  * @param[in] dev_flow
785  *   Pointer to the sub flow.
786  * @param[in] tunnel_decap
787  *   Whether action is after tunnel decapsulation.
788  * @param[out] error
789  *   Pointer to the error structure.
790  *
791  * @return
792  *   0 on success, a negative errno value otherwise and rte_errno is set.
793  */
794 static int
795 flow_dv_convert_action_modify_dec_ttl
796                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
797                          const struct rte_flow_item *items,
798                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
799                          bool tunnel_decap, struct rte_flow_error *error)
800 {
801         struct rte_flow_item item;
802         struct rte_flow_item_ipv4 ipv4;
803         struct rte_flow_item_ipv4 ipv4_mask;
804         struct rte_flow_item_ipv6 ipv6;
805         struct rte_flow_item_ipv6 ipv6_mask;
806         struct field_modify_info *field;
807
808         if (!attr->valid)
809                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
810         if (attr->ipv4) {
811                 memset(&ipv4, 0, sizeof(ipv4));
812                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
813                 ipv4.hdr.time_to_live = 0xFF;
814                 ipv4_mask.hdr.time_to_live = 0xFF;
815                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
816                 item.spec = &ipv4;
817                 item.mask = &ipv4_mask;
818                 field = modify_ipv4;
819         } else {
820                 MLX5_ASSERT(attr->ipv6);
821                 memset(&ipv6, 0, sizeof(ipv6));
822                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
823                 ipv6.hdr.hop_limits = 0xFF;
824                 ipv6_mask.hdr.hop_limits = 0xFF;
825                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
826                 item.spec = &ipv6;
827                 item.mask = &ipv6_mask;
828                 field = modify_ipv6;
829         }
830         return flow_dv_convert_modify_action(&item, field, NULL, resource,
831                                              MLX5_MODIFICATION_TYPE_ADD, error);
832 }
833
834 /**
835  * Convert modify-header increment/decrement TCP Sequence number
836  * to DV specification.
837  *
838  * @param[in,out] resource
839  *   Pointer to the modify-header resource.
840  * @param[in] action
841  *   Pointer to action specification.
842  * @param[out] error
843  *   Pointer to the error structure.
844  *
845  * @return
846  *   0 on success, a negative errno value otherwise and rte_errno is set.
847  */
848 static int
849 flow_dv_convert_action_modify_tcp_seq
850                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
851                          const struct rte_flow_action *action,
852                          struct rte_flow_error *error)
853 {
854         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
855         uint64_t value = rte_be_to_cpu_32(*conf);
856         struct rte_flow_item item;
857         struct rte_flow_item_tcp tcp;
858         struct rte_flow_item_tcp tcp_mask;
859
860         memset(&tcp, 0, sizeof(tcp));
861         memset(&tcp_mask, 0, sizeof(tcp_mask));
862         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * we need to add UINT32_MAX to Y, X times.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
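
/*
 * Worked example (illustrative only): decrementing by X is emulated with
 * modular addition. Since UINT32_MAX == 2^32 - 1:
 *
 *     Y + X * UINT32_MAX == Y + X * 2^32 - X == Y - X  (mod 2^32)
 *
 * e.g. for DEC_TCP_SEQ by 3, value becomes 3 * UINT32_MAX and the
 * truncated 32-bit sum with the sequence number is exactly seq - 3.
 */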

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * we need to add UINT32_MAX to Y, X times.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because
                         * there is no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
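
/*
 * Worked example (illustrative only): suppose the reported mask is
 * reg_c0 = 0x00ff0000, i.e. only bits 16..23 of reg_c[0] are available
 * to applications:
 *
 *     rte_bsf32(reg_c0)   = 16  // lowest usable bit
 *     rte_fls_u32(reg_c0) = 24  // one past the highest usable bit
 *
 * A copy into REG_C_0 therefore starts at destination offset 16, and the
 * mask is clipped so only the 8 usable bits are ever written.
 */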

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        /*
         * In the datapath code there are no endianness
         * conversions, for performance reasons; all
         * pattern conversions are done in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
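
/*
 * Worked example (illustrative only): msk_c0 holds the reg_c[0] mask in
 * the big-endian pattern layout used by the datapath. On a little-endian
 * host with msk_c0 = 0x0000ffff:
 *
 *     shl_c0 = 32 - rte_fls_u32(0x0000ffff) = 16
 *
 * Shifting data and mask left by 16 makes rte_cpu_to_be_32(mask) land
 * exactly inside msk_c0, which the MLX5_ASSERT in the routine above
 * checks.
 */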

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bit offset of IPv6 is not byte aligned,
         * rdma-core only accepts byte-aligned DSCP bits starting from
         * bit 0 to 5, to be compatible with IPv4. No need to shift the
         * bits in the IPv6 case, as rdma-core requires a byte-aligned value.
         */
1325         ipv6.hdr.vtc_flow = conf->dscp;
1326         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1327         item.spec = &ipv6;
1328         item.mask = &ipv6_mask;
1329         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1330                                              MLX5_MODIFICATION_TYPE_SET, error);
1331 }
1332
1333 static int
1334 mlx5_flow_item_field_width(enum rte_flow_field_id field)
1335 {
1336         switch (field) {
1337         case RTE_FLOW_FIELD_START:
1338                 return 32;
1339         case RTE_FLOW_FIELD_MAC_DST:
1340         case RTE_FLOW_FIELD_MAC_SRC:
1341                 return 48;
1342         case RTE_FLOW_FIELD_VLAN_TYPE:
1343                 return 16;
1344         case RTE_FLOW_FIELD_VLAN_ID:
1345                 return 12;
1346         case RTE_FLOW_FIELD_MAC_TYPE:
1347                 return 16;
1348         case RTE_FLOW_FIELD_IPV4_DSCP:
1349                 return 6;
1350         case RTE_FLOW_FIELD_IPV4_TTL:
1351                 return 8;
1352         case RTE_FLOW_FIELD_IPV4_SRC:
1353         case RTE_FLOW_FIELD_IPV4_DST:
1354                 return 32;
1355         case RTE_FLOW_FIELD_IPV6_DSCP:
1356                 return 6;
1357         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1358                 return 8;
1359         case RTE_FLOW_FIELD_IPV6_SRC:
1360         case RTE_FLOW_FIELD_IPV6_DST:
1361                 return 128;
1362         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1363         case RTE_FLOW_FIELD_TCP_PORT_DST:
1364                 return 16;
1365         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1366         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1367                 return 32;
1368         case RTE_FLOW_FIELD_TCP_FLAGS:
1369                 return 6;
1370         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1371         case RTE_FLOW_FIELD_UDP_PORT_DST:
1372                 return 16;
1373         case RTE_FLOW_FIELD_VXLAN_VNI:
1374         case RTE_FLOW_FIELD_GENEVE_VNI:
1375                 return 24;
1376         case RTE_FLOW_FIELD_GTP_TEID:
1377         case RTE_FLOW_FIELD_TAG:
1378                 return 32;
1379         case RTE_FLOW_FIELD_MARK:
1380                 return 24;
1381         case RTE_FLOW_FIELD_META:
1382                 return 32;
1383         case RTE_FLOW_FIELD_POINTER:
1384         case RTE_FLOW_FIELD_VALUE:
1385                 return 64;
1386         default:
1387                 MLX5_ASSERT(false);
1388         }
1389         return 0;
1390 }
1391
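/*
 * Editor's note: the widths above bound the "width" member of
 * struct rte_flow_action_modify_field. The conversion code below builds
 * partial-field masks of the form (0xffffffff >> (32 - width)); e.g.
 * RTE_FLOW_FIELD_VLAN_ID has width 12, so a full-field modification uses
 * mask 0x0fff, and RTE_FLOW_FIELD_IPV4_DSCP has width 6, giving mask 0x3f.
 */
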
1392 static void
1393 mlx5_flow_field_id_to_modify_info
1394                 (const struct rte_flow_action_modify_data *data,
1395                  struct field_modify_info *info,
1396                  uint32_t *mask, uint32_t *value,
1397                  uint32_t width, uint32_t dst_width,
1398                  struct rte_eth_dev *dev,
1399                  const struct rte_flow_attr *attr,
1400                  struct rte_flow_error *error)
1401 {
1402         uint32_t idx = 0;
1403         uint64_t val = 0;
1404         switch (data->field) {
1405         case RTE_FLOW_FIELD_START:
1406                 /* not supported yet */
1407                 MLX5_ASSERT(false);
1408                 break;
1409         case RTE_FLOW_FIELD_MAC_DST:
1410                 if (mask) {
1411                         if (data->offset < 32) {
1412                                 info[idx] = (struct field_modify_info){4, 0,
1413                                                 MLX5_MODI_OUT_DMAC_47_16};
1414                                 if (width < 32) {
1415                                         mask[idx] =
1416                                                 rte_cpu_to_be_32(0xffffffff >>
1417                                                                  (32 - width));
1418                                         width = 0;
1419                                 } else {
1420                                         mask[idx] = RTE_BE32(0xffffffff);
1421                                         width -= 32;
1422                                 }
1423                                 if (!width)
1424                                         break;
1425                                 ++idx;
1426                         }
1427                         info[idx] = (struct field_modify_info){2, 4 * idx,
1428                                                 MLX5_MODI_OUT_DMAC_15_0};
1429                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1430                 } else {
1431                         if (data->offset < 32)
1432                                 info[idx++] = (struct field_modify_info){4, 0,
1433                                                 MLX5_MODI_OUT_DMAC_47_16};
1434                         info[idx] = (struct field_modify_info){2, 0,
1435                                                 MLX5_MODI_OUT_DMAC_15_0};
1436                 }
1437                 break;
1438         case RTE_FLOW_FIELD_MAC_SRC:
1439                 if (mask) {
1440                         if (data->offset < 32) {
1441                                 info[idx] = (struct field_modify_info){4, 0,
1442                                                 MLX5_MODI_OUT_SMAC_47_16};
1443                                 if (width < 32) {
1444                                         mask[idx] =
1445                                                 rte_cpu_to_be_32(0xffffffff >>
1446                                                                 (32 - width));
1447                                         width = 0;
1448                                 } else {
1449                                         mask[idx] = RTE_BE32(0xffffffff);
1450                                         width -= 32;
1451                                 }
1452                                 if (!width)
1453                                         break;
1454                                 ++idx;
1455                         }
1456                         info[idx] = (struct field_modify_info){2, 4 * idx,
1457                                                 MLX5_MODI_OUT_SMAC_15_0};
1458                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1459                 } else {
1460                         if (data->offset < 32)
1461                                 info[idx++] = (struct field_modify_info){4, 0,
1462                                                 MLX5_MODI_OUT_SMAC_47_16};
1463                         info[idx] = (struct field_modify_info){2, 0,
1464                                                 MLX5_MODI_OUT_SMAC_15_0};
1465                 }
1466                 break;
1467         case RTE_FLOW_FIELD_VLAN_TYPE:
1468                 /* not supported yet */
1469                 break;
1470         case RTE_FLOW_FIELD_VLAN_ID:
1471                 info[idx] = (struct field_modify_info){2, 0,
1472                                         MLX5_MODI_OUT_FIRST_VID};
1473                 if (mask)
1474                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1475                 break;
1476         case RTE_FLOW_FIELD_MAC_TYPE:
1477                 info[idx] = (struct field_modify_info){2, 0,
1478                                         MLX5_MODI_OUT_ETHERTYPE};
1479                 if (mask)
1480                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1481                 break;
1482         case RTE_FLOW_FIELD_IPV4_DSCP:
1483                 info[idx] = (struct field_modify_info){1, 0,
1484                                         MLX5_MODI_OUT_IP_DSCP};
1485                 if (mask)
1486                         mask[idx] = 0x3f >> (6 - width);
1487                 break;
1488         case RTE_FLOW_FIELD_IPV4_TTL:
1489                 info[idx] = (struct field_modify_info){1, 0,
1490                                         MLX5_MODI_OUT_IPV4_TTL};
1491                 if (mask)
1492                         mask[idx] = 0xff >> (8 - width);
1493                 break;
1494         case RTE_FLOW_FIELD_IPV4_SRC:
1495                 info[idx] = (struct field_modify_info){4, 0,
1496                                         MLX5_MODI_OUT_SIPV4};
1497                 if (mask)
1498                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1499                                                      (32 - width));
1500                 break;
1501         case RTE_FLOW_FIELD_IPV4_DST:
1502                 info[idx] = (struct field_modify_info){4, 0,
1503                                         MLX5_MODI_OUT_DIPV4};
1504                 if (mask)
1505                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1506                                                      (32 - width));
1507                 break;
1508         case RTE_FLOW_FIELD_IPV6_DSCP:
1509                 info[idx] = (struct field_modify_info){1, 0,
1510                                         MLX5_MODI_OUT_IP_DSCP};
1511                 if (mask)
1512                         mask[idx] = 0x3f >> (6 - width);
1513                 break;
1514         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1515                 info[idx] = (struct field_modify_info){1, 0,
1516                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1517                 if (mask)
1518                         mask[idx] = 0xff >> (8 - width);
1519                 break;
1520         case RTE_FLOW_FIELD_IPV6_SRC:
1521                 if (mask) {
1522                         if (data->offset < 32) {
1523                                 info[idx] = (struct field_modify_info){4,
1524                                                 4 * idx,
1525                                                 MLX5_MODI_OUT_SIPV6_31_0};
1526                                 if (width < 32) {
1527                                         mask[idx] =
1528                                                 rte_cpu_to_be_32(0xffffffff >>
1529                                                                  (32 - width));
1530                                         width = 0;
1531                                 } else {
1532                                         mask[idx] = RTE_BE32(0xffffffff);
1533                                         width -= 32;
1534                                 }
1535                                 if (!width)
1536                                         break;
1537                                 ++idx;
1538                         }
1539                         if (data->offset < 64) {
1540                                 info[idx] = (struct field_modify_info){4,
1541                                                 4 * idx,
1542                                                 MLX5_MODI_OUT_SIPV6_63_32};
1543                                 if (width < 32) {
1544                                         mask[idx] =
1545                                                 rte_cpu_to_be_32(0xffffffff >>
1546                                                                  (32 - width));
1547                                         width = 0;
1548                                 } else {
1549                                         mask[idx] = RTE_BE32(0xffffffff);
1550                                         width -= 32;
1551                                 }
1552                                 if (!width)
1553                                         break;
1554                                 ++idx;
1555                         }
1556                         if (data->offset < 96) {
1557                                 info[idx] = (struct field_modify_info){4,
1558                                                 4 * idx,
1559                                                 MLX5_MODI_OUT_SIPV6_95_64};
1560                                 if (width < 32) {
1561                                         mask[idx] =
1562                                                 rte_cpu_to_be_32(0xffffffff >>
1563                                                                  (32 - width));
1564                                         width = 0;
1565                                 } else {
1566                                         mask[idx] = RTE_BE32(0xffffffff);
1567                                         width -= 32;
1568                                 }
1569                                 if (!width)
1570                                         break;
1571                                 ++idx;
1572                         }
1573                         info[idx] = (struct field_modify_info){4, 4 * idx,
1574                                                 MLX5_MODI_OUT_SIPV6_127_96};
1575                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1576                                                      (32 - width));
1577                 } else {
1578                         if (data->offset < 32)
1579                                 info[idx++] = (struct field_modify_info){4, 0,
1580                                                 MLX5_MODI_OUT_SIPV6_31_0};
1581                         if (data->offset < 64)
1582                                 info[idx++] = (struct field_modify_info){4, 0,
1583                                                 MLX5_MODI_OUT_SIPV6_63_32};
1584                         if (data->offset < 96)
1585                                 info[idx++] = (struct field_modify_info){4, 0,
1586                                                 MLX5_MODI_OUT_SIPV6_95_64};
1587                         if (data->offset < 128)
1588                                 info[idx++] = (struct field_modify_info){4, 0,
1589                                                 MLX5_MODI_OUT_SIPV6_127_96};
1590                 }
1591                 break;
1592         case RTE_FLOW_FIELD_IPV6_DST:
1593                 if (mask) {
1594                         if (data->offset < 32) {
1595                                 info[idx] = (struct field_modify_info){4,
1596                                                 4 * idx,
1597                                                 MLX5_MODI_OUT_DIPV6_31_0};
1598                                 if (width < 32) {
1599                                         mask[idx] =
1600                                                 rte_cpu_to_be_32(0xffffffff >>
1601                                                                  (32 - width));
1602                                         width = 0;
1603                                 } else {
1604                                         mask[idx] = RTE_BE32(0xffffffff);
1605                                         width -= 32;
1606                                 }
1607                                 if (!width)
1608                                         break;
1609                                 ++idx;
1610                         }
1611                         if (data->offset < 64) {
1612                                 info[idx] = (struct field_modify_info){4,
1613                                                 4 * idx,
1614                                                 MLX5_MODI_OUT_DIPV6_63_32};
1615                                 if (width < 32) {
1616                                         mask[idx] =
1617                                                 rte_cpu_to_be_32(0xffffffff >>
1618                                                                  (32 - width));
1619                                         width = 0;
1620                                 } else {
1621                                         mask[idx] = RTE_BE32(0xffffffff);
1622                                         width -= 32;
1623                                 }
1624                                 if (!width)
1625                                         break;
1626                                 ++idx;
1627                         }
1628                         if (data->offset < 96) {
1629                                 info[idx] = (struct field_modify_info){4,
1630                                                 4 * idx,
1631                                                 MLX5_MODI_OUT_DIPV6_95_64};
1632                                 if (width < 32) {
1633                                         mask[idx] =
1634                                                 rte_cpu_to_be_32(0xffffffff >>
1635                                                                  (32 - width));
1636                                         width = 0;
1637                                 } else {
1638                                         mask[idx] = RTE_BE32(0xffffffff);
1639                                         width -= 32;
1640                                 }
1641                                 if (!width)
1642                                         break;
1643                                 ++idx;
1644                         }
1645                         info[idx] = (struct field_modify_info){4, 4 * idx,
1646                                                 MLX5_MODI_OUT_DIPV6_127_96};
1647                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1648                                                      (32 - width));
1649                 } else {
1650                         if (data->offset < 32)
1651                                 info[idx++] = (struct field_modify_info){4, 0,
1652                                                 MLX5_MODI_OUT_DIPV6_31_0};
1653                         if (data->offset < 64)
1654                                 info[idx++] = (struct field_modify_info){4, 0,
1655                                                 MLX5_MODI_OUT_DIPV6_63_32};
1656                         if (data->offset < 96)
1657                                 info[idx++] = (struct field_modify_info){4, 0,
1658                                                 MLX5_MODI_OUT_DIPV6_95_64};
1659                         if (data->offset < 128)
1660                                 info[idx++] = (struct field_modify_info){4, 0,
1661                                                 MLX5_MODI_OUT_DIPV6_127_96};
1662                 }
1663                 break;
1664         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1665                 info[idx] = (struct field_modify_info){2, 0,
1666                                         MLX5_MODI_OUT_TCP_SPORT};
1667                 if (mask)
1668                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1669                 break;
1670         case RTE_FLOW_FIELD_TCP_PORT_DST:
1671                 info[idx] = (struct field_modify_info){2, 0,
1672                                         MLX5_MODI_OUT_TCP_DPORT};
1673                 if (mask)
1674                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1675                 break;
1676         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1677                 info[idx] = (struct field_modify_info){4, 0,
1678                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1679                 if (mask)
1680                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1681                                                      (32 - width));
1682                 break;
1683         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1684                 info[idx] = (struct field_modify_info){4, 0,
1685                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1686                 if (mask)
1687                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1688                                                      (32 - width));
1689                 break;
1690         case RTE_FLOW_FIELD_TCP_FLAGS:
1691                 info[idx] = (struct field_modify_info){1, 0,
1692                                         MLX5_MODI_OUT_TCP_FLAGS};
1693                 if (mask)
1694                         mask[idx] = 0x3f >> (6 - width);
1695                 break;
1696         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1697                 info[idx] = (struct field_modify_info){2, 0,
1698                                         MLX5_MODI_OUT_UDP_SPORT};
1699                 if (mask)
1700                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1701                 break;
1702         case RTE_FLOW_FIELD_UDP_PORT_DST:
1703                 info[idx] = (struct field_modify_info){2, 0,
1704                                         MLX5_MODI_OUT_UDP_DPORT};
1705                 if (mask)
1706                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1707                 break;
1708         case RTE_FLOW_FIELD_VXLAN_VNI:
1709                 /* not supported yet */
1710                 break;
1711         case RTE_FLOW_FIELD_GENEVE_VNI:
1712                 /* not supported yet */
1713                 break;
1714         case RTE_FLOW_FIELD_GTP_TEID:
1715                 info[idx] = (struct field_modify_info){4, 0,
1716                                         MLX5_MODI_GTP_TEID};
1717                 if (mask)
1718                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1719                                                      (32 - width));
1720                 break;
1721         case RTE_FLOW_FIELD_TAG:
1722                 {
1723                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1724                                                    data->level, error);
1725                         if (reg < 0)
1726                                 return;
1727                         MLX5_ASSERT(reg != REG_NON);
1728                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1729                         info[idx] = (struct field_modify_info){4, 0,
1730                                                 reg_to_field[reg]};
1731                         if (mask)
1732                                 mask[idx] =
1733                                         rte_cpu_to_be_32(0xffffffff >>
1734                                                          (32 - width));
1735                 }
1736                 break;
1737         case RTE_FLOW_FIELD_MARK:
1738                 {
1739                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1740                                                        0, error);
1741                         if (reg < 0)
1742                                 return;
1743                         MLX5_ASSERT(reg != REG_NON);
1744                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1745                         info[idx] = (struct field_modify_info){4, 0,
1746                                                 reg_to_field[reg]};
1747                         if (mask)
1748                                 mask[idx] =
1749                                         rte_cpu_to_be_32(0xffffffff >>
1750                                                          (32 - width));
1751                 }
1752                 break;
1753         case RTE_FLOW_FIELD_META:
1754                 {
1755                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1756                         if (reg < 0)
1757                                 return;
1758                         MLX5_ASSERT(reg != REG_NON);
1759                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1760                         info[idx] = (struct field_modify_info){4, 0,
1761                                                 reg_to_field[reg]};
1762                         if (mask)
1763                                 mask[idx] =
1764                                         rte_cpu_to_be_32(0xffffffff >>
1765                                                          (32 - width));
1766                 }
1767                 break;
1768         case RTE_FLOW_FIELD_POINTER:
1769         case RTE_FLOW_FIELD_VALUE:
1770                 if (data->field == RTE_FLOW_FIELD_POINTER)
1771                         memcpy(&val, (void *)(uintptr_t)data->value,
1772                                sizeof(uint64_t));
1773                 else
1774                         val = data->value;
1775                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1776                         if (mask[idx]) {
1777                                 if (dst_width > 16) {
1778                                         value[idx] = rte_cpu_to_be_32(val);
1779                                         val >>= 32;
1780                                 } else if (dst_width > 8) {
1781                                         value[idx] = rte_cpu_to_be_16(val);
1782                                         val >>= 16;
1783                                 } else {
1784                                         value[idx] = (uint8_t)val;
1785                                         val >>= 8;
1786                                 }
1787                                 if (!val)
1788                                         break;
1789                         }
1790                 }
1791                 break;
1792         default:
1793                 MLX5_ASSERT(false);
1794                 break;
1795         }
1796 }
1797
1798 /**
1799  * Convert modify_field action to DV specification.
1800  *
1801  * @param[in] dev
1802  *   Pointer to the rte_eth_dev structure.
1803  * @param[in,out] resource
1804  *   Pointer to the modify-header resource.
1805  * @param[in] action
1806  *   Pointer to action specification.
1807  * @param[in] attr
1808  *   Attributes of flow that includes this item.
1809  * @param[out] error
1810  *   Pointer to the error structure.
1811  *
1812  * @return
1813  *   0 on success, a negative errno value otherwise and rte_errno is set.
1814  */
1815 static int
1816 flow_dv_convert_action_modify_field
1817                         (struct rte_eth_dev *dev,
1818                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1819                          const struct rte_flow_action *action,
1820                          const struct rte_flow_attr *attr,
1821                          struct rte_flow_error *error)
1822 {
1823         const struct rte_flow_action_modify_field *conf =
1824                 (const struct rte_flow_action_modify_field *)(action->conf);
1825         struct rte_flow_item item;
1826         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1827                                                                 {0, 0, 0} };
1828         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1829                                                                 {0, 0, 0} };
1830         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1831         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1832         uint32_t type;
1833         uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
1834
1835         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1836             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1837                 type = MLX5_MODIFICATION_TYPE_SET;
1838                 /* For SET, fill the destination field (field) first. */
1839                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1840                         value, conf->width, dst_width, dev, attr, error);
1841                 /* Then copy the immediate value from source as per mask. */
1842                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1843                         value, conf->width, dst_width, dev, attr, error);
1844                 item.spec = &value;
1845         } else {
1846                 type = MLX5_MODIFICATION_TYPE_COPY;
1847                 /* For COPY, fill the destination field (dcopy) without mask. */
1848                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1849                         value, conf->width, dst_width, dev, attr, error);
1850                 /* Then construct the source field (field) with mask. */
1851                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1852                         value, conf->width, dst_width, dev, attr, error);
1853         }
1854         item.mask = &mask;
1855         return flow_dv_convert_modify_action(&item,
1856                         field, dcopy, resource, type, error);
1857 }
1858
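/*
 * Editor's sketch (not part of the driver): a MODIFY_FIELD action hitting the
 * COPY branch above, copying the 16-bit TCP destination port into TAG
 * register 0. Struct and enum names follow rte_flow.h of this DPDK
 * generation; the concrete values are examples only.
 */
#if 0 /* illustrative only */
static const struct rte_flow_action_modify_field example_copy = {
	.operation = RTE_FLOW_MODIFY_SET,
	.dst = { .field = RTE_FLOW_FIELD_TAG, .level = 0, .offset = 0 },
	.src = { .field = RTE_FLOW_FIELD_TCP_PORT_DST, .level = 0, .offset = 0 },
	.width = 16, /* full port width, see mlx5_flow_item_field_width() */
};
#endif
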
1859 /**
1860  * Validate MARK item.
1861  *
1862  * @param[in] dev
1863  *   Pointer to the rte_eth_dev structure.
1864  * @param[in] item
1865  *   Item specification.
1866  * @param[in] attr
1867  *   Attributes of flow that includes this item.
1868  * @param[out] error
1869  *   Pointer to error structure.
1870  *
1871  * @return
1872  *   0 on success, a negative errno value otherwise and rte_errno is set.
1873  */
1874 static int
1875 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1876                            const struct rte_flow_item *item,
1877                            const struct rte_flow_attr *attr __rte_unused,
1878                            struct rte_flow_error *error)
1879 {
1880         struct mlx5_priv *priv = dev->data->dev_private;
1881         struct mlx5_dev_config *config = &priv->config;
1882         const struct rte_flow_item_mark *spec = item->spec;
1883         const struct rte_flow_item_mark *mask = item->mask;
1884         const struct rte_flow_item_mark nic_mask = {
1885                 .id = priv->sh->dv_mark_mask,
1886         };
1887         int ret;
1888
1889         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1890                 return rte_flow_error_set(error, ENOTSUP,
1891                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1892                                           "extended metadata feature"
1893                                           " isn't enabled");
1894         if (!mlx5_flow_ext_mreg_supported(dev))
1895                 return rte_flow_error_set(error, ENOTSUP,
1896                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1897                                           "extended metadata register"
1898                                           " isn't supported");
1899         if (!nic_mask.id)
1900                 return rte_flow_error_set(error, ENOTSUP,
1901                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1902                                           "extended metadata register"
1903                                           " isn't available");
1904         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1905         if (ret < 0)
1906                 return ret;
1907         if (!spec)
1908                 return rte_flow_error_set(error, EINVAL,
1909                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1910                                           item->spec,
1911                                           "data cannot be empty");
1912         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1913                 return rte_flow_error_set(error, EINVAL,
1914                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1915                                           &spec->id,
1916                                           "mark id exceeds the limit");
1917         if (!mask)
1918                 mask = &nic_mask;
1919         if (!mask->id)
1920                 return rte_flow_error_set(error, EINVAL,
1921                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1922                                         "mask cannot be zero");
1923
1924         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1925                                         (const uint8_t *)&nic_mask,
1926                                         sizeof(struct rte_flow_item_mark),
1927                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1928         if (ret < 0)
1929                 return ret;
1930         return 0;
1931 }
1932
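/*
 * Editor's sketch (not part of the driver): a MARK pattern item that passes
 * the validation above. It requires dv_xmeta_en != MLX5_XMETA_MODE_LEGACY;
 * a NULL mask selects the PMD default (nic_mask above).
 */
#if 0 /* illustrative only */
static const struct rte_flow_item_mark example_mark = { .id = 42 };
static const struct rte_flow_item example_mark_item = {
	.type = RTE_FLOW_ITEM_TYPE_MARK,
	.spec = &example_mark,
	.mask = NULL, /* fall back to the device mark mask */
};
#endif
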
1933 /**
1934  * Validate META item.
1935  *
1936  * @param[in] dev
1937  *   Pointer to the rte_eth_dev structure.
1938  * @param[in] item
1939  *   Item specification.
1940  * @param[in] attr
1941  *   Attributes of flow that includes this item.
1942  * @param[out] error
1943  *   Pointer to error structure.
1944  *
1945  * @return
1946  *   0 on success, a negative errno value otherwise and rte_errno is set.
1947  */
1948 static int
1949 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1950                            const struct rte_flow_item *item,
1951                            const struct rte_flow_attr *attr,
1952                            struct rte_flow_error *error)
1953 {
1954         struct mlx5_priv *priv = dev->data->dev_private;
1955         struct mlx5_dev_config *config = &priv->config;
1956         const struct rte_flow_item_meta *spec = item->spec;
1957         const struct rte_flow_item_meta *mask = item->mask;
1958         struct rte_flow_item_meta nic_mask = {
1959                 .data = UINT32_MAX
1960         };
1961         int reg;
1962         int ret;
1963
1964         if (!spec)
1965                 return rte_flow_error_set(error, EINVAL,
1966                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1967                                           item->spec,
1968                                           "data cannot be empty");
1969         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1970                 if (!mlx5_flow_ext_mreg_supported(dev))
1971                         return rte_flow_error_set(error, ENOTSUP,
1972                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1973                                           "extended metadata register"
1974                                           " isn't supported");
1975                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1976                 if (reg < 0)
1977                         return reg;
1978                 if (reg == REG_NON)
1979                         return rte_flow_error_set(error, ENOTSUP,
1980                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1981                                         "unavailable extended metadata register");
1982                 if (reg == REG_B)
1983                         return rte_flow_error_set(error, ENOTSUP,
1984                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1985                                           "match on reg_b "
1986                                           "isn't supported");
1987                 if (reg != REG_A)
1988                         nic_mask.data = priv->sh->dv_meta_mask;
1989         } else {
1990                 if (attr->transfer)
1991                         return rte_flow_error_set(error, ENOTSUP,
1992                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1993                                         "extended metadata feature "
1994                                         "should be enabled when "
1995                                         "meta item is requested "
1996                                         "with e-switch mode");
1997                 if (attr->ingress)
1998                         return rte_flow_error_set(error, ENOTSUP,
1999                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2000                                         "match on metadata for ingress "
2001                                         "is not supported in legacy "
2002                                         "metadata mode");
2003         }
2004         if (!mask)
2005                 mask = &rte_flow_item_meta_mask;
2006         if (!mask->data)
2007                 return rte_flow_error_set(error, EINVAL,
2008                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2009                                         "mask cannot be zero");
2010
2011         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2012                                         (const uint8_t *)&nic_mask,
2013                                         sizeof(struct rte_flow_item_meta),
2014                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2015         return ret;
2016 }
2017
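/*
 * Editor's sketch (not part of the driver): a META pattern item accepted by
 * the validation above; both the spec data and the mask data must be
 * non-zero, and rte_flow_item_meta_mask is the standard default mask.
 */
#if 0 /* illustrative only */
static const struct rte_flow_item_meta example_meta = { .data = 0x1234 };
static const struct rte_flow_item example_meta_item = {
	.type = RTE_FLOW_ITEM_TYPE_META,
	.spec = &example_meta,
	.mask = &rte_flow_item_meta_mask, /* default: data = UINT32_MAX */
};
#endif
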
2018 /**
2019  * Validate TAG item.
2020  *
2021  * @param[in] dev
2022  *   Pointer to the rte_eth_dev structure.
2023  * @param[in] item
2024  *   Item specification.
2025  * @param[in] attr
2026  *   Attributes of flow that includes this item.
2027  * @param[out] error
2028  *   Pointer to error structure.
2029  *
2030  * @return
2031  *   0 on success, a negative errno value otherwise and rte_errno is set.
2032  */
2033 static int
2034 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2035                           const struct rte_flow_item *item,
2036                           const struct rte_flow_attr *attr __rte_unused,
2037                           struct rte_flow_error *error)
2038 {
2039         const struct rte_flow_item_tag *spec = item->spec;
2040         const struct rte_flow_item_tag *mask = item->mask;
2041         const struct rte_flow_item_tag nic_mask = {
2042                 .data = RTE_BE32(UINT32_MAX),
2043                 .index = 0xff,
2044         };
2045         int ret;
2046
2047         if (!mlx5_flow_ext_mreg_supported(dev))
2048                 return rte_flow_error_set(error, ENOTSUP,
2049                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2050                                           "extended metadata register"
2051                                           " isn't supported");
2052         if (!spec)
2053                 return rte_flow_error_set(error, EINVAL,
2054                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2055                                           item->spec,
2056                                           "data cannot be empty");
2057         if (!mask)
2058                 mask = &rte_flow_item_tag_mask;
2059         if (!mask->data)
2060                 return rte_flow_error_set(error, EINVAL,
2061                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2062                                         "mask cannot be zero");
2063
2064         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2065                                         (const uint8_t *)&nic_mask,
2066                                         sizeof(struct rte_flow_item_tag),
2067                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2068         if (ret < 0)
2069                 return ret;
2070         if (mask->index != 0xff)
2071                 return rte_flow_error_set(error, EINVAL,
2072                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2073                                           "partial mask for tag index"
2074                                           " is not supported");
2075         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2076         if (ret < 0)
2077                 return ret;
2078         MLX5_ASSERT(ret != REG_NON);
2079         return 0;
2080 }
2081
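/*
 * Editor's sketch (not part of the driver): a TAG pattern item; the index
 * mask must be full (0xff), which the default rte_flow_item_tag_mask
 * already provides.
 */
#if 0 /* illustrative only */
static const struct rte_flow_item_tag example_tag = {
	.data = 0xcafe,
	.index = 0, /* application tag register index */
};
static const struct rte_flow_item example_tag_item = {
	.type = RTE_FLOW_ITEM_TYPE_TAG,
	.spec = &example_tag,
	.mask = &rte_flow_item_tag_mask,
};
#endif
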
2082 /**
2083  * Validate vport item.
2084  *
2085  * @param[in] dev
2086  *   Pointer to the rte_eth_dev structure.
2087  * @param[in] item
2088  *   Item specification.
2089  * @param[in] attr
2090  *   Attributes of flow that includes this item.
2091  * @param[in] item_flags
2092  *   Bit-fields that hold the items detected until now.
2093  * @param[out] error
2094  *   Pointer to error structure.
2095  *
2096  * @return
2097  *   0 on success, a negative errno value otherwise and rte_errno is set.
2098  */
2099 static int
2100 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2101                               const struct rte_flow_item *item,
2102                               const struct rte_flow_attr *attr,
2103                               uint64_t item_flags,
2104                               struct rte_flow_error *error)
2105 {
2106         const struct rte_flow_item_port_id *spec = item->spec;
2107         const struct rte_flow_item_port_id *mask = item->mask;
2108         const struct rte_flow_item_port_id switch_mask = {
2109                         .id = 0xffffffff,
2110         };
2111         struct mlx5_priv *esw_priv;
2112         struct mlx5_priv *dev_priv;
2113         int ret;
2114
2115         if (!attr->transfer)
2116                 return rte_flow_error_set(error, EINVAL,
2117                                           RTE_FLOW_ERROR_TYPE_ITEM,
2118                                           NULL,
2119                                           "match on port id is valid only"
2120                                           " when transfer flag is enabled");
2121         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2122                 return rte_flow_error_set(error, ENOTSUP,
2123                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2124                                           "multiple source ports are not"
2125                                           " supported");
2126         if (!mask)
2127                 mask = &switch_mask;
2128         if (mask->id != 0xffffffff)
2129                 return rte_flow_error_set(error, ENOTSUP,
2130                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2131                                            mask,
2132                                            "no support for partial mask on"
2133                                            " \"id\" field");
2134         ret = mlx5_flow_item_acceptable
2135                                 (item, (const uint8_t *)mask,
2136                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2137                                  sizeof(struct rte_flow_item_port_id),
2138                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2139         if (ret)
2140                 return ret;
2141         if (!spec)
2142                 return 0;
2143         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2144         if (!esw_priv)
2145                 return rte_flow_error_set(error, rte_errno,
2146                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2147                                           "failed to obtain E-Switch info for"
2148                                           " port");
2149         dev_priv = mlx5_dev_to_eswitch_info(dev);
2150         if (!dev_priv)
2151                 return rte_flow_error_set(error, rte_errno,
2152                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2153                                           NULL,
2154                                           "failed to obtain E-Switch info");
2155         if (esw_priv->domain_id != dev_priv->domain_id)
2156                 return rte_flow_error_set(error, EINVAL,
2157                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2158                                           "cannot match on a port from a"
2159                                           " different E-Switch");
2160         return 0;
2161 }
2162
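/*
 * Editor's sketch (not part of the driver): a PORT_ID pattern item; valid
 * only in transfer (E-Switch) flows and only with a full "id" mask, as
 * enforced above.
 */
#if 0 /* illustrative only */
static const struct rte_flow_attr example_attr = { .group = 1, .transfer = 1 };
static const struct rte_flow_item_port_id example_port = { .id = 1 };
static const struct rte_flow_item example_port_item = {
	.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
	.spec = &example_port,
	.mask = &rte_flow_item_port_id_mask, /* id = 0xffffffff */
};
#endif
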
2163 /**
2164  * Validate VLAN item.
2165  *
2166  * @param[in] item
2167  *   Item specification.
2168  * @param[in] item_flags
2169  *   Bit-fields that hold the items detected until now.
2170  * @param[in] dev
2171  *   Ethernet device flow is being created on.
2172  * @param[out] error
2173  *   Pointer to error structure.
2174  *
2175  * @return
2176  *   0 on success, a negative errno value otherwise and rte_errno is set.
2177  */
2178 static int
2179 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2180                            uint64_t item_flags,
2181                            struct rte_eth_dev *dev,
2182                            struct rte_flow_error *error)
2183 {
2184         const struct rte_flow_item_vlan *mask = item->mask;
2185         const struct rte_flow_item_vlan nic_mask = {
2186                 .tci = RTE_BE16(UINT16_MAX),
2187                 .inner_type = RTE_BE16(UINT16_MAX),
2188                 .has_more_vlan = 1,
2189         };
2190         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2191         int ret;
2192         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2193                                         MLX5_FLOW_LAYER_INNER_L4) :
2194                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2195                                         MLX5_FLOW_LAYER_OUTER_L4);
2196         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2197                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2198
2199         if (item_flags & vlanm)
2200                 return rte_flow_error_set(error, EINVAL,
2201                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2202                                           "multiple VLAN layers not supported");
2203         else if ((item_flags & l34m) != 0)
2204                 return rte_flow_error_set(error, EINVAL,
2205                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2206                                           "VLAN cannot follow L3/L4 layer");
2207         if (!mask)
2208                 mask = &rte_flow_item_vlan_mask;
2209         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2210                                         (const uint8_t *)&nic_mask,
2211                                         sizeof(struct rte_flow_item_vlan),
2212                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2213         if (ret)
2214                 return ret;
2215         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2216                 struct mlx5_priv *priv = dev->data->dev_private;
2217
2218                 if (priv->vmwa_context) {
2219                         /*
2220                          * A non-NULL context means we have a virtual machine
2221                          * with SR-IOV enabled, so we have to create a VLAN
2222                          * interface to make the hypervisor set up the E-Switch
2223                          * vport context correctly. We avoid creating multiple
2224                          * VLAN interfaces, so we cannot support a VLAN tag mask.
2225                          */
2226                         return rte_flow_error_set(error, EINVAL,
2227                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2228                                                   item,
2229                                                   "VLAN tag mask is not"
2230                                                   " supported in virtual"
2231                                                   " environment");
2232                 }
2233         }
2234         return 0;
2235 }
2236
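/*
 * Editor's sketch (not part of the driver): a VLAN pattern item matching the
 * VID only. The 0x0fff TCI mask keeps the item acceptable even when a vmwa
 * context (VM with SR-IOV) is present.
 */
#if 0 /* illustrative only */
static const struct rte_flow_item_vlan example_vlan = {
	.tci = RTE_BE16(100), /* VLAN ID 100, PCP/DEI zero */
};
static const struct rte_flow_item_vlan example_vlan_mask = {
	.tci = RTE_BE16(0x0fff), /* VID bits only */
};
#endif
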
2237 /*
2238  * GTP flags are contained in 1 byte of the format:
2239  * -------------------------------------------
2240  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2241  * |-----------------------------------------|
2242  * | value | Version | PT | Res | E | S | PN |
2243  * -------------------------------------------
2244  *
2245  * Matching is supported only for GTP flags E, S, PN.
2246  */
2247 #define MLX5_GTP_FLAGS_MASK     0x07
2248
2249 /**
2250  * Validate GTP item.
2251  *
2252  * @param[in] dev
2253  *   Pointer to the rte_eth_dev structure.
2254  * @param[in] item
2255  *   Item specification.
2256  * @param[in] item_flags
2257  *   Bit-fields that hold the items detected until now.
2258  * @param[out] error
2259  *   Pointer to error structure.
2260  *
2261  * @return
2262  *   0 on success, a negative errno value otherwise and rte_errno is set.
2263  */
2264 static int
2265 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2266                           const struct rte_flow_item *item,
2267                           uint64_t item_flags,
2268                           struct rte_flow_error *error)
2269 {
2270         struct mlx5_priv *priv = dev->data->dev_private;
2271         const struct rte_flow_item_gtp *spec = item->spec;
2272         const struct rte_flow_item_gtp *mask = item->mask;
2273         const struct rte_flow_item_gtp nic_mask = {
2274                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2275                 .msg_type = 0xff,
2276                 .teid = RTE_BE32(0xffffffff),
2277         };
2278
2279         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2280                 return rte_flow_error_set(error, ENOTSUP,
2281                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2282                                           "GTP support is not enabled");
2283         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2284                 return rte_flow_error_set(error, ENOTSUP,
2285                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2286                                           "multiple tunnel layers not"
2287                                           " supported");
2288         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2289                 return rte_flow_error_set(error, EINVAL,
2290                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2291                                           "no outer UDP layer found");
2292         if (!mask)
2293                 mask = &rte_flow_item_gtp_mask;
2294         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2295                 return rte_flow_error_set(error, ENOTSUP,
2296                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2297                                           "Match is supported for GTP"
2298                                           " flags only");
2299         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2300                                          (const uint8_t *)&nic_mask,
2301                                          sizeof(struct rte_flow_item_gtp),
2302                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2303 }
2304
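/*
 * Editor's sketch (not part of the driver): a GTP pattern item matching the
 * E flag and a TEID; it must follow an outer UDP item, and only the E/S/PN
 * flag bits of v_pt_rsv_flags may be matched, per the validation above.
 */
#if 0 /* illustrative only */
static const struct rte_flow_item_gtp example_gtp = {
	.v_pt_rsv_flags = MLX5_GTP_EXT_HEADER_FLAG, /* E flag set */
	.teid = RTE_BE32(1234),
};
#endif
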
2305 /**
2306  * Validate GTP PSC item.
2307  *
2308  * @param[in] item
2309  *   Item specification.
2310  * @param[in] last_item
2311  *   Previously validated item in the pattern items.
2312  * @param[in] gtp_item
2313  *   Previous GTP item specification.
2314  * @param[in] attr
2315  *   Pointer to flow attributes.
2316  * @param[out] error
2317  *   Pointer to error structure.
2318  *
2319  * @return
2320  *   0 on success, a negative errno value otherwise and rte_errno is set.
2321  */
2322 static int
2323 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2324                               uint64_t last_item,
2325                               const struct rte_flow_item *gtp_item,
2326                               const struct rte_flow_attr *attr,
2327                               struct rte_flow_error *error)
2328 {
2329         const struct rte_flow_item_gtp *gtp_spec;
2330         const struct rte_flow_item_gtp *gtp_mask;
2331         const struct rte_flow_item_gtp_psc *spec;
2332         const struct rte_flow_item_gtp_psc *mask;
2333         const struct rte_flow_item_gtp_psc nic_mask = {
2334                 .pdu_type = 0xFF,
2335                 .qfi = 0xFF,
2336         };
2337
2338         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2339                 return rte_flow_error_set
2340                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2341                          "GTP PSC item must be preceded with GTP item");
2342         gtp_spec = gtp_item->spec;
2343         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2344         /* Reject matching the E flag to zero when a GTP spec is present. */
2345         if (gtp_spec &&
2346                 (gtp_mask->v_pt_rsv_flags &
2347                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2348                 return rte_flow_error_set
2349                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2350                          "GTP E flag must be 1 to match GTP PSC");
2351         /* Check the flow is not created in group zero. */
2352         if (!attr->transfer && !attr->group)
2353                 return rte_flow_error_set
2354                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2355                          "GTP PSC is not supported for group 0");
2356         /* Nothing further to check if the GTP PSC spec is absent. */
2357         if (!item->spec)
2358                 return 0;
2359         spec = item->spec;
2360         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2361         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2362                 return rte_flow_error_set
2363                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                          "PDU type should be smaller than 16");
2365         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2366                                          (const uint8_t *)&nic_mask,
2367                                          sizeof(struct rte_flow_item_gtp_psc),
2368                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2369 }
2370
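/*
 * Editor's sketch (not part of the driver): a GTP PSC item; it must directly
 * follow a GTP item whose E flag matches 1, in a non-zero group or a
 * transfer flow, with pdu_type below 16.
 */
#if 0 /* illustrative only */
static const struct rte_flow_item_gtp_psc example_psc = {
	.pdu_type = 0, /* downlink PDU session information */
	.qfi = 9,      /* QoS flow identifier */
};
#endif
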
2371 /**
2372  * Validate IPV4 item.
2373  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2374  * add specific validation of the fragment_offset field.
2375  *
2376  * @param[in] item
2377  *   Item specification.
2378  * @param[in] item_flags
2379  *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previously validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
2380  * @param[out] error
2381  *   Pointer to error structure.
2382  *
2383  * @return
2384  *   0 on success, a negative errno value otherwise and rte_errno is set.
2385  */
2386 static int
2387 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2388                            uint64_t item_flags,
2389                            uint64_t last_item,
2390                            uint16_t ether_type,
2391                            struct rte_flow_error *error)
2392 {
2393         int ret;
2394         const struct rte_flow_item_ipv4 *spec = item->spec;
2395         const struct rte_flow_item_ipv4 *last = item->last;
2396         const struct rte_flow_item_ipv4 *mask = item->mask;
2397         rte_be16_t fragment_offset_spec = 0;
2398         rte_be16_t fragment_offset_last = 0;
2399         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2400                 .hdr = {
2401                         .src_addr = RTE_BE32(0xffffffff),
2402                         .dst_addr = RTE_BE32(0xffffffff),
2403                         .type_of_service = 0xff,
2404                         .fragment_offset = RTE_BE16(0xffff),
2405                         .next_proto_id = 0xff,
2406                         .time_to_live = 0xff,
2407                 },
2408         };
2409
2410         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2411                                            ether_type, &nic_ipv4_mask,
2412                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2413         if (ret < 0)
2414                 return ret;
2415         if (spec && mask)
2416                 fragment_offset_spec = spec->hdr.fragment_offset &
2417                                        mask->hdr.fragment_offset;
2418         if (!fragment_offset_spec)
2419                 return 0;
2420         /*
2421          * spec and mask are valid, enforce using full mask to make sure the
2422          * complete value is used correctly.
2423          */
2424         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2425                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2426                 return rte_flow_error_set(error, EINVAL,
2427                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2428                                           item, "must use full mask for"
2429                                           " fragment_offset");
2430         /*
2431          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2432          * indicating this is the 1st fragment of a fragmented packet.
2433          * This is not yet supported in MLX5, return appropriate error message.
2434          */
2435         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2436                 return rte_flow_error_set(error, ENOTSUP,
2437                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2438                                           "match on first fragment not "
2439                                           "supported");
2440         if (fragment_offset_spec && !last)
2441                 return rte_flow_error_set(error, ENOTSUP,
2442                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2443                                           "specified value not supported");
2444         /* spec and last are valid, validate the specified range. */
2445         fragment_offset_last = last->hdr.fragment_offset &
2446                                mask->hdr.fragment_offset;
2447         /*
2448          * Match on fragment_offset spec 0x2001 and last 0x3fff
2449          * means MF is 1 and frag-offset is > 0.
2450          * Such a packet is the 2nd or a later fragment, excluding the last.
2451          * This is not yet supported in MLX5, return appropriate
2452          * error message.
2453          */
2454         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2455             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2456                 return rte_flow_error_set(error, ENOTSUP,
2457                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2458                                           last, "match on following "
2459                                           "fragments not supported");
2460         /*
2461          * Match on fragment_offset spec 0x0001 and last 0x1fff
2462          * means MF is 0 and frag-offset is > 0.
2463          * Such a packet is the last fragment of a fragmented packet.
2464          * This is not yet supported in MLX5, return appropriate
2465          * error message.
2466          */
2467         if (fragment_offset_spec == RTE_BE16(1) &&
2468             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2469                 return rte_flow_error_set(error, ENOTSUP,
2470                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2471                                           last, "match on last "
2472                                           "fragment not supported");
2473         /*
2474          * Match on fragment_offset spec 0x0001 and last 0x3fff
2475          * means MF and/or frag-offset is not 0.
2476          * This is a fragmented packet.
2477          * Other range values are invalid and rejected.
2478          */
2479         if (!(fragment_offset_spec == RTE_BE16(1) &&
2480               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2481                 return rte_flow_error_set(error, ENOTSUP,
2482                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2483                                           "specified range not supported");
2484         return 0;
2485 }
2486
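/*
 * A minimal sketch, assuming the hypothetical MLX5_FLOW_DOC_EXAMPLES guard
 * and example_* names (never compiled into the driver): per the checks
 * above, the only fragment_offset spec/last range accepted is spec 0x0001
 * with last 0x3fff under a full 0x3fff mask, i.e. "any fragment".
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_ipv4 example_ipv4_frag_spec = {
	.hdr.fragment_offset = RTE_BE16(1),
};
static const struct rte_flow_item_ipv4 example_ipv4_frag_last = {
	.hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK),
};
static const struct rte_flow_item_ipv4 example_ipv4_frag_mask = {
	/* Full mask on the MF + offset bits is mandatory (see check above). */
	.hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK),
};
static const struct rte_flow_item example_ipv4_any_frag_item = {
	.type = RTE_FLOW_ITEM_TYPE_IPV4,
	.spec = &example_ipv4_frag_spec,
	.last = &example_ipv4_frag_last,
	.mask = &example_ipv4_frag_mask,
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
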
2487 /**
2488  * Validate IPV6 fragment extension item.
2489  *
2490  * @param[in] item
2491  *   Item specification.
2492  * @param[in] item_flags
2493  *   Bit-fields that holds the items detected until now.
2494  * @param[out] error
2495  *   Pointer to error structure.
2496  *
2497  * @return
2498  *   0 on success, a negative errno value otherwise and rte_errno is set.
2499  */
2500 static int
2501 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2502                                     uint64_t item_flags,
2503                                     struct rte_flow_error *error)
2504 {
2505         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2506         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2507         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2508         rte_be16_t frag_data_spec = 0;
2509         rte_be16_t frag_data_last = 0;
2510         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2511         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2512                                       MLX5_FLOW_LAYER_OUTER_L4;
2513         int ret = 0;
2514         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2515                 .hdr = {
2516                         .next_header = 0xff,
2517                         .frag_data = RTE_BE16(0xffff),
2518                 },
2519         };
2520
2521         if (item_flags & l4m)
2522                 return rte_flow_error_set(error, EINVAL,
2523                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2524                                           "ipv6 fragment extension item cannot "
2525                                           "follow L4 item.");
2526         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2527             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2528                 return rte_flow_error_set(error, EINVAL,
2529                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2530                                           "ipv6 fragment extension item must "
2531                                           "follow ipv6 item");
2532         if (spec && mask)
2533                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2534         if (!frag_data_spec)
2535                 return 0;
2536         /*
2537          * spec and mask are valid, enforce using full mask to make sure the
2538          * complete value is used correctly.
2539          */
2540         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2541                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2542                 return rte_flow_error_set(error, EINVAL,
2543                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2544                                           item, "must use full mask for"
2545                                           " frag_data");
2546         /*
2547          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2548          * This is the 1st fragment of a fragmented packet.
2549          */
2550         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2551                 return rte_flow_error_set(error, ENOTSUP,
2552                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2553                                           "match on first fragment not "
2554                                           "supported");
2555         if (frag_data_spec && !last)
2556                 return rte_flow_error_set(error, EINVAL,
2557                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2558                                           "specified value not supported");
2559         ret = mlx5_flow_item_acceptable
2560                                 (item, (const uint8_t *)mask,
2561                                  (const uint8_t *)&nic_mask,
2562                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2563                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2564         if (ret)
2565                 return ret;
2566         /* spec and last are valid, validate the specified range. */
2567         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2568         /*
2569          * Match on frag_data spec 0x0009 and last 0xfff9
2570          * means M is 1 and frag-offset is > 0.
2571          * This matches the 2nd fragment and onward, excluding the last.
2572          * This is not yet supported in MLX5, return appropriate
2573          * error message.
2574          */
2575         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2576                                        RTE_IPV6_EHDR_MF_MASK) &&
2577             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2578                 return rte_flow_error_set(error, ENOTSUP,
2579                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2580                                           last, "match on following "
2581                                           "fragments not supported");
2582         /*
2583          * Match on frag_data spec 0x0008 and last 0xfff8
2584          * means M is 0 and frag-offset is > 0.
2585          * This matches the last fragment of a fragmented packet.
2586          * This is not yet supported in MLX5, return appropriate
2587          * error message.
2588          */
2589         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2590             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2591                 return rte_flow_error_set(error, ENOTSUP,
2592                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2593                                           last, "match on last "
2594                                           "fragment not supported");
2595         /* Other range values are invalid and rejected. */
2596         return rte_flow_error_set(error, EINVAL,
2597                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2598                                   "specified range not supported");
2599 }
2600
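/*
 * A minimal sketch, assuming the hypothetical MLX5_FLOW_DOC_EXAMPLES guard:
 * with a NULL spec/mask the frag_data spec stays zero and validation takes
 * the early return above, so the item simply matches the presence of the
 * IPv6 fragment extension header.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item example_ipv6_frag_ext_present = {
	.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
	.spec = NULL, /* zero frag_data spec takes the early return above */
	.last = NULL,
	.mask = NULL,
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
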
2601 /**
2602  * Validate the pop VLAN action.
2603  *
2604  * @param[in] dev
2605  *   Pointer to the rte_eth_dev structure.
2606  * @param[in] action_flags
2607  *   Holds the actions detected until now.
2608  * @param[in] action
2609  *   Pointer to the pop vlan action.
2610  * @param[in] item_flags
2611  *   The items found in this flow rule.
2612  * @param[in] attr
2613  *   Pointer to flow attributes.
2614  * @param[out] error
2615  *   Pointer to error structure.
2616  *
2617  * @return
2618  *   0 on success, a negative errno value otherwise and rte_errno is set.
2619  */
2620 static int
2621 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2622                                  uint64_t action_flags,
2623                                  const struct rte_flow_action *action,
2624                                  uint64_t item_flags,
2625                                  const struct rte_flow_attr *attr,
2626                                  struct rte_flow_error *error)
2627 {
2628         const struct mlx5_priv *priv = dev->data->dev_private;
2629
2630         (void)action;
2631         (void)attr;
2632         if (!priv->sh->pop_vlan_action)
2633                 return rte_flow_error_set(error, ENOTSUP,
2634                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2635                                           NULL,
2636                                           "pop vlan action is not supported");
2637         if (attr->egress)
2638                 return rte_flow_error_set(error, ENOTSUP,
2639                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2640                                           NULL,
2641                                           "pop vlan action not supported for "
2642                                           "egress");
2643         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2644                 return rte_flow_error_set(error, ENOTSUP,
2645                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2646                                           "no support for multiple VLAN "
2647                                           "actions");
2648         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2649         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2650             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2651                 return rte_flow_error_set(error, ENOTSUP,
2652                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2653                                           NULL,
2654                                           "cannot pop vlan after decap without "
2655                                           "match on inner vlan in the flow");
2656         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2657         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2658             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2659                 return rte_flow_error_set(error, ENOTSUP,
2660                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2661                                           NULL,
2662                                           "cannot pop vlan without a "
2663                                           "match on (outer) vlan in the flow");
2664         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2665                 return rte_flow_error_set(error, EINVAL,
2666                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2667                                           "wrong action order, port_id should "
2668                                           "be after pop VLAN action");
2669         if (!attr->transfer && priv->representor)
2670                 return rte_flow_error_set(error, ENOTSUP,
2671                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2672                                           "pop vlan action for VF representor "
2673                                           "not supported on NIC table");
2674         return 0;
2675 }
2676
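/*
 * A minimal sketch of an action order satisfying the checks above, assuming
 * the hypothetical MLX5_FLOW_DOC_EXAMPLES guard and example_* names: the
 * rule's pattern must match an (outer) VLAN, and port_id must come after
 * the VLAN pop.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_port_id example_dst_port = {
	.id = 1, /* illustrative destination port */
};
static const struct rte_flow_action example_pop_vlan_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
	{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &example_dst_port },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
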
2677 /**
2678  * Get VLAN default info from vlan match info.
2679  *
2680  * @param[in] items
2681  *   The list of item specifications.
2682  * @param[out] vlan
2683  *   Pointer to the VLAN info to fill in place.
2687  */
2688 static void
2689 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2690                                   struct rte_vlan_hdr *vlan)
2691 {
2692         const struct rte_flow_item_vlan nic_mask = {
2693                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2694                                 MLX5DV_FLOW_VLAN_VID_MASK),
2695                 .inner_type = RTE_BE16(0xffff),
2696         };
2697
2698         if (items == NULL)
2699                 return;
2700         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2701                 int type = items->type;
2702
2703                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2704                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2705                         break;
2706         }
2707         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2708                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2709                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2710
2711                 /* If VLAN item in pattern doesn't contain data, return here. */
2712                 if (!vlan_v)
2713                         return;
2714                 if (!vlan_m)
2715                         vlan_m = &nic_mask;
2716                 /* Only full match values are accepted */
2717                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2718                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2719                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2720                         vlan->vlan_tci |=
2721                                 rte_be_to_cpu_16(vlan_v->tci &
2722                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2723                 }
2724                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2725                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2726                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2727                         vlan->vlan_tci |=
2728                                 rte_be_to_cpu_16(vlan_v->tci &
2729                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2730                 }
2731                 if (vlan_m->inner_type == nic_mask.inner_type)
2732                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2733                                                            vlan_m->inner_type);
2734         }
2735 }
2736
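/*
 * A small sketch of the TCI decomposition used above, under the
 * hypothetical MLX5_FLOW_DOC_EXAMPLES guard: PCP lives in bits 13-15 and
 * VID in bits 0-11, so TCI 0x6005 carries PCP 3 and VID 5.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline uint16_t
example_make_tci(uint16_t pcp, uint16_t vid)
{
	return (uint16_t)(((pcp << MLX5DV_FLOW_VLAN_PCP_SHIFT) &
			   MLX5DV_FLOW_VLAN_PCP_MASK) |
			  (vid & MLX5DV_FLOW_VLAN_VID_MASK));
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */
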
2737 /**
2738  * Validate the push VLAN action.
2739  *
2740  * @param[in] dev
2741  *   Pointer to the rte_eth_dev structure.
2742  * @param[in] action_flags
2743  *   Holds the actions detected until now.
2744  * @param[in] vlan_m
2745  *   Pointer to the VLAN item mask from the flow pattern, or NULL.
2746  * @param[in] action
2747  *   Pointer to the action structure.
2748  * @param[in] attr
2749  *   Pointer to flow attributes
2750  * @param[out] error
2751  *   Pointer to error structure.
2752  *
2753  * @return
2754  *   0 on success, a negative errno value otherwise and rte_errno is set.
2755  */
2756 static int
2757 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2758                                   uint64_t action_flags,
2759                                   const struct rte_flow_item_vlan *vlan_m,
2760                                   const struct rte_flow_action *action,
2761                                   const struct rte_flow_attr *attr,
2762                                   struct rte_flow_error *error)
2763 {
2764         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2765         const struct mlx5_priv *priv = dev->data->dev_private;
2766
2767         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2768             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2769                 return rte_flow_error_set(error, EINVAL,
2770                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2771                                           "invalid vlan ethertype");
2772         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2773                 return rte_flow_error_set(error, EINVAL,
2774                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2775                                           "wrong action order, port_id should "
2776                                           "be after push VLAN");
2777         if (!attr->transfer && priv->representor)
2778                 return rte_flow_error_set(error, ENOTSUP,
2779                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2780                                           "push vlan action for VF representor "
2781                                           "not supported on NIC table");
2782         if (vlan_m &&
2783             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2784             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2785                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2786             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2787             !(mlx5_flow_find_action
2788                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2789                 return rte_flow_error_set(error, EINVAL,
2790                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2791                                           "not full match mask on VLAN PCP and "
2792                                           "there is no of_set_vlan_pcp action, "
2793                                           "push VLAN action cannot figure out "
2794                                           "PCP value");
2795         if (vlan_m &&
2796             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2797             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2798                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2799             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2800             !(mlx5_flow_find_action
2801                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2802                 return rte_flow_error_set(error, EINVAL,
2803                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2804                                           "not full match mask on VLAN VID and "
2805                                           "there is no of_set_vlan_vid action, "
2806                                           "push VLAN action cannot figure out "
2807                                           "VID value");
2808         (void)attr;
2809         return 0;
2810 }
2811
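/*
 * A minimal sketch, assuming the hypothetical MLX5_FLOW_DOC_EXAMPLES guard:
 * when the pattern carries no full VID match, the VID has to be supplied by
 * an of_set_vlan_vid action following the push, which is exactly what the
 * lookahead above verifies.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_of_push_vlan example_push_vlan = {
	.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
static const struct rte_flow_action_of_set_vlan_vid example_set_vid = {
	.vlan_vid = RTE_BE16(100), /* illustrative VID */
};
static const struct rte_flow_action example_push_vlan_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
	  .conf = &example_push_vlan },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
	  .conf = &example_set_vid },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
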
2812 /**
2813  * Validate the set VLAN PCP.
2814  *
2815  * @param[in] action_flags
2816  *   Holds the actions detected until now.
2817  * @param[in] actions
2818  *   Pointer to the list of actions remaining in the flow rule.
2819  * @param[out] error
2820  *   Pointer to error structure.
2821  *
2822  * @return
2823  *   0 on success, a negative errno value otherwise and rte_errno is set.
2824  */
2825 static int
2826 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2827                                      const struct rte_flow_action actions[],
2828                                      struct rte_flow_error *error)
2829 {
2830         const struct rte_flow_action *action = actions;
2831         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2832
2833         if (conf->vlan_pcp > 7)
2834                 return rte_flow_error_set(error, EINVAL,
2835                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2836                                           "VLAN PCP value is too big");
2837         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2838                 return rte_flow_error_set(error, ENOTSUP,
2839                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2840                                           "set VLAN PCP action must follow "
2841                                           "the push VLAN action");
2842         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2843                 return rte_flow_error_set(error, ENOTSUP,
2844                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2845                                           "Multiple VLAN PCP modification are "
2846                                           "not supported");
2847         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2848                 return rte_flow_error_set(error, EINVAL,
2849                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2850                                           "wrong action order, port_id should "
2851                                           "be after set VLAN PCP");
2852         return 0;
2853 }
2854
2855 /**
2856  * Validate the set VLAN VID.
2857  *
2858  * @param[in] item_flags
2859  *   Holds the items detected in this rule.
2860  * @param[in] action_flags
2861  *   Holds the actions detected until now.
2862  * @param[in] actions
2863  *   Pointer to the list of actions remaining in the flow rule.
2864  * @param[out] error
2865  *   Pointer to error structure.
2866  *
2867  * @return
2868  *   0 on success, a negative errno value otherwise and rte_errno is set.
2869  */
2870 static int
2871 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2872                                      uint64_t action_flags,
2873                                      const struct rte_flow_action actions[],
2874                                      struct rte_flow_error *error)
2875 {
2876         const struct rte_flow_action *action = actions;
2877         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2878
2879         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2880                 return rte_flow_error_set(error, EINVAL,
2881                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2882                                           "VLAN VID value is too big");
2883         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2884             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2885                 return rte_flow_error_set(error, ENOTSUP,
2886                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2887                                           "set VLAN VID action must follow push"
2888                                           " VLAN action or match on VLAN item");
2889         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2890                 return rte_flow_error_set(error, ENOTSUP,
2891                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2892                                           "Multiple VLAN VID modifications are "
2893                                           "not supported");
2894         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2895                 return rte_flow_error_set(error, EINVAL,
2896                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2897                                           "wrong action order, port_id should "
2898                                           "be after set VLAN VID");
2899         return 0;
2900 }
2901
2902 /**
2903  * Validate the FLAG action.
2904  *
2905  * @param[in] dev
2906  *   Pointer to the rte_eth_dev structure.
2907  * @param[in] action_flags
2908  *   Holds the actions detected until now.
2909  * @param[in] attr
2910  *   Pointer to flow attributes
2911  * @param[out] error
2912  *   Pointer to error structure.
2913  *
2914  * @return
2915  *   0 on success, a negative errno value otherwise and rte_errno is set.
2916  */
2917 static int
2918 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2919                              uint64_t action_flags,
2920                              const struct rte_flow_attr *attr,
2921                              struct rte_flow_error *error)
2922 {
2923         struct mlx5_priv *priv = dev->data->dev_private;
2924         struct mlx5_dev_config *config = &priv->config;
2925         int ret;
2926
2927         /* Fall back if no extended metadata register support. */
2928         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2929                 return mlx5_flow_validate_action_flag(action_flags, attr,
2930                                                       error);
2931         /* Extensive metadata mode requires registers. */
2932         if (!mlx5_flow_ext_mreg_supported(dev))
2933                 return rte_flow_error_set(error, ENOTSUP,
2934                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2935                                           "no metadata registers "
2936                                           "to support flag action");
2937         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2938                 return rte_flow_error_set(error, ENOTSUP,
2939                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2940                                           "extended metadata register"
2941                                           " isn't available");
2942         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2943         if (ret < 0)
2944                 return ret;
2945         MLX5_ASSERT(ret > 0);
2946         if (action_flags & MLX5_FLOW_ACTION_MARK)
2947                 return rte_flow_error_set(error, EINVAL,
2948                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2949                                           "can't mark and flag in same flow");
2950         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2951                 return rte_flow_error_set(error, EINVAL,
2952                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2953                                           "can't have 2 flag"
2954                                           " actions in same flow");
2955         return 0;
2956 }
2957
2958 /**
2959  * Validate MARK action.
2960  *
2961  * @param[in] dev
2962  *   Pointer to the rte_eth_dev structure.
2963  * @param[in] action
2964  *   Pointer to action.
2965  * @param[in] action_flags
2966  *   Holds the actions detected until now.
2967  * @param[in] attr
2968  *   Pointer to flow attributes
2969  * @param[out] error
2970  *   Pointer to error structure.
2971  *
2972  * @return
2973  *   0 on success, a negative errno value otherwise and rte_errno is set.
2974  */
2975 static int
2976 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2977                              const struct rte_flow_action *action,
2978                              uint64_t action_flags,
2979                              const struct rte_flow_attr *attr,
2980                              struct rte_flow_error *error)
2981 {
2982         struct mlx5_priv *priv = dev->data->dev_private;
2983         struct mlx5_dev_config *config = &priv->config;
2984         const struct rte_flow_action_mark *mark = action->conf;
2985         int ret;
2986
2987         if (is_tunnel_offload_active(dev))
2988                 return rte_flow_error_set(error, ENOTSUP,
2989                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2990                                           "no mark action "
2991                                           "if tunnel offload active");
2992         /* Fall back if no extended metadata register support. */
2993         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2994                 return mlx5_flow_validate_action_mark(action, action_flags,
2995                                                       attr, error);
2996         /* Extensive metadata mode requires registers. */
2997         if (!mlx5_flow_ext_mreg_supported(dev))
2998                 return rte_flow_error_set(error, ENOTSUP,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3000                                           "no metadata registers "
3001                                           "to support mark action");
3002         if (!priv->sh->dv_mark_mask)
3003                 return rte_flow_error_set(error, ENOTSUP,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3005                                           "extended metadata register"
3006                                           " isn't available");
3007         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3008         if (ret < 0)
3009                 return ret;
3010         MLX5_ASSERT(ret > 0);
3011         if (!mark)
3012                 return rte_flow_error_set(error, EINVAL,
3013                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3014                                           "configuration cannot be null");
3015         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3016                 return rte_flow_error_set(error, EINVAL,
3017                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3018                                           &mark->id,
3019                                           "mark id exceeds the limit");
3020         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3021                 return rte_flow_error_set(error, EINVAL,
3022                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3023                                           "can't flag and mark in same flow");
3024         if (action_flags & MLX5_FLOW_ACTION_MARK)
3025                 return rte_flow_error_set(error, EINVAL,
3026                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3027                                           "can't have 2 mark actions in same"
3028                                           " flow");
3029         return 0;
3030 }
3031
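/*
 * A one-struct sketch under the hypothetical MLX5_FLOW_DOC_EXAMPLES guard:
 * the mark id is bounded by MLX5_FLOW_MARK_MAX masked with the probed
 * dv_mark_mask, per the range check above.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_mark example_mark = {
	.id = 0xffff, /* accepted only while below the effective limit */
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
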
3032 /**
3033  * Validate SET_META action.
3034  *
3035  * @param[in] dev
3036  *   Pointer to the rte_eth_dev structure.
3037  * @param[in] action
3038  *   Pointer to the action structure.
3039  * @param[in] action_flags
3040  *   Holds the actions detected until now.
3041  * @param[in] attr
3042  *   Pointer to flow attributes
3043  * @param[out] error
3044  *   Pointer to error structure.
3045  *
3046  * @return
3047  *   0 on success, a negative errno value otherwise and rte_errno is set.
3048  */
3049 static int
3050 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3051                                  const struct rte_flow_action *action,
3052                                  uint64_t action_flags __rte_unused,
3053                                  const struct rte_flow_attr *attr,
3054                                  struct rte_flow_error *error)
3055 {
3056         const struct rte_flow_action_set_meta *conf;
3057         uint32_t nic_mask = UINT32_MAX;
3058         int reg;
3059
3060         if (!mlx5_flow_ext_mreg_supported(dev))
3061                 return rte_flow_error_set(error, ENOTSUP,
3062                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3063                                           "extended metadata register"
3064                                           " isn't supported");
3065         reg = flow_dv_get_metadata_reg(dev, attr, error);
3066         if (reg < 0)
3067                 return reg;
3068         if (reg == REG_NON)
3069                 return rte_flow_error_set(error, ENOTSUP,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3071                                           "unavailable extended metadata register");
3072         if (reg != REG_A && reg != REG_B) {
3073                 struct mlx5_priv *priv = dev->data->dev_private;
3074
3075                 nic_mask = priv->sh->dv_meta_mask;
3076         }
3077         if (!(action->conf))
3078                 return rte_flow_error_set(error, EINVAL,
3079                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3080                                           "configuration cannot be null");
3081         conf = (const struct rte_flow_action_set_meta *)action->conf;
3082         if (!conf->mask)
3083                 return rte_flow_error_set(error, EINVAL,
3084                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3085                                           "zero mask doesn't have any effect");
3086         if (conf->mask & ~nic_mask)
3087                 return rte_flow_error_set(error, EINVAL,
3088                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3089                                           "meta data must be within reg C0");
3090         return 0;
3091 }
3092
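/*
 * A minimal sketch under the hypothetical MLX5_FLOW_DOC_EXAMPLES guard:
 * the set_meta mask must fit inside the register mask computed above,
 * which is all-ones for REG_A/REG_B and the probed reg C0 mask otherwise.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_set_meta example_set_meta = {
	.data = 0x1234,
	.mask = 0xffff, /* must be a subset of the advertised nic mask */
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
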
3093 /**
3094  * Validate SET_TAG action.
3095  *
3096  * @param[in] dev
3097  *   Pointer to the rte_eth_dev structure.
3098  * @param[in] action
3099  *   Pointer to the action structure.
3100  * @param[in] action_flags
3101  *   Holds the actions detected until now.
3102  * @param[in] attr
3103  *   Pointer to flow attributes
3104  * @param[out] error
3105  *   Pointer to error structure.
3106  *
3107  * @return
3108  *   0 on success, a negative errno value otherwise and rte_errno is set.
3109  */
3110 static int
3111 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3112                                 const struct rte_flow_action *action,
3113                                 uint64_t action_flags,
3114                                 const struct rte_flow_attr *attr,
3115                                 struct rte_flow_error *error)
3116 {
3117         const struct rte_flow_action_set_tag *conf;
3118         const uint64_t terminal_action_flags =
3119                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3120                 MLX5_FLOW_ACTION_RSS;
3121         int ret;
3122
3123         if (!mlx5_flow_ext_mreg_supported(dev))
3124                 return rte_flow_error_set(error, ENOTSUP,
3125                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3126                                           "extensive metadata register"
3127                                           " isn't supported");
3128         if (!(action->conf))
3129                 return rte_flow_error_set(error, EINVAL,
3130                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3131                                           "configuration cannot be null");
3132         conf = (const struct rte_flow_action_set_tag *)action->conf;
3133         if (!conf->mask)
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3136                                           "zero mask doesn't have any effect");
3137         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3138         if (ret < 0)
3139                 return ret;
3140         if (!attr->transfer && attr->ingress &&
3141             (action_flags & terminal_action_flags))
3142                 return rte_flow_error_set(error, EINVAL,
3143                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3144                                           "set_tag has no effect"
3145                                           " with terminal actions");
3146         return 0;
3147 }
3148
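/*
 * A minimal sketch under the hypothetical MLX5_FLOW_DOC_EXAMPLES guard:
 * on non-transfer ingress rules a set_tag combined only with terminal
 * actions (DROP/QUEUE/RSS) is rejected above, so the typical accepted
 * shape pairs it with a non-terminal action such as JUMP.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_set_tag example_set_tag = {
	.data = 0xa5,
	.mask = 0xff,
	.index = 0,
};
static const struct rte_flow_action_jump example_jump = { .group = 1 };
static const struct rte_flow_action example_set_tag_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_SET_TAG, .conf = &example_set_tag },
	{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &example_jump },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
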
3149 /**
3150  * Validate count action.
3151  *
3152  * @param[in] dev
3153  *   Pointer to rte_eth_dev structure.
3154  * @param[in] action
3155  *   Pointer to the action structure.
3156  * @param[in] action_flags
3157  *   Holds the actions detected until now.
3158  * @param[out] error
3159  *   Pointer to error structure.
3160  *
3161  * @return
3162  *   0 on success, a negative errno value otherwise and rte_errno is set.
3163  */
3164 static int
3165 flow_dv_validate_action_count(struct rte_eth_dev *dev,
3166                               const struct rte_flow_action *action,
3167                               uint64_t action_flags,
3168                               struct rte_flow_error *error)
3169 {
3170         struct mlx5_priv *priv = dev->data->dev_private;
3171         const struct rte_flow_action_count *count;
3172
3173         if (!priv->config.devx)
3174                 goto notsup_err;
3175         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3176                 return rte_flow_error_set(error, EINVAL,
3177                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3178                                           "duplicate count actions set");
3179         count = (const struct rte_flow_action_count *)action->conf;
3180         if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3181             !priv->sh->flow_hit_aso_en)
3182                 return rte_flow_error_set(error, EINVAL,
3183                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3184                                           "old age and shared count combination is not supported");
3185 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3186         return 0;
3187 #endif
3188 notsup_err:
3189         return rte_flow_error_set
3190                       (error, ENOTSUP,
3191                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3192                        NULL,
3193                        "count action not supported");
3194 }
3195
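/*
 * A one-struct sketch under the hypothetical MLX5_FLOW_DOC_EXAMPLES guard:
 * a shared counter, which the check above refuses to combine with an AGE
 * action unless the device exposes ASO flow-hit support.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_count example_shared_count = {
	.shared = 1,
	.id = 0, /* illustrative shared counter id */
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
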
3196 /**
3197  * Validate the L2 encap action.
3198  *
3199  * @param[in] dev
3200  *   Pointer to the rte_eth_dev structure.
3201  * @param[in] action_flags
3202  *   Holds the actions detected until now.
3203  * @param[in] action
3204  *   Pointer to the action structure.
3205  * @param[in] attr
3206  *   Pointer to flow attributes.
3207  * @param[out] error
3208  *   Pointer to error structure.
3209  *
3210  * @return
3211  *   0 on success, a negative errno value otherwise and rte_errno is set.
3212  */
3213 static int
3214 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3215                                  uint64_t action_flags,
3216                                  const struct rte_flow_action *action,
3217                                  const struct rte_flow_attr *attr,
3218                                  struct rte_flow_error *error)
3219 {
3220         const struct mlx5_priv *priv = dev->data->dev_private;
3221
3222         if (!(action->conf))
3223                 return rte_flow_error_set(error, EINVAL,
3224                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3225                                           "configuration cannot be null");
3226         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3227                 return rte_flow_error_set(error, EINVAL,
3228                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3229                                           "can only have a single encap action "
3230                                           "in a flow");
3231         if (!attr->transfer && priv->representor)
3232                 return rte_flow_error_set(error, ENOTSUP,
3233                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3234                                           "encap action for VF representor "
3235                                           "not supported on NIC table");
3236         return 0;
3237 }
3238
3239 /**
3240  * Validate a decap action.
3241  *
3242  * @param[in] dev
3243  *   Pointer to the rte_eth_dev structure.
3244  * @param[in] action_flags
3245  *   Holds the actions detected until now.
3246  * @param[in] action
3247  *   Pointer to the action structure.
3248  * @param[in] item_flags
3249  *   Holds the items detected.
3250  * @param[in] attr
3251  *   Pointer to flow attributes
3252  * @param[out] error
3253  *   Pointer to error structure.
3254  *
3255  * @return
3256  *   0 on success, a negative errno value otherwise and rte_errno is set.
3257  */
3258 static int
3259 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3260                               uint64_t action_flags,
3261                               const struct rte_flow_action *action,
3262                               const uint64_t item_flags,
3263                               const struct rte_flow_attr *attr,
3264                               struct rte_flow_error *error)
3265 {
3266         const struct mlx5_priv *priv = dev->data->dev_private;
3267
3268         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3269             !priv->config.decap_en)
3270                 return rte_flow_error_set(error, ENOTSUP,
3271                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3272                                           "decap is not enabled");
3273         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3274                 return rte_flow_error_set(error, ENOTSUP,
3275                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3276                                           action_flags &
3277                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3278                                           "have a single decap action" : "decap "
3279                                           "after encap is not supported");
3280         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3281                 return rte_flow_error_set(error, EINVAL,
3282                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3283                                           "can't have decap action after"
3284                                           " modify action");
3285         if (attr->egress)
3286                 return rte_flow_error_set(error, ENOTSUP,
3287                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3288                                           NULL,
3289                                           "decap action not supported for "
3290                                           "egress");
3291         if (!attr->transfer && priv->representor)
3292                 return rte_flow_error_set(error, ENOTSUP,
3293                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3294                                           "decap action for VF representor "
3295                                           "not supported on NIC table");
3296         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3297             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3298                 return rte_flow_error_set(error, ENOTSUP,
3299                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3300                                 "VXLAN item should be present for VXLAN decap");
3301         return 0;
3302 }
3303
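/*
 * A minimal sketch under the hypothetical MLX5_FLOW_DOC_EXAMPLES guard:
 * a VXLAN decap action is only accepted when the pattern carries a VXLAN
 * item, as checked above.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item example_vxlan_decap_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */
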
3304 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3305
3306 /**
3307  * Validate the raw encap and decap actions.
3308  *
3309  * @param[in] dev
3310  *   Pointer to the rte_eth_dev structure.
3311  * @param[in] decap
3312  *   Pointer to the decap action.
3313  * @param[in] encap
3314  *   Pointer to the encap action.
3315  * @param[in] attr
3316  *   Pointer to flow attributes
3317  * @param[in, out] action_flags
3318  *   Holds the actions detected until now.
3319  * @param[out] actions_n
3320  *   Pointer to the number of actions counter.
3321  * @param[in] action
3322  *   Pointer to the action structure.
3323  * @param[in] item_flags
3324  *   Holds the items detected.
3325  * @param[out] error
3326  *   Pointer to error structure.
3327  *
3328  * @return
3329  *   0 on success, a negative errno value otherwise and rte_errno is set.
3330  */
3331 static int
3332 flow_dv_validate_action_raw_encap_decap
3333         (struct rte_eth_dev *dev,
3334          const struct rte_flow_action_raw_decap *decap,
3335          const struct rte_flow_action_raw_encap *encap,
3336          const struct rte_flow_attr *attr, uint64_t *action_flags,
3337          int *actions_n, const struct rte_flow_action *action,
3338          uint64_t item_flags, struct rte_flow_error *error)
3339 {
3340         const struct mlx5_priv *priv = dev->data->dev_private;
3341         int ret;
3342
3343         if (encap && (!encap->size || !encap->data))
3344                 return rte_flow_error_set(error, EINVAL,
3345                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3346                                           "raw encap data cannot be empty");
3347         if (decap && encap) {
3348                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3349                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3350                         /* L3 encap. */
3351                         decap = NULL;
3352                 else if (encap->size <=
3353                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3354                            decap->size >
3355                            MLX5_ENCAPSULATION_DECISION_SIZE)
3356                         /* L3 decap. */
3357                         encap = NULL;
3358                 else if (encap->size >
3359                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3360                            decap->size >
3361                            MLX5_ENCAPSULATION_DECISION_SIZE)
3362                         /* 2 L2 actions: encap and decap. */
3363                         ;
3364                 else
3365                         return rte_flow_error_set(error,
3366                                 ENOTSUP,
3367                                 RTE_FLOW_ERROR_TYPE_ACTION,
3368                                 NULL, "combination of too small "
3369                                 "raw decap and too small "
3370                                 "raw encap is not supported");
3371         }
3372         if (decap) {
3373                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3374                                                     item_flags, attr, error);
3375                 if (ret < 0)
3376                         return ret;
3377                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3378                 ++(*actions_n);
3379         }
3380         if (encap) {
3381                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3382                         return rte_flow_error_set(error, ENOTSUP,
3383                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3384                                                   NULL,
3385                                                   "small raw encap size");
3386                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3387                         return rte_flow_error_set(error, EINVAL,
3388                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3389                                                   NULL,
3390                                                   "more than one encap action");
3391                 if (!attr->transfer && priv->representor)
3392                         return rte_flow_error_set
3393                                         (error, ENOTSUP,
3394                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3395                                          "encap action for VF representor "
3396                                          "not supported on NIC table");
3397                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3398                 ++(*actions_n);
3399         }
3400         return 0;
3401 }
3402
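/*
 * The size-based classification applied above, restated as a sketch under
 * the hypothetical MLX5_FLOW_DOC_EXAMPLES guard with hypothetical
 * example_* names, using the same MLX5_ENCAPSULATION_DECISION_SIZE
 * threshold.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
enum example_refmt_kind {
	EXAMPLE_L3_ENCAP, /* small decap + big encap: one L3 encap reformat */
	EXAMPLE_L3_DECAP, /* big decap + small encap: one L3 decap reformat */
	EXAMPLE_L2_PAIR,  /* big decap + big encap: two L2 actions */
	EXAMPLE_INVALID,  /* both small: rejected */
};

static inline enum example_refmt_kind
example_classify_raw(size_t decap_size, size_t encap_size)
{
	if (decap_size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
	    encap_size > MLX5_ENCAPSULATION_DECISION_SIZE)
		return EXAMPLE_L3_ENCAP;
	if (encap_size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
	    decap_size > MLX5_ENCAPSULATION_DECISION_SIZE)
		return EXAMPLE_L3_DECAP;
	if (decap_size > MLX5_ENCAPSULATION_DECISION_SIZE &&
	    encap_size > MLX5_ENCAPSULATION_DECISION_SIZE)
		return EXAMPLE_L2_PAIR;
	return EXAMPLE_INVALID;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */
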
3403 /**
3404  * Match encap_decap resource.
3405  *
3406  * @param list
3407  *   Pointer to the hash list.
3408  * @param entry
3409  *   Pointer to the existing resource entry object.
3410  * @param key
3411  *   Key of the new entry.
3412  * @param cb_ctx
3413  *   Pointer to the new encap_decap resource.
3414  *
3415  * @return
3416  *   0 on matching, non-zero otherwise.
3417  */
3418 int
3419 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3420                              struct mlx5_hlist_entry *entry,
3421                              uint64_t key __rte_unused, void *cb_ctx)
3422 {
3423         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3424         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3425         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3426
3427         cache_resource = container_of(entry,
3428                                       struct mlx5_flow_dv_encap_decap_resource,
3429                                       entry);
3430         if (resource->reformat_type == cache_resource->reformat_type &&
3431             resource->ft_type == cache_resource->ft_type &&
3432             resource->flags == cache_resource->flags &&
3433             resource->size == cache_resource->size &&
3434             !memcmp((const void *)resource->buf,
3435                     (const void *)cache_resource->buf,
3436                     resource->size))
3437                 return 0;
3438         return -1;
3439 }
3440
3441 /**
3442  * Allocate encap_decap resource.
3443  *
3444  * @param list
3445  *   Pointer to the hash list.
3446  * @param key
3447  *   Key of the new entry.
3448  * @param cb_ctx
3449  *   Pointer to the new encap_decap resource.
3450  *
3451  * @return
3452  *   Pointer to the created entry on success, NULL otherwise.
3453  */
3454 struct mlx5_hlist_entry *
3455 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3456                               uint64_t key __rte_unused,
3457                               void *cb_ctx)
3458 {
3459         struct mlx5_dev_ctx_shared *sh = list->ctx;
3460         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3461         struct mlx5dv_dr_domain *domain;
3462         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3463         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3464         uint32_t idx;
3465         int ret;
3466
3467         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3468                 domain = sh->fdb_domain;
3469         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3470                 domain = sh->rx_domain;
3471         else
3472                 domain = sh->tx_domain;
3473         /* Register new encap/decap resource. */
3474         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3475                                        &idx);
3476         if (!cache_resource) {
3477                 rte_flow_error_set(ctx->error, ENOMEM,
3478                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3479                                    "cannot allocate resource memory");
3480                 return NULL;
3481         }
3482         *cache_resource = *resource;
3483         cache_resource->idx = idx;
3484         ret = mlx5_flow_os_create_flow_action_packet_reformat
3485                                         (sh->ctx, domain, cache_resource,
3486                                          &cache_resource->action);
3487         if (ret) {
3488                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3489                 rte_flow_error_set(ctx->error, ENOMEM,
3490                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3491                                    NULL, "cannot create action");
3492                 return NULL;
3493         }
3494
3495         return &cache_resource->entry;
3496 }
3497
3498 /**
3499  * Find existing encap/decap resource or create and register a new one.
3500  *
3501  * @param[in, out] dev
3502  *   Pointer to rte_eth_dev structure.
3503  * @param[in, out] resource
3504  *   Pointer to encap/decap resource.
3505  * @param[in, out] dev_flow
3506  *   Pointer to the dev_flow.
3507  * @param[out] error
3508  *   Pointer to error structure.
3509  *
3510  * @return
3511  *   0 on success otherwise -errno and errno is set.
3512  */
3513 static int
3514 flow_dv_encap_decap_resource_register
3515                         (struct rte_eth_dev *dev,
3516                          struct mlx5_flow_dv_encap_decap_resource *resource,
3517                          struct mlx5_flow *dev_flow,
3518                          struct rte_flow_error *error)
3519 {
3520         struct mlx5_priv *priv = dev->data->dev_private;
3521         struct mlx5_dev_ctx_shared *sh = priv->sh;
3522         struct mlx5_hlist_entry *entry;
3523         union {
3524                 struct {
3525                         uint32_t ft_type:8;
3526                         uint32_t refmt_type:8;
3527                         /*
3528                          * Header reformat actions can be shared between
3529                          * non-root tables. One bit to indicate non-root
3530                          * table or not.
3531                          */
3532                         uint32_t is_root:1;
3533                         uint32_t reserve:15;
3534                 };
3535                 uint32_t v32;
3536         } encap_decap_key = {
3537                 {
3538                         .ft_type = resource->ft_type,
3539                         .refmt_type = resource->reformat_type,
3540                         .is_root = !!dev_flow->dv.group,
3541                         .reserve = 0,
3542                 }
3543         };
3544         struct mlx5_flow_cb_ctx ctx = {
3545                 .error = error,
3546                 .data = resource,
3547         };
3548         uint64_t key64;
3549
3550         resource->flags = dev_flow->dv.group ? 0 : 1;
3551         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3552                                  sizeof(encap_decap_key.v32), 0);
3553         if (resource->reformat_type !=
3554             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3555             resource->size)
3556                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3557         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3558         if (!entry)
3559                 return -rte_errno;
3560         resource = container_of(entry, typeof(*resource), entry);
3561         dev_flow->dv.encap_decap = resource;
3562         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3563         return 0;
3564 }
3565
3566 /**
3567  * Find existing table jump resource or create and register a new one.
3568  *
3569  * @param[in, out] dev
3570  *   Pointer to rte_eth_dev structure.
3571  * @param[in, out] tbl
3572  *   Pointer to flow table resource.
3573  * @param[in, out] dev_flow
3574  *   Pointer to the dev_flow.
3575  * @param[out] error
3576  *   Pointer to error structure.
3577  *
3578  * @return
3579  *   0 on success otherwise -errno and errno is set.
3580  */
3581 static int
3582 flow_dv_jump_tbl_resource_register
3583                         (struct rte_eth_dev *dev __rte_unused,
3584                          struct mlx5_flow_tbl_resource *tbl,
3585                          struct mlx5_flow *dev_flow,
3586                          struct rte_flow_error *error __rte_unused)
3587 {
3588         struct mlx5_flow_tbl_data_entry *tbl_data =
3589                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3590
3591         MLX5_ASSERT(tbl);
3592         MLX5_ASSERT(tbl_data->jump.action);
3593         dev_flow->handle->rix_jump = tbl_data->idx;
3594         dev_flow->dv.jump = &tbl_data->jump;
3595         return 0;
3596 }
3597
3598 int
3599 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3600                          struct mlx5_cache_entry *entry, void *cb_ctx)
3601 {
3602         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3603         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3604         struct mlx5_flow_dv_port_id_action_resource *res =
3605                         container_of(entry, typeof(*res), entry);
3606
3607         return ref->port_id != res->port_id;
3608 }
3609
3610 struct mlx5_cache_entry *
3611 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3612                           struct mlx5_cache_entry *entry __rte_unused,
3613                           void *cb_ctx)
3614 {
3615         struct mlx5_dev_ctx_shared *sh = list->ctx;
3616         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3617         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3618         struct mlx5_flow_dv_port_id_action_resource *cache;
3619         uint32_t idx;
3620         int ret;
3621
3622         /* Register new port id action resource. */
3623         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3624         if (!cache) {
3625                 rte_flow_error_set(ctx->error, ENOMEM,
3626                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3627                                    "cannot allocate port_id action cache memory");
3628                 return NULL;
3629         }
3630         *cache = *ref;
3631         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3632                                                         ref->port_id,
3633                                                         &cache->action);
3634         if (ret) {
3635                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3636                 rte_flow_error_set(ctx->error, ENOMEM,
3637                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3638                                    "cannot create action");
3639                 return NULL;
3640         }
3641         cache->idx = idx;
3642         return &cache->entry;
3643 }
3644
3645 /**
3646  * Find existing table port ID resource or create and register a new one.
3647  *
3648  * @param[in, out] dev
3649  *   Pointer to rte_eth_dev structure.
3650  * @param[in, out] resource
3651  *   Pointer to port ID action resource.
3652  * @param[in, out] dev_flow
3653  *   Pointer to the dev_flow.
3654  * @param[out] error
3655  *   Pointer to error structure.
3656  *
3657  * @return
3658  *   0 on success otherwise -errno and errno is set.
3659  */
3660 static int
3661 flow_dv_port_id_action_resource_register
3662                         (struct rte_eth_dev *dev,
3663                          struct mlx5_flow_dv_port_id_action_resource *resource,
3664                          struct mlx5_flow *dev_flow,
3665                          struct rte_flow_error *error)
3666 {
3667         struct mlx5_priv *priv = dev->data->dev_private;
3668         struct mlx5_cache_entry *entry;
3669         struct mlx5_flow_dv_port_id_action_resource *cache;
3670         struct mlx5_flow_cb_ctx ctx = {
3671                 .error = error,
3672                 .data = resource,
3673         };
3674
3675         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3676         if (!entry)
3677                 return -rte_errno;
3678         cache = container_of(entry, typeof(*cache), entry);
3679         dev_flow->dv.port_id_action = cache;
3680         dev_flow->handle->rix_port_id_action = cache->idx;
3681         return 0;
3682 }
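
/*
 * Editor's usage sketch (not part of the driver): a caller fills only the
 * lookup key of the resource; mlx5_cache_register() either returns an
 * existing entry with a matching port_id or invokes the create callback
 * above. The action pointer and pool index are outputs of registration.
 * The helper name and the bare-key initialization are illustrative only.
 */
static __rte_unused int
port_id_action_usage_sketch(struct rte_eth_dev *dev,
                            struct mlx5_flow *dev_flow,
                            uint32_t vport_id,
                            struct rte_flow_error *error)
{
        struct mlx5_flow_dv_port_id_action_resource res = {
                .port_id = vport_id, /* Key compared by the match callback. */
        };

        /* On success dev_flow->dv.port_id_action points at the cache entry. */
        return flow_dv_port_id_action_resource_register(dev, &res, dev_flow,
                                                        error);
}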
3683
3684 int
3685 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3686                          struct mlx5_cache_entry *entry, void *cb_ctx)
3687 {
3688         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3689         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3690         struct mlx5_flow_dv_push_vlan_action_resource *res =
3691                         container_of(entry, typeof(*res), entry);
3692
3693         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3694 }
3695
3696 struct mlx5_cache_entry *
3697 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3698                           struct mlx5_cache_entry *entry __rte_unused,
3699                           void *cb_ctx)
3700 {
3701         struct mlx5_dev_ctx_shared *sh = list->ctx;
3702         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3703         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3704         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3705         struct mlx5dv_dr_domain *domain;
3706         uint32_t idx;
3707         int ret;
3708
3709         /* Register new push VLAN action resource. */
3710         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3711         if (!cache) {
3712                 rte_flow_error_set(ctx->error, ENOMEM,
3713                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3714                                    "cannot allocate push_vlan action cache memory");
3715                 return NULL;
3716         }
3717         *cache = *ref;
3718         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3719                 domain = sh->fdb_domain;
3720         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3721                 domain = sh->rx_domain;
3722         else
3723                 domain = sh->tx_domain;
3724         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3725                                                         &cache->action);
3726         if (ret) {
3727                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3728                 rte_flow_error_set(ctx->error, ENOMEM,
3729                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3730                                    "cannot create push vlan action");
3731                 return NULL;
3732         }
3733         cache->idx = idx;
3734         return &cache->entry;
3735 }
3736
3737 /**
3738  * Find existing push VLAN action resource or create and register a new one.
3739  *
3740  * @param[in, out] dev
3741  *   Pointer to rte_eth_dev structure.
3742  * @param[in, out] resource
3743  *   Pointer to push VLAN action resource.
3744  * @param[in, out] dev_flow
3745  *   Pointer to the dev_flow.
3746  * @param[out] error
3747  *   Pointer to error structure.
3748  *
3749  * @return
3750  *   0 on success, a negative errno value otherwise and rte_errno is set.
3751  */
3752 static int
3753 flow_dv_push_vlan_action_resource_register
3754                        (struct rte_eth_dev *dev,
3755                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3756                         struct mlx5_flow *dev_flow,
3757                         struct rte_flow_error *error)
3758 {
3759         struct mlx5_priv *priv = dev->data->dev_private;
3760         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3761         struct mlx5_cache_entry *entry;
3762         struct mlx5_flow_cb_ctx ctx = {
3763                 .error = error,
3764                 .data = resource,
3765         };
3766
3767         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3768         if (!entry)
3769                 return -rte_errno;
3770         cache = container_of(entry, typeof(*cache), entry);
3771
3772         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3773         dev_flow->dv.push_vlan_res = cache;
3774         return 0;
3775 }
3776
3777 /**
3778  * Get the header size of a specific rte_flow_item_type.
3779  *
3780  * @param[in] item_type
3781  *   Tested rte_flow_item_type.
3782  *
3783  * @return
3784  *   Size of the item type header, 0 if void or irrelevant.
3785  */
3786 static size_t
3787 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3788 {
3789         size_t retval;
3790
3791         switch (item_type) {
3792         case RTE_FLOW_ITEM_TYPE_ETH:
3793                 retval = sizeof(struct rte_ether_hdr);
3794                 break;
3795         case RTE_FLOW_ITEM_TYPE_VLAN:
3796                 retval = sizeof(struct rte_vlan_hdr);
3797                 break;
3798         case RTE_FLOW_ITEM_TYPE_IPV4:
3799                 retval = sizeof(struct rte_ipv4_hdr);
3800                 break;
3801         case RTE_FLOW_ITEM_TYPE_IPV6:
3802                 retval = sizeof(struct rte_ipv6_hdr);
3803                 break;
3804         case RTE_FLOW_ITEM_TYPE_UDP:
3805                 retval = sizeof(struct rte_udp_hdr);
3806                 break;
3807         case RTE_FLOW_ITEM_TYPE_TCP:
3808                 retval = sizeof(struct rte_tcp_hdr);
3809                 break;
3810         case RTE_FLOW_ITEM_TYPE_VXLAN:
3811         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3812                 retval = sizeof(struct rte_vxlan_hdr);
3813                 break;
3814         case RTE_FLOW_ITEM_TYPE_GRE:
3815         case RTE_FLOW_ITEM_TYPE_NVGRE:
3816                 retval = sizeof(struct rte_gre_hdr);
3817                 break;
3818         case RTE_FLOW_ITEM_TYPE_MPLS:
3819                 retval = sizeof(struct rte_mpls_hdr);
3820                 break;
3821         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3822         default:
3823                 retval = 0;
3824                 break;
3825         }
3826         return retval;
3827 }
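
/*
 * Editor's sketch: the total wire length of an encap item chain is the sum
 * of the per-item header sizes returned above. The hypothetical helper
 * below mirrors the sizing logic used by flow_dv_convert_encap_data().
 */
static __rte_unused size_t
encap_items_total_len_sketch(const struct rte_flow_item *items)
{
        size_t len = 0;

        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
                len += flow_dv_get_item_hdr_len(items->type);
        return len;
}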
3828
3829 #define MLX5_ENCAP_IPV4_VERSION         0x40
3830 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3831 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3832 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3833 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3834 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3835 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3836
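/*
 * Editor's note: when the corresponding field is left zero in the encap
 * data, the defaults above compose as follows: version_ihl becomes 0x45
 * (IPv4, minimal 20-byte header), TTL 64, hop limit 255, vtc_flow
 * 0x60000000 (IPv6, no TC or flow label), the VXLAN flags word sets only
 * the VNI-valid bit, and the VXLAN-GPE flags byte sets the Next Protocol
 * bit.
 */
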
3837 /**
3838  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
3839  *
3840  * @param[in] items
3841  *   Pointer to rte_flow_item objects list.
3842  * @param[out] buf
3843  *   Pointer to the output buffer.
3844  * @param[out] size
3845  *   Pointer to the output buffer size.
3846  * @param[out] error
3847  *   Pointer to the error structure.
3848  *
3849  * @return
3850  *   0 on success, a negative errno value otherwise and rte_errno is set.
3851  */
3852 static int
3853 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3854                            size_t *size, struct rte_flow_error *error)
3855 {
3856         struct rte_ether_hdr *eth = NULL;
3857         struct rte_vlan_hdr *vlan = NULL;
3858         struct rte_ipv4_hdr *ipv4 = NULL;
3859         struct rte_ipv6_hdr *ipv6 = NULL;
3860         struct rte_udp_hdr *udp = NULL;
3861         struct rte_vxlan_hdr *vxlan = NULL;
3862         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3863         struct rte_gre_hdr *gre = NULL;
3864         size_t len;
3865         size_t temp_size = 0;
3866
3867         if (!items)
3868                 return rte_flow_error_set(error, EINVAL,
3869                                           RTE_FLOW_ERROR_TYPE_ACTION,
3870                                           NULL, "invalid empty data");
3871         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3872                 len = flow_dv_get_item_hdr_len(items->type);
3873                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3874                         return rte_flow_error_set(error, EINVAL,
3875                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3876                                                   (void *)items->type,
3877                                                   "items total size is too big"
3878                                                   " for encap action");
3879                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3880                 switch (items->type) {
3881                 case RTE_FLOW_ITEM_TYPE_ETH:
3882                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3883                         break;
3884                 case RTE_FLOW_ITEM_TYPE_VLAN:
3885                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3886                         if (!eth)
3887                                 return rte_flow_error_set(error, EINVAL,
3888                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3889                                                 (void *)items->type,
3890                                                 "eth header not found");
3891                         if (!eth->ether_type)
3892                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3893                         break;
3894                 case RTE_FLOW_ITEM_TYPE_IPV4:
3895                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3896                         if (!vlan && !eth)
3897                                 return rte_flow_error_set(error, EINVAL,
3898                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3899                                                 (void *)items->type,
3900                                                 "neither eth nor vlan"
3901                                                 " header found");
3902                         if (vlan && !vlan->eth_proto)
3903                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3904                         else if (eth && !eth->ether_type)
3905                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3906                         if (!ipv4->version_ihl)
3907                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3908                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3909                         if (!ipv4->time_to_live)
3910                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3911                         break;
3912                 case RTE_FLOW_ITEM_TYPE_IPV6:
3913                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3914                         if (!vlan && !eth)
3915                                 return rte_flow_error_set(error, EINVAL,
3916                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3917                                                 (void *)items->type,
3918                                                 "neither eth nor vlan"
3919                                                 " header found");
3920                         if (vlan && !vlan->eth_proto)
3921                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3922                         else if (eth && !eth->ether_type)
3923                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3924                         if (!ipv6->vtc_flow)
3925                                 ipv6->vtc_flow =
3926                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3927                         if (!ipv6->hop_limits)
3928                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3929                         break;
3930                 case RTE_FLOW_ITEM_TYPE_UDP:
3931                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3932                         if (!ipv4 && !ipv6)
3933                                 return rte_flow_error_set(error, EINVAL,
3934                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3935                                                 (void *)items->type,
3936                                                 "ip header not found");
3937                         if (ipv4 && !ipv4->next_proto_id)
3938                                 ipv4->next_proto_id = IPPROTO_UDP;
3939                         else if (ipv6 && !ipv6->proto)
3940                                 ipv6->proto = IPPROTO_UDP;
3941                         break;
3942                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3943                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3944                         if (!udp)
3945                                 return rte_flow_error_set(error, EINVAL,
3946                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3947                                                 (void *)items->type,
3948                                                 "udp header not found");
3949                         if (!udp->dst_port)
3950                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3951                         if (!vxlan->vx_flags)
3952                                 vxlan->vx_flags =
3953                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3954                         break;
3955                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3956                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3957                         if (!udp)
3958                                 return rte_flow_error_set(error, EINVAL,
3959                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3960                                                 (void *)items->type,
3961                                                 "udp header not found");
3962                         if (!vxlan_gpe->proto)
3963                                 return rte_flow_error_set(error, EINVAL,
3964                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3965                                                 (void *)items->type,
3966                                                 "next protocol not found");
3967                         if (!udp->dst_port)
3968                                 udp->dst_port =
3969                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3970                         if (!vxlan_gpe->vx_flags)
3971                                 vxlan_gpe->vx_flags =
3972                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3973                         break;
3974                 case RTE_FLOW_ITEM_TYPE_GRE:
3975                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3976                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3977                         if (!gre->proto)
3978                                 return rte_flow_error_set(error, EINVAL,
3979                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3980                                                 (void *)items->type,
3981                                                 "next protocol not found");
3982                         if (!ipv4 && !ipv6)
3983                                 return rte_flow_error_set(error, EINVAL,
3984                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3985                                                 (void *)items->type,
3986                                                 "ip header not found");
3987                         if (ipv4 && !ipv4->next_proto_id)
3988                                 ipv4->next_proto_id = IPPROTO_GRE;
3989                         else if (ipv6 && !ipv6->proto)
3990                                 ipv6->proto = IPPROTO_GRE;
3991                         break;
3992                 case RTE_FLOW_ITEM_TYPE_VOID:
3993                         break;
3994                 default:
3995                         return rte_flow_error_set(error, EINVAL,
3996                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3997                                                   (void *)items->type,
3998                                                   "unsupported item type");
3999                         break;
4000                 }
4001                 temp_size += len;
4002         }
4003         *size = temp_size;
4004         return 0;
4005 }
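
/*
 * Editor's usage sketch (hypothetical helper): encode a minimal
 * ETH/IPv4/UDP/VXLAN encap chain. Every field left zero (ether_type, next
 * protocol, UDP destination port, VXLAN flags, IPv4 version/TTL) is filled
 * in with the defaults applied by the loop above; only the addresses and
 * the VNI are explicit here. The buffer must hold at least
 * MLX5_ENCAP_MAX_LEN bytes, as in the real callers.
 */
static __rte_unused int
convert_vxlan_encap_sketch(uint8_t *buf, size_t *size,
                           struct rte_flow_error *error)
{
        struct rte_ether_hdr eth = { .ether_type = 0 };
        struct rte_ipv4_hdr ipv4 = {
                .src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
                .dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
        };
        struct rte_udp_hdr udp = { .dst_port = 0 };
        struct rte_vxlan_hdr vxlan = { .vx_vni = RTE_BE32(42 << 8) };
        const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        return flow_dv_convert_encap_data(items, buf, size, error);
}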
4006
4007 static int
4008 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4009 {
4010         struct rte_ether_hdr *eth = NULL;
4011         struct rte_vlan_hdr *vlan = NULL;
4012         struct rte_ipv6_hdr *ipv6 = NULL;
4013         struct rte_udp_hdr *udp = NULL;
4014         char *next_hdr;
4015         uint16_t proto;
4016
4017         eth = (struct rte_ether_hdr *)data;
4018         next_hdr = (char *)(eth + 1);
4019         proto = RTE_BE16(eth->ether_type);
4020
4021         /* VLAN skipping */
4022         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4023                 vlan = (struct rte_vlan_hdr *)next_hdr;
4024                 proto = RTE_BE16(vlan->eth_proto);
4025                 next_hdr += sizeof(struct rte_vlan_hdr);
4026         }
4027
4028         /* HW calculates the IPv4 checksum; no need to proceed. */
4029         if (proto == RTE_ETHER_TYPE_IPV4)
4030                 return 0;
4031
4032         /* Non-IPv4/IPv6 header, not supported. */
4033         if (proto != RTE_ETHER_TYPE_IPV6) {
4034                 return rte_flow_error_set(error, ENOTSUP,
4035                                           RTE_FLOW_ERROR_TYPE_ACTION,
4036                                           NULL, "Cannot offload non-IPv4/IPv6");
4037         }
4038
4039         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4040
4041         /* Ignore non-UDP payload. */
4042         if (ipv6->proto != IPPROTO_UDP)
4043                 return 0;
4044
4045         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4046         udp->dgram_cksum = 0;
4047
4048         return 0;
4049 }
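
/*
 * Editor's note on the function above: the device computes the outer IPv4
 * checksum itself, so only the IPv6 case needs software help. A UDP
 * checksum over the encapsulated payload cannot be precomputed, so it is
 * cleared instead; a zero UDP checksum is permitted for tunnels over IPv6
 * (see RFC 6935).
 */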
4050
4051 /**
4052  * Convert L2 encap action to DV specification.
4053  *
4054  * @param[in] dev
4055  *   Pointer to rte_eth_dev structure.
4056  * @param[in] action
4057  *   Pointer to action structure.
4058  * @param[in, out] dev_flow
4059  *   Pointer to the mlx5_flow.
4060  * @param[in] transfer
4061  *   Mark if the flow is an E-Switch flow.
4062  * @param[out] error
4063  *   Pointer to the error structure.
4064  *
4065  * @return
4066  *   0 on success, a negative errno value otherwise and rte_errno is set.
4067  */
4068 static int
4069 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4070                                const struct rte_flow_action *action,
4071                                struct mlx5_flow *dev_flow,
4072                                uint8_t transfer,
4073                                struct rte_flow_error *error)
4074 {
4075         const struct rte_flow_item *encap_data;
4076         const struct rte_flow_action_raw_encap *raw_encap_data;
4077         struct mlx5_flow_dv_encap_decap_resource res = {
4078                 .reformat_type =
4079                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4080                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4081                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4082         };
4083
4084         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4085                 raw_encap_data =
4086                         (const struct rte_flow_action_raw_encap *)action->conf;
4087                 res.size = raw_encap_data->size;
4088                 memcpy(res.buf, raw_encap_data->data, res.size);
4089         } else {
4090                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4091                         encap_data =
4092                                 ((const struct rte_flow_action_vxlan_encap *)
4093                                                 action->conf)->definition;
4094                 else
4095                         encap_data =
4096                                 ((const struct rte_flow_action_nvgre_encap *)
4097                                                 action->conf)->definition;
4098                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4099                                                &res.size, error))
4100                         return -rte_errno;
4101         }
4102         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4103                 return -rte_errno;
4104         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4105                 return rte_flow_error_set(error, EINVAL,
4106                                           RTE_FLOW_ERROR_TYPE_ACTION,
4107                                           NULL, "can't create L2 encap action");
4108         return 0;
4109 }
4110
4111 /**
4112  * Convert L2 decap action to DV specification.
4113  *
4114  * @param[in] dev
4115  *   Pointer to rte_eth_dev structure.
4116  * @param[in, out] dev_flow
4117  *   Pointer to the mlx5_flow.
4118  * @param[in] transfer
4119  *   Mark if the flow is an E-Switch flow.
4120  * @param[out] error
4121  *   Pointer to the error structure.
4122  *
4123  * @return
4124  *   0 on success, a negative errno value otherwise and rte_errno is set.
4125  */
4126 static int
4127 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4128                                struct mlx5_flow *dev_flow,
4129                                uint8_t transfer,
4130                                struct rte_flow_error *error)
4131 {
4132         struct mlx5_flow_dv_encap_decap_resource res = {
4133                 .size = 0,
4134                 .reformat_type =
4135                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4136                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4137                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4138         };
4139
4140         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4141                 return rte_flow_error_set(error, EINVAL,
4142                                           RTE_FLOW_ERROR_TYPE_ACTION,
4143                                           NULL, "can't create L2 decap action");
4144         return 0;
4145 }
4146
4147 /**
4148  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4149  *
4150  * @param[in] dev
4151  *   Pointer to rte_eth_dev structure.
4152  * @param[in] action
4153  *   Pointer to action structure.
4154  * @param[in, out] dev_flow
4155  *   Pointer to the mlx5_flow.
4156  * @param[in] attr
4157  *   Pointer to the flow attributes.
4158  * @param[out] error
4159  *   Pointer to the error structure.
4160  *
4161  * @return
4162  *   0 on success, a negative errno value otherwise and rte_errno is set.
4163  */
4164 static int
4165 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4166                                 const struct rte_flow_action *action,
4167                                 struct mlx5_flow *dev_flow,
4168                                 const struct rte_flow_attr *attr,
4169                                 struct rte_flow_error *error)
4170 {
4171         const struct rte_flow_action_raw_encap *encap_data;
4172         struct mlx5_flow_dv_encap_decap_resource res;
4173
4174         memset(&res, 0, sizeof(res));
4175         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4176         res.size = encap_data->size;
4177         memcpy(res.buf, encap_data->data, res.size);
4178         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4179                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4180                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4181         if (attr->transfer)
4182                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4183         else
4184                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4185                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4186         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4187                 return rte_flow_error_set(error, EINVAL,
4188                                           RTE_FLOW_ERROR_TYPE_ACTION,
4189                                           NULL, "can't create encap action");
4190         return 0;
4191 }
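
/*
 * Editor's note: the size test above separates the two uses of raw
 * reformat. A buffer shorter than MLX5_ENCAPSULATION_DECISION_SIZE cannot
 * hold a complete outer L2/L3/L4 stack, so it is taken to be the inner L2
 * header restored after an L3 tunnel decap; anything longer is treated as
 * the full outer stack for an L2-to-L3 tunnel encap.
 */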
4192
4193 /**
4194  * Create action push VLAN.
4195  *
4196  * @param[in] dev
4197  *   Pointer to rte_eth_dev structure.
4198  * @param[in] attr
4199  *   Pointer to the flow attributes.
4200  * @param[in] vlan
4201  *   Pointer to the vlan to push to the Ethernet header.
4202  * @param[in, out] dev_flow
4203  *   Pointer to the mlx5_flow.
4204  * @param[out] error
4205  *   Pointer to the error structure.
4206  *
4207  * @return
4208  *   0 on success, a negative errno value otherwise and rte_errno is set.
4209  */
4210 static int
4211 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4212                                 const struct rte_flow_attr *attr,
4213                                 const struct rte_vlan_hdr *vlan,
4214                                 struct mlx5_flow *dev_flow,
4215                                 struct rte_flow_error *error)
4216 {
4217         struct mlx5_flow_dv_push_vlan_action_resource res;
4218
4219         memset(&res, 0, sizeof(res));
4220         res.vlan_tag =
4221                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4222                                  vlan->vlan_tci);
4223         if (attr->transfer)
4224                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4225         else
4226                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4227                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4228         return flow_dv_push_vlan_action_resource_register
4229                                             (dev, &res, dev_flow, error);
4230 }
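
/*
 * Editor's sketch: the 32-bit vlan_tag programmed to HW carries the TPID
 * in the upper 16 bits and the TCI (PCP/DEI/VID) in the lower 16, in
 * network byte order. The values below (TPID 0x8100, PCP 3, VID 100) are
 * illustrative; eth_proto and vlan_tci are assumed to hold CPU-order
 * values at this point, matching the computation above.
 */
static __rte_unused rte_be32_t
push_vlan_tag_sketch(void)
{
        const struct rte_vlan_hdr vlan = {
                .eth_proto = RTE_ETHER_TYPE_VLAN,
                .vlan_tci = (3 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100,
        };

        return rte_cpu_to_be_32(((uint32_t)vlan.eth_proto) << 16 |
                                vlan.vlan_tci);
}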
4231
4232 /**
4233  * Validate the modify-header actions.
4234  *
4235  * @param[in] action_flags
4236  *   Holds the actions detected until now.
4237  * @param[in] action
4238  *   Pointer to the modify action.
4239  * @param[out] error
4240  *   Pointer to error structure.
4241  *
4242  * @return
4243  *   0 on success, a negative errno value otherwise and rte_errno is set.
4244  */
4245 static int
4246 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4247                                    const struct rte_flow_action *action,
4248                                    struct rte_flow_error *error)
4249 {
4250         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4251                 return rte_flow_error_set(error, EINVAL,
4252                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4253                                           NULL, "action configuration not set");
4254         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4255                 return rte_flow_error_set(error, EINVAL,
4256                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4257                                           "can't have encap action before"
4258                                           " modify action");
4259         return 0;
4260 }
4261
4262 /**
4263  * Validate the modify-header MAC address actions.
4264  *
4265  * @param[in] action_flags
4266  *   Holds the actions detected until now.
4267  * @param[in] action
4268  *   Pointer to the modify action.
4269  * @param[in] item_flags
4270  *   Holds the items detected.
4271  * @param[out] error
4272  *   Pointer to error structure.
4273  *
4274  * @return
4275  *   0 on success, a negative errno value otherwise and rte_errno is set.
4276  */
4277 static int
4278 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4279                                    const struct rte_flow_action *action,
4280                                    const uint64_t item_flags,
4281                                    struct rte_flow_error *error)
4282 {
4283         int ret = 0;
4284
4285         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4286         if (!ret) {
4287                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4288                         return rte_flow_error_set(error, EINVAL,
4289                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4290                                                   NULL,
4291                                                   "no L2 item in pattern");
4292         }
4293         return ret;
4294 }
4295
4296 /**
4297  * Validate the modify-header IPv4 address actions.
4298  *
4299  * @param[in] action_flags
4300  *   Holds the actions detected until now.
4301  * @param[in] action
4302  *   Pointer to the modify action.
4303  * @param[in] item_flags
4304  *   Holds the items detected.
4305  * @param[out] error
4306  *   Pointer to error structure.
4307  *
4308  * @return
4309  *   0 on success, a negative errno value otherwise and rte_errno is set.
4310  */
4311 static int
4312 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4313                                     const struct rte_flow_action *action,
4314                                     const uint64_t item_flags,
4315                                     struct rte_flow_error *error)
4316 {
4317         int ret = 0;
4318         uint64_t layer;
4319
4320         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4321         if (!ret) {
4322                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4323                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4324                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4325                 if (!(item_flags & layer))
4326                         return rte_flow_error_set(error, EINVAL,
4327                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4328                                                   NULL,
4329                                                   "no ipv4 item in pattern");
4330         }
4331         return ret;
4332 }
4333
4334 /**
4335  * Validate the modify-header IPv6 address actions.
4336  *
4337  * @param[in] action_flags
4338  *   Holds the actions detected until now.
4339  * @param[in] action
4340  *   Pointer to the modify action.
4341  * @param[in] item_flags
4342  *   Holds the items detected.
4343  * @param[out] error
4344  *   Pointer to error structure.
4345  *
4346  * @return
4347  *   0 on success, a negative errno value otherwise and rte_errno is set.
4348  */
4349 static int
4350 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4351                                     const struct rte_flow_action *action,
4352                                     const uint64_t item_flags,
4353                                     struct rte_flow_error *error)
4354 {
4355         int ret = 0;
4356         uint64_t layer;
4357
4358         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4359         if (!ret) {
4360                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4361                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4362                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4363                 if (!(item_flags & layer))
4364                         return rte_flow_error_set(error, EINVAL,
4365                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4366                                                   NULL,
4367                                                   "no ipv6 item in pattern");
4368         }
4369         return ret;
4370 }
4371
4372 /**
4373  * Validate the modify-header TP actions.
4374  *
4375  * @param[in] action_flags
4376  *   Holds the actions detected until now.
4377  * @param[in] action
4378  *   Pointer to the modify action.
4379  * @param[in] item_flags
4380  *   Holds the items detected.
4381  * @param[out] error
4382  *   Pointer to error structure.
4383  *
4384  * @return
4385  *   0 on success, a negative errno value otherwise and rte_errno is set.
4386  */
4387 static int
4388 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4389                                   const struct rte_flow_action *action,
4390                                   const uint64_t item_flags,
4391                                   struct rte_flow_error *error)
4392 {
4393         int ret = 0;
4394         uint64_t layer;
4395
4396         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4397         if (!ret) {
4398                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4399                                  MLX5_FLOW_LAYER_INNER_L4 :
4400                                  MLX5_FLOW_LAYER_OUTER_L4;
4401                 if (!(item_flags & layer))
4402                         return rte_flow_error_set(error, EINVAL,
4403                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4404                                                   NULL, "no transport layer "
4405                                                   "in pattern");
4406         }
4407         return ret;
4408 }
4409
4410 /**
4411  * Validate the modify-header actions of increment/decrement
4412  * TCP Sequence-number.
4413  *
4414  * @param[in] action_flags
4415  *   Holds the actions detected until now.
4416  * @param[in] action
4417  *   Pointer to the modify action.
4418  * @param[in] item_flags
4419  *   Holds the items detected.
4420  * @param[out] error
4421  *   Pointer to error structure.
4422  *
4423  * @return
4424  *   0 on success, a negative errno value otherwise and rte_errno is set.
4425  */
4426 static int
4427 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4428                                        const struct rte_flow_action *action,
4429                                        const uint64_t item_flags,
4430                                        struct rte_flow_error *error)
4431 {
4432         int ret = 0;
4433         uint64_t layer;
4434
4435         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4436         if (!ret) {
4437                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4438                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4439                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4440                 if (!(item_flags & layer))
4441                         return rte_flow_error_set(error, EINVAL,
4442                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4443                                                   NULL, "no TCP item in"
4444                                                   " pattern");
4445                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4446                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4447                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4448                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4449                         return rte_flow_error_set(error, EINVAL,
4450                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4451                                                   NULL,
4452                                                   "cannot decrease and increase"
4453                                                   " TCP sequence number"
4454                                                   " at the same time");
4455         }
4456         return ret;
4457 }
4458
4459 /**
4460  * Validate the modify-header actions of increment/decrement
4461  * TCP Acknowledgment number.
4462  *
4463  * @param[in] action_flags
4464  *   Holds the actions detected until now.
4465  * @param[in] action
4466  *   Pointer to the modify action.
4467  * @param[in] item_flags
4468  *   Holds the items detected.
4469  * @param[out] error
4470  *   Pointer to error structure.
4471  *
4472  * @return
4473  *   0 on success, a negative errno value otherwise and rte_errno is set.
4474  */
4475 static int
4476 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4477                                        const struct rte_flow_action *action,
4478                                        const uint64_t item_flags,
4479                                        struct rte_flow_error *error)
4480 {
4481         int ret = 0;
4482         uint64_t layer;
4483
4484         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4485         if (!ret) {
4486                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4487                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4488                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4489                 if (!(item_flags & layer))
4490                         return rte_flow_error_set(error, EINVAL,
4491                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4492                                                   NULL, "no TCP item in"
4493                                                   " pattern");
4494                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4495                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4496                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4497                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4498                         return rte_flow_error_set(error, EINVAL,
4499                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4500                                                   NULL,
4501                                                   "cannot decrease and increase"
4502                                                   " TCP acknowledgment number"
4503                                                   " at the same time");
4504         }
4505         return ret;
4506 }
4507
4508 /**
4509  * Validate the modify-header TTL actions.
4510  *
4511  * @param[in] action_flags
4512  *   Holds the actions detected until now.
4513  * @param[in] action
4514  *   Pointer to the modify action.
4515  * @param[in] item_flags
4516  *   Holds the items detected.
4517  * @param[out] error
4518  *   Pointer to error structure.
4519  *
4520  * @return
4521  *   0 on success, a negative errno value otherwise and rte_errno is set.
4522  */
4523 static int
4524 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4525                                    const struct rte_flow_action *action,
4526                                    const uint64_t item_flags,
4527                                    struct rte_flow_error *error)
4528 {
4529         int ret = 0;
4530         uint64_t layer;
4531
4532         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4533         if (!ret) {
4534                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4535                                  MLX5_FLOW_LAYER_INNER_L3 :
4536                                  MLX5_FLOW_LAYER_OUTER_L3;
4537                 if (!(item_flags & layer))
4538                         return rte_flow_error_set(error, EINVAL,
4539                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4540                                                   NULL,
4541                                                   "no IP protocol in pattern");
4542         }
4543         return ret;
4544 }
4545
4546 /**
4547  * Validate the generic modify field actions.
      *
4548  * @param[in] dev
4549  *   Pointer to the rte_eth_dev structure.
4550  * @param[in] action_flags
4551  *   Holds the actions detected until now.
4552  * @param[in] action
4553  *   Pointer to the modify action.
4554  * @param[in] attr
4555  *   Pointer to the flow attributes.
4556  * @param[out] error
4557  *   Pointer to error structure.
4558  *
4559  * @return
4560  *   Number of header fields to modify (0 or more) on success,
4561  *   a negative errno value otherwise and rte_errno is set.
4562  */
4563 static int
4564 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4565                                    const uint64_t action_flags,
4566                                    const struct rte_flow_action *action,
4567                                    const struct rte_flow_attr *attr,
4568                                    struct rte_flow_error *error)
4569 {
4570         int ret = 0;
4571         struct mlx5_priv *priv = dev->data->dev_private;
4572         struct mlx5_dev_config *config = &priv->config;
4573         const struct rte_flow_action_modify_field *action_modify_field =
4574                 action->conf;
4575         uint32_t dst_width =
4576                 mlx5_flow_item_field_width(action_modify_field->dst.field);
4577         uint32_t src_width =
4578                 mlx5_flow_item_field_width(action_modify_field->src.field);
4579
4580         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4581         if (ret)
4582                 return ret;
4583
4584         if (action_modify_field->width == 0)
4585                 return rte_flow_error_set(error, EINVAL,
4586                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4587                                 "no bits are requested to be modified");
4588         else if (action_modify_field->width > dst_width ||
4589                  action_modify_field->width > src_width)
4590                 return rte_flow_error_set(error, EINVAL,
4591                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4592                                 "cannot modify more bits than"
4593                                 " the width of a field");
4594         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4595             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4596                 if ((action_modify_field->dst.offset +
4597                      action_modify_field->width > dst_width) ||
4598                     (action_modify_field->dst.offset % 32))
4599                         return rte_flow_error_set(error, EINVAL,
4600                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4601                                         "destination offset is too big"
4602                                         " or not aligned to 4 bytes");
4603                 if (action_modify_field->dst.level &&
4604                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4605                         return rte_flow_error_set(error, ENOTSUP,
4606                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4607                                         "inner header fields modification"
4608                                         " is not supported");
4609         }
4610         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4611             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4612                 if (!attr->transfer && !attr->group)
4613                         return rte_flow_error_set(error, ENOTSUP,
4614                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4615                                         "modify field action is not"
4616                                         " supported for group 0");
4617                 if ((action_modify_field->src.offset +
4618                      action_modify_field->width > src_width) ||
4619                     (action_modify_field->src.offset % 32))
4620                         return rte_flow_error_set(error, EINVAL,
4621                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4622                                         "source offset is too big"
4623                                         " or not aligned to 4 bytes");
4624                 if (action_modify_field->src.level &&
4625                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4626                         return rte_flow_error_set(error, ENOTSUP,
4627                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4628                                         "inner header fields modification"
4629                                         " is not supported");
4630         }
4631         if (action_modify_field->dst.field ==
4632             action_modify_field->src.field)
4633                 return rte_flow_error_set(error, EINVAL,
4634                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4635                                 "source and destination fields"
4636                                 " cannot be the same");
4637         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4638             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4639                 return rte_flow_error_set(error, EINVAL,
4640                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4641                                 "immediate value or a pointer to it"
4642                                 " cannot be used as a destination");
4643         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4644             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4645                 return rte_flow_error_set(error, ENOTSUP,
4646                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4647                                 "modification of an arbitrary"
4648                                 " place in a packet is not supported");
4649         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4650             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4651                 return rte_flow_error_set(error, ENOTSUP,
4652                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4653                                 "modification of the 802.1Q Tag"
4654                                 " Identifier is not supported");
4655         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4656             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4657                 return rte_flow_error_set(error, ENOTSUP,
4658                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4659                                 "modification of the VXLAN Network"
4660                                 " Identifier is not supported");
4661         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4662             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4663                 return rte_flow_error_set(error, ENOTSUP,
4664                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4665                                 "modification of the GENEVE Network"
4666                                 " Identifier is not supported");
4667         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4668             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4669             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4670             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4671                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4672                     !mlx5_flow_ext_mreg_supported(dev))
4673                         return rte_flow_error_set(error, ENOTSUP,
4674                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4675                                         "cannot modify mark or metadata without"
4676                                         " extended metadata register support");
4677         }
4678         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4679                 return rte_flow_error_set(error, ENOTSUP,
4680                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4681                                 "add and sub operations"
4682                                 " are not supported");
4683         return (action_modify_field->width / 32) +
4684                !!(action_modify_field->width % 32);
4685 }
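
/*
 * Editor's note: the return value above is ceil(width / 32), i.e. the
 * number of 32-bit modify-header fields the action consumes:
 */
static __rte_unused int
modify_field_count_sketch(uint32_t width)
{
        /* Width 1..32 -> 1 field, 33..64 -> 2 fields, and so on. */
        return (width / 32) + !!(width % 32);
}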
4686
4687 /**
4688  * Validate jump action.
4689  *
      * @param[in] dev
      *   Pointer to the rte_eth_dev structure.
      * @param[in] tunnel
      *   Pointer to the tunnel offload context, NULL if not a tunnel flow.
4690  * @param[in] action
4691  *   Pointer to the jump action.
4692  * @param[in] action_flags
4693  *   Holds the actions detected until now.
4694  * @param[in] attributes
4695  *   Pointer to flow attributes.
4696  * @param[in] external
4697  *   Whether the flow rule was created by a request external to the PMD.
4698  * @param[out] error
4699  *   Pointer to error structure.
4700  *
4701  * @return
4702  *   0 on success, a negative errno value otherwise and rte_errno is set.
4703  */
4704 static int
4705 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4706                              const struct mlx5_flow_tunnel *tunnel,
4707                              const struct rte_flow_action *action,
4708                              uint64_t action_flags,
4709                              const struct rte_flow_attr *attributes,
4710                              bool external, struct rte_flow_error *error)
4711 {
4712         uint32_t target_group, table;
4713         int ret = 0;
4714         struct flow_grp_info grp_info = {
4715                 .external = !!external,
4716                 .transfer = !!attributes->transfer,
4717                 .fdb_def_rule = 1,
4718                 .std_tbl_fix = 0
4719         };
4720         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4721                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4722                 return rte_flow_error_set(error, EINVAL,
4723                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4724                                           "can't have 2 fate actions in"
4725                                           " the same flow");
4726         if (!action->conf)
4727                 return rte_flow_error_set(error, EINVAL,
4728                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4729                                           NULL, "action configuration not set");
4730         target_group =
4731                 ((const struct rte_flow_action_jump *)action->conf)->group;
4732         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4733                                        &grp_info, error);
4734         if (ret)
4735                 return ret;
4736         if (attributes->group == target_group &&
4737             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4738                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4739                 return rte_flow_error_set(error, EINVAL,
4740                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4741                                           "target group must be other than"
4742                                           " the current flow group");
4743         return 0;
4744 }
4745
4746 /**
4747  * Validate the port_id action.
4748  *
4749  * @param[in] dev
4750  *   Pointer to rte_eth_dev structure.
4751  * @param[in] action_flags
4752  *   Bit-fields that holds the actions detected until now.
4753  * @param[in] action
4754  *   Port_id RTE action structure.
4755  * @param[in] attr
4756  *   Attributes of flow that includes this action.
4757  * @param[out] error
4758  *   Pointer to error structure.
4759  *
4760  * @return
4761  *   0 on success, a negative errno value otherwise and rte_errno is set.
4762  */
4763 static int
4764 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4765                                 uint64_t action_flags,
4766                                 const struct rte_flow_action *action,
4767                                 const struct rte_flow_attr *attr,
4768                                 struct rte_flow_error *error)
4769 {
4770         const struct rte_flow_action_port_id *port_id;
4771         struct mlx5_priv *act_priv;
4772         struct mlx5_priv *dev_priv;
4773         uint16_t port;
4774
4775         if (!attr->transfer)
4776                 return rte_flow_error_set(error, ENOTSUP,
4777                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4778                                           NULL,
4779                                           "port id action is valid in transfer"
4780                                           " mode only");
4781         if (!action || !action->conf)
4782                 return rte_flow_error_set(error, ENOTSUP,
4783                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4784                                           NULL,
4785                                           "port id action parameters must be"
4786                                           " specified");
4787         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4788                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4789                 return rte_flow_error_set(error, EINVAL,
4790                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4791                                           "can have only one fate actions in"
4792                                           "can have only one fate action in"
4793         dev_priv = mlx5_dev_to_eswitch_info(dev);
4794         if (!dev_priv)
4795                 return rte_flow_error_set(error, rte_errno,
4796                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4797                                           NULL,
4798                                           "failed to obtain E-Switch info");
4799         port_id = action->conf;
4800         port = port_id->original ? dev->data->port_id : port_id->id;
4801         act_priv = mlx5_port_to_eswitch_info(port, false);
4802         if (!act_priv)
4803                 return rte_flow_error_set
4804                                 (error, rte_errno,
4805                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4806                                  "failed to obtain E-Switch port id for port");
4807         if (act_priv->domain_id != dev_priv->domain_id)
4808                 return rte_flow_error_set
4809                                 (error, EINVAL,
4810                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4811                                  "port does not belong to"
4812                                  " E-Switch being configured");
4813         return 0;
4814 }
4815
4816 /**
4817  * Get the maximum number of modify header actions.
4818  *
4819  * @param dev
4820  *   Pointer to rte_eth_dev structure.
4821  * @param flags
4822  *   Flag bits used to check whether the table is root level.
4823  *
4824  * @return
4825  *   Max number of modify header actions device can support.
4826  */
4827 static inline unsigned int
4828 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4829                               uint64_t flags)
4830 {
4831         /*
4832          * There's no way to directly query the max capacity from FW.
4833          * The maximal value on root table should be assumed to be supported.
4834          */
4835         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4836                 return MLX5_MAX_MODIFY_NUM;
4837         else
4838                 return MLX5_ROOT_TBL_MODIFY_NUM;
4839 }
4840
4841 /**
4842  * Validate the meter action.
4843  *
4844  * @param[in] dev
4845  *   Pointer to rte_eth_dev structure.
4846  * @param[in] action_flags
4847  *   Bit-fields that holds the actions detected until now.
4848  * @param[in] action
4849  *   Pointer to the meter action.
4850  * @param[in] attr
4851  *   Attributes of flow that includes this action.
      * @param[out] def_policy
      *   Pointer to a flag set to true when the meter uses the default policy.
4852  * @param[out] error
4853  *   Pointer to error structure.
4854  *
4855  * @return
4856  *   0 on success, a negative errno value otherwise and rte_errno is set.
4857  */
4858 static int
4859 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4860                                 uint64_t action_flags,
4861                                 const struct rte_flow_action *action,
4862                                 const struct rte_flow_attr *attr,
4863                                 bool *def_policy,
4864                                 struct rte_flow_error *error)
4865 {
4866         struct mlx5_priv *priv = dev->data->dev_private;
4867         const struct rte_flow_action_meter *am = action->conf;
4868         struct mlx5_flow_meter_info *fm;
4869         struct mlx5_flow_meter_policy *mtr_policy;
4870         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
4871
4872         if (!am)
4873                 return rte_flow_error_set(error, EINVAL,
4874                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4875                                           "meter action conf is NULL");
4876
4877         if (action_flags & MLX5_FLOW_ACTION_METER)
4878                 return rte_flow_error_set(error, ENOTSUP,
4879                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4880                                           "meter chaining not support");
4881         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4882                 return rte_flow_error_set(error, ENOTSUP,
4883                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4884                                           "meter with jump not support");
4885         if (!priv->mtr_en)
4886                 return rte_flow_error_set(error, ENOTSUP,
4887                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4888                                           NULL,
4889                                           "meter action not supported");
4890         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
4891         if (!fm)
4892                 return rte_flow_error_set(error, EINVAL,
4893                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4894                                           "Meter not found");
4895         /* aso meter can always be shared by different domains */
4896         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
4897             !(fm->transfer == attr->transfer ||
4898               (!fm->ingress && !attr->ingress && attr->egress) ||
4899               (!fm->egress && !attr->egress && attr->ingress)))
4900                 return rte_flow_error_set(error, EINVAL,
4901                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4902                         "Flow attributes domain are either invalid "
4903                         "or have a domain conflict with current "
4904                         "meter attributes");
4905         if (fm->def_policy) {
4906                 if (!((attr->transfer &&
4907                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
4908                         (attr->egress &&
4909                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
4910                         (attr->ingress &&
4911                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
4912                         return rte_flow_error_set(error, EINVAL,
4913                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4914                                           "Flow attributes domain "
4915                                           "have a conflict with current "
4916                                           "meter domain attributes");
4917                 *def_policy = true;
4918         } else {
4919                 mtr_policy = mlx5_flow_meter_policy_find(dev,
4920                                                 fm->policy_id, NULL);
4921                 if (!mtr_policy)
4922                         return rte_flow_error_set(error, EINVAL,
4923                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4924                                           "Invalid policy id for meter ");
4925                 if (!((attr->transfer && mtr_policy->transfer) ||
4926                         (attr->egress && mtr_policy->egress) ||
4927                         (attr->ingress && mtr_policy->ingress)))
4928                         return rte_flow_error_set(error, EINVAL,
4929                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4930                                           "Flow attributes domain "
4931                                           "have a conflict with current "
4932                                           "meter domain attributes");
4933                 *def_policy = false;
4934         }
4935         return 0;
4936 }
4937
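/*
 * Usage sketch (illustrative): the flow validation loop is expected to
 * feed each METER action through the helper above and record the result
 * in the action flags, e.g.:
 *
 *	bool def_policy = true;
 *
 *	ret = mlx5_flow_validate_action_meter(dev, action_flags, action,
 *					      attr, &def_policy, error);
 *	if (ret < 0)
 *		return ret;
 *	action_flags |= MLX5_FLOW_ACTION_METER;
 */
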
4938 /**
4939  * Validate the age action.
4940  *
4941  * @param[in] action_flags
4942  *   Holds the actions detected until now.
4943  * @param[in] action
4944  *   Pointer to the age action.
4945  * @param[in] dev
4946  *   Pointer to the Ethernet device structure.
4947  * @param[out] error
4948  *   Pointer to error structure.
4949  *
4950  * @return
4951  *   0 on success, a negative errno value otherwise and rte_errno is set.
4952  */
4953 static int
4954 flow_dv_validate_action_age(uint64_t action_flags,
4955                             const struct rte_flow_action *action,
4956                             struct rte_eth_dev *dev,
4957                             struct rte_flow_error *error)
4958 {
4959         struct mlx5_priv *priv = dev->data->dev_private;
4960         const struct rte_flow_action_age *age = action->conf;
4961
4962         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4963             !priv->sh->aso_age_mng))
4964                 return rte_flow_error_set(error, ENOTSUP,
4965                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4966                                           NULL,
4967                                           "age action not supported");
4968         if (!(action->conf))
4969                 return rte_flow_error_set(error, EINVAL,
4970                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4971                                           "configuration cannot be null");
4972         if (!(age->timeout))
4973                 return rte_flow_error_set(error, EINVAL,
4974                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4975                                           "invalid timeout value 0");
4976         if (action_flags & MLX5_FLOW_ACTION_AGE)
4977                 return rte_flow_error_set(error, EINVAL,
4978                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4979                                           "duplicate age actions set");
4980         return 0;
4981 }
4982
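/*
 * Illustrative rte_flow-level example (hypothetical values): an AGE
 * action passing the checks above needs a non-NULL configuration and a
 * non-zero timeout, and may appear only once in the action list.
 *
 *	struct rte_flow_action_age age_conf = { .timeout = 10 };
 *	struct rte_flow_action age_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_AGE,
 *		.conf = &age_conf,
 *	};
 */
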
4983 /**
4984  * Validate the modify-header IPv4 DSCP actions.
4985  *
4986  * @param[in] action_flags
4987  *   Holds the actions detected until now.
4988  * @param[in] action
4989  *   Pointer to the modify action.
4990  * @param[in] item_flags
4991  *   Holds the items detected.
4992  * @param[out] error
4993  *   Pointer to error structure.
4994  *
4995  * @return
4996  *   0 on success, a negative errno value otherwise and rte_errno is set.
4997  */
4998 static int
4999 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5000                                          const struct rte_flow_action *action,
5001                                          const uint64_t item_flags,
5002                                          struct rte_flow_error *error)
5003 {
5004         int ret = 0;
5005
5006         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5007         if (!ret) {
5008                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5009                         return rte_flow_error_set(error, EINVAL,
5010                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5011                                                   NULL,
5012                                                   "no ipv4 item in pattern");
5013         }
5014         return ret;
5015 }
5016
5017 /**
5018  * Validate the modify-header IPv6 DSCP actions.
5019  *
5020  * @param[in] action_flags
5021  *   Holds the actions detected until now.
5022  * @param[in] action
5023  *   Pointer to the modify action.
5024  * @param[in] item_flags
5025  *   Holds the items detected.
5026  * @param[out] error
5027  *   Pointer to error structure.
5028  *
5029  * @return
5030  *   0 on success, a negative errno value otherwise and rte_errno is set.
5031  */
5032 static int
5033 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5034                                          const struct rte_flow_action *action,
5035                                          const uint64_t item_flags,
5036                                          struct rte_flow_error *error)
5037 {
5038         int ret = 0;
5039
5040         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5041         if (!ret) {
5042                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5043                         return rte_flow_error_set(error, EINVAL,
5044                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5045                                                   NULL,
5046                                                   "no ipv6 item in pattern");
5047         }
5048         return ret;
5049 }
5050
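/*
 * Illustrative pairing (hypothetical flows): the two DSCP validators
 * above accept the modify action only when the matching L3 item is
 * present in the pattern, e.g.:
 *
 *	pattern ETH / IPV4 / END with action SET_IPV4_DSCP -> accepted,
 *	    item_flags contains MLX5_FLOW_LAYER_L3_IPV4;
 *	pattern ETH / IPV6 / END with action SET_IPV4_DSCP -> rejected
 *	    with "no ipv4 item in pattern".
 */
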
5051 /**
5052  * Match modify-header resource.
5053  *
5054  * @param list
5055  *   Pointer to the hash list.
5056  * @param entry
5057  *   Pointer to the existing resource entry object.
5058  * @param key
5059  *   Key of the new entry.
5060  * @param cb_ctx
5061  *   Pointer to the new modify-header resource.
5062  *
5063  * @return
5064  *   0 on matching, non-zero otherwise.
5065  */
5066 int
5067 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5068                         struct mlx5_hlist_entry *entry,
5069                         uint64_t key __rte_unused, void *cb_ctx)
5070 {
5071         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5072         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5073         struct mlx5_flow_dv_modify_hdr_resource *resource =
5074                         container_of(entry, typeof(*resource), entry);
5075         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5076
5077         key_len += ref->actions_num * sizeof(ref->actions[0]);
5078         return ref->actions_num != resource->actions_num ||
5079                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5080 }
5081
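/*
 * Key layout sketch (illustrative restatement of the callback above):
 * the cache key spans the tail of the resource structure starting at
 * ft_type plus the variable-size actions array, so two resources match
 * only when the header fields and every action byte are equal:
 *
 *	key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type) +
 *		  ref->actions_num * sizeof(ref->actions[0]);
 *	match = ref->actions_num == resource->actions_num &&
 *		!memcmp(&ref->ft_type, &resource->ft_type, key_len);
 */
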
5082 struct mlx5_hlist_entry *
5083 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5084                          void *cb_ctx)
5085 {
5086         struct mlx5_dev_ctx_shared *sh = list->ctx;
5087         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5088         struct mlx5dv_dr_domain *ns;
5089         struct mlx5_flow_dv_modify_hdr_resource *entry;
5090         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5091         int ret;
5092         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5093         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5094
5095         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5096                             SOCKET_ID_ANY);
5097         if (!entry) {
5098                 rte_flow_error_set(ctx->error, ENOMEM,
5099                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5100                                    "cannot allocate resource memory");
5101                 return NULL;
5102         }
5103         rte_memcpy(&entry->ft_type,
5104                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5105                    key_len + data_len);
5106         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5107                 ns = sh->fdb_domain;
5108         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5109                 ns = sh->tx_domain;
5110         else
5111                 ns = sh->rx_domain;
5112         ret = mlx5_flow_os_create_flow_action_modify_header
5113                                         (sh->ctx, ns, entry,
5114                                          data_len, &entry->action);
5115         if (ret) {
5116                 mlx5_free(entry);
5117                 rte_flow_error_set(ctx->error, ENOMEM,
5118                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5119                                    NULL, "cannot create modification action");
5120                 return NULL;
5121         }
5122         return &entry->entry;
5123 }
5124
5125 /**
5126  * Validate the sample action.
5127  *
5128  * @param[in, out] action_flags
5129  *   Holds the actions detected until now.
5130  * @param[in] action
5131  *   Pointer to the sample action.
5132  * @param[in] dev
5133  *   Pointer to the Ethernet device structure.
5134  * @param[in] attr
5135  *   Attributes of flow that includes this action.
5136  * @param[in] item_flags
5137  *   Holds the items detected.
5138  * @param[in] rss
5139  *   Pointer to the RSS action.
5140  * @param[out] sample_rss
5141  *   Pointer to the RSS action in sample action list.
5142  * @param[out] count
5143  *   Pointer to the COUNT action in sample action list.
5144  * @param[out] fdb_mirror_limit
5145  *   Pointer to the FDB mirror limitation flag.
5146  * @param[out] error
5147  *   Pointer to error structure.
5148  *
5149  * @return
5150  *   0 on success, a negative errno value otherwise and rte_errno is set.
5151  */
5152 static int
5153 flow_dv_validate_action_sample(uint64_t *action_flags,
5154                                const struct rte_flow_action *action,
5155                                struct rte_eth_dev *dev,
5156                                const struct rte_flow_attr *attr,
5157                                uint64_t item_flags,
5158                                const struct rte_flow_action_rss *rss,
5159                                const struct rte_flow_action_rss **sample_rss,
5160                                const struct rte_flow_action_count **count,
5161                                int *fdb_mirror_limit,
5162                                struct rte_flow_error *error)
5163 {
5164         struct mlx5_priv *priv = dev->data->dev_private;
5165         struct mlx5_dev_config *dev_conf = &priv->config;
5166         const struct rte_flow_action_sample *sample = action->conf;
5167         const struct rte_flow_action *act;
5168         uint64_t sub_action_flags = 0;
5169         uint16_t queue_index = 0xFFFF;
5170         int actions_n = 0;
5171         int ret;
5172
5173         if (!sample)
5174                 return rte_flow_error_set(error, EINVAL,
5175                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5176                                           "configuration cannot be NULL");
5177         if (sample->ratio == 0)
5178                 return rte_flow_error_set(error, EINVAL,
5179                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5180                                           "ratio value starts from 1");
5181         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5182                 return rte_flow_error_set(error, ENOTSUP,
5183                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5184                                           NULL,
5185                                           "sample action not supported");
5186         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5187                 return rte_flow_error_set(error, EINVAL,
5188                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5189                                           "Multiple sample actions not "
5190                                           "supported");
5191         if (*action_flags & MLX5_FLOW_ACTION_METER)
5192                 return rte_flow_error_set(error, EINVAL,
5193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5194                                           "wrong action order, meter should "
5195                                           "be after sample action");
5196         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5197                 return rte_flow_error_set(error, EINVAL,
5198                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5199                                           "wrong action order, jump should "
5200                                           "be after sample action");
5201         act = sample->actions;
5202         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5203                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5204                         return rte_flow_error_set(error, ENOTSUP,
5205                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5206                                                   act, "too many actions");
5207                 switch (act->type) {
5208                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5209                         ret = mlx5_flow_validate_action_queue(act,
5210                                                               sub_action_flags,
5211                                                               dev,
5212                                                               attr, error);
5213                         if (ret < 0)
5214                                 return ret;
5215                         queue_index = ((const struct rte_flow_action_queue *)
5216                                                         (act->conf))->index;
5217                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5218                         ++actions_n;
5219                         break;
5220                 case RTE_FLOW_ACTION_TYPE_RSS:
5221                         *sample_rss = act->conf;
5222                         ret = mlx5_flow_validate_action_rss(act,
5223                                                             sub_action_flags,
5224                                                             dev, attr,
5225                                                             item_flags,
5226                                                             error);
5227                         if (ret < 0)
5228                                 return ret;
5229                         if (rss && *sample_rss &&
5230                             ((*sample_rss)->level != rss->level ||
5231                             (*sample_rss)->types != rss->types))
5232                                 return rte_flow_error_set(error, ENOTSUP,
5233                                         RTE_FLOW_ERROR_TYPE_ACTION,
5234                                         NULL,
5235                                         "Can't use the different RSS types "
5236                                         "or level in the same flow");
5237                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5238                                 queue_index = (*sample_rss)->queue[0];
5239                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5240                         ++actions_n;
5241                         break;
5242                 case RTE_FLOW_ACTION_TYPE_MARK:
5243                         ret = flow_dv_validate_action_mark(dev, act,
5244                                                            sub_action_flags,
5245                                                            attr, error);
5246                         if (ret < 0)
5247                                 return ret;
5248                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5249                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5250                                                 MLX5_FLOW_ACTION_MARK_EXT;
5251                         else
5252                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5253                         ++actions_n;
5254                         break;
5255                 case RTE_FLOW_ACTION_TYPE_COUNT:
5256                         ret = flow_dv_validate_action_count
5257                                 (dev, act,
5258                                  *action_flags | sub_action_flags,
5259                                  error);
5260                         if (ret < 0)
5261                                 return ret;
5262                         *count = act->conf;
5263                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5264                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5265                         ++actions_n;
5266                         break;
5267                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5268                         ret = flow_dv_validate_action_port_id(dev,
5269                                                               sub_action_flags,
5270                                                               act,
5271                                                               attr,
5272                                                               error);
5273                         if (ret)
5274                                 return ret;
5275                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5276                         ++actions_n;
5277                         break;
5278                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5279                         ret = flow_dv_validate_action_raw_encap_decap
5280                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5281                                  &actions_n, action, item_flags, error);
5282                         if (ret < 0)
5283                                 return ret;
5284                         ++actions_n;
5285                         break;
5286                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5287                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5288                         ret = flow_dv_validate_action_l2_encap(dev,
5289                                                                sub_action_flags,
5290                                                                act, attr,
5291                                                                error);
5292                         if (ret < 0)
5293                                 return ret;
5294                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5295                         ++actions_n;
5296                         break;
5297                 default:
5298                         return rte_flow_error_set(error, ENOTSUP,
5299                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5300                                                   NULL,
5301                                                   "Doesn't support optional "
5302                                                   "action");
5303                 }
5304         }
5305         if (attr->ingress && !attr->transfer) {
5306                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5307                                           MLX5_FLOW_ACTION_RSS)))
5308                         return rte_flow_error_set(error, EINVAL,
5309                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5310                                                   NULL,
5311                                                   "Ingress must has a dest "
5312                                                   "QUEUE for Sample");
5313         } else if (attr->egress && !attr->transfer) {
5314                 return rte_flow_error_set(error, ENOTSUP,
5315                                           RTE_FLOW_ERROR_TYPE_ACTION,
5316                                           NULL,
5317                                           "Sample Only support Ingress "
5318                                           "or E-Switch");
5319         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5320                 MLX5_ASSERT(attr->transfer);
5321                 if (sample->ratio > 1)
5322                         return rte_flow_error_set(error, ENOTSUP,
5323                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5324                                                   NULL,
5325                                                   "E-Switch doesn't support "
5326                                                   "any optional action "
5327                                                   "for sampling");
5328                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5329                         return rte_flow_error_set(error, ENOTSUP,
5330                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5331                                                   NULL,
5332                                                   "unsupported action QUEUE");
5333                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5334                         return rte_flow_error_set(error, ENOTSUP,
5335                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5336                                                   NULL,
5337                                                   "unsupported action QUEUE");
5338                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5339                         return rte_flow_error_set(error, EINVAL,
5340                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5341                                                   NULL,
5342                                                   "E-Switch must has a dest "
5343                                                   "port for mirroring");
5344                 if (!priv->config.hca_attr.reg_c_preserve &&
5345                      priv->representor_id != -1)
5346                         *fdb_mirror_limit = 1;
5347         }
5348         /* Continue validation for Xcap actions. */
5349         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5350             (queue_index == 0xFFFF ||
5351              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5352                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5353                      MLX5_FLOW_XCAP_ACTIONS)
5354                         return rte_flow_error_set(error, ENOTSUP,
5355                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5356                                                   NULL, "encap and decap "
5357                                                   "combination aren't "
5358                                                   "supported");
5359                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5360                                                         MLX5_FLOW_ACTION_ENCAP))
5361                         return rte_flow_error_set(error, ENOTSUP,
5362                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5363                                                   NULL, "encap is not supported"
5364                                                   " for ingress traffic");
5365         }
5366         return 0;
5367 }
5368
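/*
 * Illustrative NIC-ingress configuration (hypothetical values) that
 * satisfies the checks above: a non-zero ratio and a terminating QUEUE
 * sub-action as the sample destination.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action sub_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample sample = {
 *		.ratio = 2,
 *		.actions = sub_acts,
 *	};
 */
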
5369 /**
5370  * Find existing modify-header resource or create and register a new one.
5371  *
5372  * @param[in, out] dev
5373  *   Pointer to rte_eth_dev structure.
5374  * @param[in, out] resource
5375  *   Pointer to modify-header resource.
5376  * @param[in, out] dev_flow
5377  *   Pointer to the dev_flow.
5378  * @param[out] error
5379  *   Pointer to error structure.
5380  *
5381  * @return
5382  *   0 on success, otherwise a negative errno value and rte_errno is set.
5383  */
5384 static int
5385 flow_dv_modify_hdr_resource_register
5386                         (struct rte_eth_dev *dev,
5387                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5388                          struct mlx5_flow *dev_flow,
5389                          struct rte_flow_error *error)
5390 {
5391         struct mlx5_priv *priv = dev->data->dev_private;
5392         struct mlx5_dev_ctx_shared *sh = priv->sh;
5393         uint32_t key_len = sizeof(*resource) -
5394                            offsetof(typeof(*resource), ft_type) +
5395                            resource->actions_num * sizeof(resource->actions[0]);
5396         struct mlx5_hlist_entry *entry;
5397         struct mlx5_flow_cb_ctx ctx = {
5398                 .error = error,
5399                 .data = resource,
5400         };
5401         uint64_t key64;
5402
5403         resource->flags = dev_flow->dv.group ? 0 :
5404                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5405         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5406                                     resource->flags))
5407                 return rte_flow_error_set(error, EOVERFLOW,
5408                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5409                                           "too many modify header items");
5410         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5411         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5412         if (!entry)
5413                 return -rte_errno;
5414         resource = container_of(entry, typeof(*resource), entry);
5415         dev_flow->handle->dvh.modify_hdr = resource;
5416         return 0;
5417 }
5418
5419 /**
5420  * Get DV flow counter by index.
5421  *
5422  * @param[in] dev
5423  *   Pointer to the Ethernet device structure.
5424  * @param[in] idx
5425  *   mlx5 flow counter index in the container.
5426  * @param[out] ppool
5427  *   mlx5 flow counter pool in the container.
5428  *
5429  * @return
5430  *   Pointer to the counter, NULL otherwise.
5431  */
5432 static struct mlx5_flow_counter *
5433 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5434                            uint32_t idx,
5435                            struct mlx5_flow_counter_pool **ppool)
5436 {
5437         struct mlx5_priv *priv = dev->data->dev_private;
5438         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5439         struct mlx5_flow_counter_pool *pool;
5440
5441         /* Decrease to original index and clear shared bit. */
5442         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5443         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5444         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5445         MLX5_ASSERT(pool);
5446         if (ppool)
5447                 *ppool = pool;
5448         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5449 }
5450
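/*
 * Index arithmetic sketch (illustrative restatement of the lookup
 * above): counter indices are 1-based and may carry the shared bit; the
 * decoding is the inverse of the MLX5_MAKE_CNT_IDX() encoding used in
 * flow_dv_counter_alloc():
 *
 *	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
 *	pool   = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
 *	offset = idx % MLX5_COUNTERS_PER_POOL;
 */
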
5451 /**
5452  * Check the devx counter belongs to the pool.
5453  *
5454  * @param[in] pool
5455  *   Pointer to the counter pool.
5456  * @param[in] id
5457  *   The counter devx ID.
5458  *
5459  * @return
5460  *   True if counter belongs to the pool, false otherwise.
5461  */
5462 static bool
5463 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5464 {
5465         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5466                    MLX5_COUNTERS_PER_POOL;
5467
5468         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5469                 return true;
5470         return false;
5471 }
5472
5473 /**
5474  * Get a pool by devx counter ID.
5475  *
5476  * @param[in] cmng
5477  *   Pointer to the counter management.
5478  * @param[in] id
5479  *   The counter devx ID.
5480  *
5481  * @return
5482  *   The counter pool pointer if it exists, NULL otherwise.
5483  */
5484 static struct mlx5_flow_counter_pool *
5485 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5486 {
5487         uint32_t i;
5488         struct mlx5_flow_counter_pool *pool = NULL;
5489
5490         rte_spinlock_lock(&cmng->pool_update_sl);
5491         /* Check last used pool. */
5492         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5493             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5494                 pool = cmng->pools[cmng->last_pool_idx];
5495                 goto out;
5496         }
5497         /* ID out of range means no suitable pool in the container. */
5498         if (id > cmng->max_id || id < cmng->min_id)
5499                 goto out;
5500         /*
5501          * Find the pool from the end of the container, since counter IDs
5502          * are mostly sequentially increasing, and the last pool should be
5503          * the needed one.
5504          */
5505         i = cmng->n_valid;
5506         while (i--) {
5507                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5508
5509                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5510                         pool = pool_tmp;
5511                         break;
5512                 }
5513         }
5514 out:
5515         rte_spinlock_unlock(&cmng->pool_update_sl);
5516         return pool;
5517 }
5518
5519 /**
5520  * Resize a counter container.
5521  *
5522  * @param[in] dev
5523  *   Pointer to the Ethernet device structure.
5524  *
5525  * @return
5526  *   0 on success, otherwise negative errno value and rte_errno is set.
5527  */
5528 static int
5529 flow_dv_container_resize(struct rte_eth_dev *dev)
5530 {
5531         struct mlx5_priv *priv = dev->data->dev_private;
5532         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5533         void *old_pools = cmng->pools;
5534         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5535         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5536         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5537
5538         if (!pools) {
5539                 rte_errno = ENOMEM;
5540                 return -ENOMEM;
5541         }
5542         if (old_pools)
5543                 memcpy(pools, old_pools, cmng->n *
5544                                        sizeof(struct mlx5_flow_counter_pool *));
5545         cmng->n = resize;
5546         cmng->pools = pools;
5547         if (old_pools)
5548                 mlx5_free(old_pools);
5549         return 0;
5550 }
5551
5552 /**
5553  * Query a devx flow counter.
5554  *
5555  * @param[in] dev
5556  *   Pointer to the Ethernet device structure.
5557  * @param[in] counter
5558  *   Index to the flow counter.
5559  * @param[out] pkts
5560  *   The statistics value of packets.
5561  * @param[out] bytes
5562  *   The statistics value of bytes.
5563  *
5564  * @return
5565  *   0 on success, otherwise a negative errno value and rte_errno is set.
5566  */
5567 static inline int
5568 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5569                      uint64_t *bytes)
5570 {
5571         struct mlx5_priv *priv = dev->data->dev_private;
5572         struct mlx5_flow_counter_pool *pool = NULL;
5573         struct mlx5_flow_counter *cnt;
5574         int offset;
5575
5576         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5577         MLX5_ASSERT(pool);
5578         if (priv->sh->cmng.counter_fallback)
5579                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5580                                         0, pkts, bytes, 0, NULL, NULL, 0);
5581         rte_spinlock_lock(&pool->sl);
5582         if (!pool->raw) {
5583                 *pkts = 0;
5584                 *bytes = 0;
5585         } else {
5586                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5587                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5588                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5589         }
5590         rte_spinlock_unlock(&pool->sl);
5591         return 0;
5592 }
5593
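/*
 * Usage sketch (illustrative): reading the reset baseline of a counter
 * right after allocation, as flow_dv_counter_alloc() does further
 * below.
 *
 *	uint64_t pkts, bytes;
 *
 *	if (_flow_dv_query_count(dev, cnt_idx, &pkts, &bytes))
 *		goto err;
 */
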
5594 /**
5595  * Create and initialize a new counter pool.
5596  *
5597  * @param[in] dev
5598  *   Pointer to the Ethernet device structure.
5599  * @param[in] dcs
5600  *   The devX counter handle.
5601  * @param[in] age
5602  *   Whether the pool is for counters that were allocated for aging.
5605  *
5606  * @return
5607  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5608  */
5609 static struct mlx5_flow_counter_pool *
5610 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5611                     uint32_t age)
5612 {
5613         struct mlx5_priv *priv = dev->data->dev_private;
5614         struct mlx5_flow_counter_pool *pool;
5615         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5616         bool fallback = priv->sh->cmng.counter_fallback;
5617         uint32_t size = sizeof(*pool);
5618
5619         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5620         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5621         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5622         if (!pool) {
5623                 rte_errno = ENOMEM;
5624                 return NULL;
5625         }
5626         pool->raw = NULL;
5627         pool->is_aged = !!age;
5628         pool->query_gen = 0;
5629         pool->min_dcs = dcs;
5630         rte_spinlock_init(&pool->sl);
5631         rte_spinlock_init(&pool->csl);
5632         TAILQ_INIT(&pool->counters[0]);
5633         TAILQ_INIT(&pool->counters[1]);
5634         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5635         rte_spinlock_lock(&cmng->pool_update_sl);
5636         pool->index = cmng->n_valid;
5637         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5638                 mlx5_free(pool);
5639                 rte_spinlock_unlock(&cmng->pool_update_sl);
5640                 return NULL;
5641         }
5642         cmng->pools[pool->index] = pool;
5643         cmng->n_valid++;
5644         if (unlikely(fallback)) {
5645                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5646
5647                 if (base < cmng->min_id)
5648                         cmng->min_id = base;
5649                 if (base > cmng->max_id)
5650                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5651                 cmng->last_pool_idx = pool->index;
5652         }
5653         rte_spinlock_unlock(&cmng->pool_update_sl);
5654         return pool;
5655 }
5656
5657 /**
5658  * Prepare a new counter and/or a new counter pool.
5659  *
5660  * @param[in] dev
5661  *   Pointer to the Ethernet device structure.
5662  * @param[out] cnt_free
5663  *   Where to put the pointer of a new counter.
5664  * @param[in] age
5665  *   Whether the pool is for counter that was allocated for aging.
5666  *
5667  * @return
5668  *   The counter pool pointer and @p cnt_free is set on success,
5669  *   NULL otherwise and rte_errno is set.
5670  */
5671 static struct mlx5_flow_counter_pool *
5672 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5673                              struct mlx5_flow_counter **cnt_free,
5674                              uint32_t age)
5675 {
5676         struct mlx5_priv *priv = dev->data->dev_private;
5677         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5678         struct mlx5_flow_counter_pool *pool;
5679         struct mlx5_counters tmp_tq;
5680         struct mlx5_devx_obj *dcs = NULL;
5681         struct mlx5_flow_counter *cnt;
5682         enum mlx5_counter_type cnt_type =
5683                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5684         bool fallback = priv->sh->cmng.counter_fallback;
5685         uint32_t i;
5686
5687         if (fallback) {
5688                 /* bulk_bitmap must be 0 for single counter allocation. */
5689                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5690                 if (!dcs)
5691                         return NULL;
5692                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5693                 if (!pool) {
5694                         pool = flow_dv_pool_create(dev, dcs, age);
5695                         if (!pool) {
5696                                 mlx5_devx_cmd_destroy(dcs);
5697                                 return NULL;
5698                         }
5699                 }
5700                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5701                 cnt = MLX5_POOL_GET_CNT(pool, i);
5702                 cnt->pool = pool;
5703                 cnt->dcs_when_free = dcs;
5704                 *cnt_free = cnt;
5705                 return pool;
5706         }
5707         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5708         if (!dcs) {
5709                 rte_errno = ENODATA;
5710                 return NULL;
5711         }
5712         pool = flow_dv_pool_create(dev, dcs, age);
5713         if (!pool) {
5714                 mlx5_devx_cmd_destroy(dcs);
5715                 return NULL;
5716         }
5717         TAILQ_INIT(&tmp_tq);
5718         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5719                 cnt = MLX5_POOL_GET_CNT(pool, i);
5720                 cnt->pool = pool;
5721                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5722         }
5723         rte_spinlock_lock(&cmng->csl[cnt_type]);
5724         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5725         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5726         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5727         (*cnt_free)->pool = pool;
5728         return pool;
5729 }
5730
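/*
 * Sizing note (illustrative, assuming the bulk covers one full pool):
 * in the non-fallback path above, bulk bitmap 0x4 allocates one devX
 * bulk filling a pool of MLX5_COUNTERS_PER_POOL counters; counter 0 is
 * handed out via @p cnt_free and counters 1..MLX5_COUNTERS_PER_POOL-1
 * are concatenated onto the free list of the matching counter type.
 */
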
5731 /**
5732  * Allocate a flow counter.
5733  *
5734  * @param[in] dev
5735  *   Pointer to the Ethernet device structure.
5736  * @param[in] age
5737  *   Whether the counter was allocated for aging.
5738  *
5739  * @return
5740  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5741  */
5742 static uint32_t
5743 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5744 {
5745         struct mlx5_priv *priv = dev->data->dev_private;
5746         struct mlx5_flow_counter_pool *pool = NULL;
5747         struct mlx5_flow_counter *cnt_free = NULL;
5748         bool fallback = priv->sh->cmng.counter_fallback;
5749         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5750         enum mlx5_counter_type cnt_type =
5751                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5752         uint32_t cnt_idx;
5753
5754         if (!priv->config.devx) {
5755                 rte_errno = ENOTSUP;
5756                 return 0;
5757         }
5758         /* Get free counters from container. */
5759         rte_spinlock_lock(&cmng->csl[cnt_type]);
5760         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5761         if (cnt_free)
5762                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5763         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5764         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5765                 goto err;
5766         pool = cnt_free->pool;
5767         if (fallback)
5768                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5769         /* Create a DV counter action only on the first use. */
5770         if (!cnt_free->action) {
5771                 uint16_t offset;
5772                 struct mlx5_devx_obj *dcs;
5773                 int ret;
5774
5775                 if (!fallback) {
5776                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5777                         dcs = pool->min_dcs;
5778                 } else {
5779                         offset = 0;
5780                         dcs = cnt_free->dcs_when_free;
5781                 }
5782                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5783                                                             &cnt_free->action);
5784                 if (ret) {
5785                         rte_errno = errno;
5786                         goto err;
5787                 }
5788         }
5789         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5790                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5791         /* Update the counter reset values. */
5792         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5793                                  &cnt_free->bytes))
5794                 goto err;
5795         if (!fallback && !priv->sh->cmng.query_thread_on)
5796                 /* Start the asynchronous batch query by the host thread. */
5797                 mlx5_set_query_alarm(priv->sh);
5798         return cnt_idx;
5799 err:
5800         if (cnt_free) {
5801                 cnt_free->pool = pool;
5802                 if (fallback)
5803                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5804                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5805                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5806                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5807         }
5808         return 0;
5809 }
5810
5811 /**
5812  * Allocate a shared flow counter.
5813  *
5814  * @param[in] ctx
5815  *   Pointer to the shared counter configuration.
5816  * @param[out] data
5817  *   Pointer to save the allocated counter index.
5818  *
5819  * @return
5820  *   0 on success, the allocated counter index is saved in @p data.
5821  */
5823 static int32_t
5824 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5825 {
5826         struct mlx5_shared_counter_conf *conf = ctx;
5827         struct rte_eth_dev *dev = conf->dev;
5828         struct mlx5_flow_counter *cnt;
5829
5830         data->dword = flow_dv_counter_alloc(dev, 0);
5831         data->dword |= MLX5_CNT_SHARED_OFFSET;
5832         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5833         cnt->shared_info.id = conf->id;
5834         return 0;
5835 }
5836
5837 /**
5838  * Get a shared flow counter.
5839  *
5840  * @param[in] dev
5841  *   Pointer to the Ethernet device structure.
5842  * @param[in] id
5843  *   Counter identifier.
5844  *
5845  * @return
5846  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5847  */
5848 static uint32_t
5849 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5850 {
5851         struct mlx5_priv *priv = dev->data->dev_private;
5852         struct mlx5_shared_counter_conf conf = {
5853                 .dev = dev,
5854                 .id = id,
5855         };
5856         union mlx5_l3t_data data = {
5857                 .dword = 0,
5858         };
5859
5860         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5861                                flow_dv_counter_alloc_shared_cb, &conf);
5862         return data.dword;
5863 }
5864
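/*
 * Lookup-or-create sketch (illustrative, hypothetical ID): the L3 table
 * keeps one entry per shared counter ID, so two calls with the same ID
 * are expected to yield the same index, both carrying
 * MLX5_CNT_SHARED_OFFSET:
 *
 *	uint32_t idx1 = flow_dv_counter_get_shared(dev, 42);
 *	uint32_t idx2 = flow_dv_counter_get_shared(dev, 42);
 *	MLX5_ASSERT(idx1 == idx2);
 */
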
5865 /**
5866  * Get age param from counter index.
5867  *
5868  * @param[in] dev
5869  *   Pointer to the Ethernet device structure.
5870  * @param[in] counter
5871  *   Index to the counter handler.
5872  *
5873  * @return
5874  *   The aging parameter specified for the counter index.
5875  */
5876 static struct mlx5_age_param*
5877 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5878                                 uint32_t counter)
5879 {
5880         struct mlx5_flow_counter *cnt;
5881         struct mlx5_flow_counter_pool *pool = NULL;
5882
5883         flow_dv_counter_get_by_idx(dev, counter, &pool);
5884         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5885         cnt = MLX5_POOL_GET_CNT(pool, counter);
5886         return MLX5_CNT_TO_AGE(cnt);
5887 }
5888
5889 /**
5890  * Remove a flow counter from aged counter list.
5891  *
5892  * @param[in] dev
5893  *   Pointer to the Ethernet device structure.
5894  * @param[in] counter
5895  *   Index to the counter handler.
5896  * @param[in] cnt
5897  *   Pointer to the counter handler.
5898  */
5899 static void
5900 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5901                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5902 {
5903         struct mlx5_age_info *age_info;
5904         struct mlx5_age_param *age_param;
5905         struct mlx5_priv *priv = dev->data->dev_private;
5906         uint16_t expected = AGE_CANDIDATE;
5907
5908         age_info = GET_PORT_AGE_INFO(priv);
5909         age_param = flow_dv_counter_idx_get_age(dev, counter);
5910         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5911                                          AGE_FREE, false, __ATOMIC_RELAXED,
5912                                          __ATOMIC_RELAXED)) {
5913                 /*
5914                  * We need the lock even if it is the age timeout,
5915                  * since the counter may still be in process.
5916                  */
5917                 rte_spinlock_lock(&age_info->aged_sl);
5918                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5919                 rte_spinlock_unlock(&age_info->aged_sl);
5920                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5921         }
5922 }
5923
5924 /**
5925  * Release a flow counter.
5926  *
5927  * @param[in] dev
5928  *   Pointer to the Ethernet device structure.
5929  * @param[in] counter
5930  *   Index to the counter handler.
5931  */
5932 static void
5933 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5934 {
5935         struct mlx5_priv *priv = dev->data->dev_private;
5936         struct mlx5_flow_counter_pool *pool = NULL;
5937         struct mlx5_flow_counter *cnt;
5938         enum mlx5_counter_type cnt_type;
5939
5940         if (!counter)
5941                 return;
5942         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5943         MLX5_ASSERT(pool);
5944         if (IS_SHARED_CNT(counter) &&
5945             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5946                 return;
5947         if (pool->is_aged)
5948                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5949         cnt->pool = pool;
5950         /*
5951          * Put the counter back to the list to be updated in non-fallback
5952          * mode. Currently, two lists are used alternately: while one is
5953          * being queried, freed counters are added to the other list, based
5954          * on the pool query_gen value. After the query finishes, the
5955          * counters from that list are moved to the global container
5956          * counter list. The lists switch when a query starts, so no lock
5957          * is needed, as the query callback and the release function
5958          * operate on different lists.
5959          */
5960         if (!priv->sh->cmng.counter_fallback) {
5961                 rte_spinlock_lock(&pool->csl);
5962                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5963                 rte_spinlock_unlock(&pool->csl);
5964         } else {
5965                 cnt->dcs_when_free = cnt->dcs_when_active;
5966                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5967                                            MLX5_COUNTER_TYPE_ORIGIN;
5968                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5969                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5970                                   cnt, next);
5971                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5972         }
5973 }
5974
5975 /**
5976  * Resize a meter id container.
5977  *
5978  * @param[in] dev
5979  *   Pointer to the Ethernet device structure.
5980  *
5981  * @return
5982  *   0 on success, otherwise negative errno value and rte_errno is set.
5983  */
5984 static int
5985 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
5986 {
5987         struct mlx5_priv *priv = dev->data->dev_private;
5988         struct mlx5_aso_mtr_pools_mng *pools_mng =
5989                                 &priv->sh->mtrmng->pools_mng;
5990         void *old_pools = pools_mng->pools;
5991         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
5992         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
5993         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5994
5995         if (!pools) {
5996                 rte_errno = ENOMEM;
5997                 return -ENOMEM;
5998         }
5999         if (!pools_mng->n &&
6000             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6001                 mlx5_free(pools);
6002                 return -ENOMEM;
6003         }
6004         if (old_pools)
6005                 memcpy(pools, old_pools, pools_mng->n *
6006                                        sizeof(struct mlx5_aso_mtr_pool *));
6007         pools_mng->n = resize;
6008         pools_mng->pools = pools;
6009         if (old_pools)
6010                 mlx5_free(old_pools);
6011         return 0;
6012 }
6013
6014 /**
6015  * Prepare a new meter and/or a new meter pool.
6016  *
6017  * @param[in] dev
6018  *   Pointer to the Ethernet device structure.
6019  * @param[out] mtr_free
6020  *   Where to put the pointer of a new meter.
6021  *
6022  * @return
6023  *   The meter pool pointer and @p mtr_free is set on success,
6024  *   NULL otherwise and rte_errno is set.
6025  */
6026 static struct mlx5_aso_mtr_pool *
6027 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6028                              struct mlx5_aso_mtr **mtr_free)
6029 {
6030         struct mlx5_priv *priv = dev->data->dev_private;
6031         struct mlx5_aso_mtr_pools_mng *pools_mng =
6032                                 &priv->sh->mtrmng->pools_mng;
6033         struct mlx5_aso_mtr_pool *pool = NULL;
6034         struct mlx5_devx_obj *dcs = NULL;
6035         uint32_t i;
6036         uint32_t log_obj_size;
6037
6038         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6039         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6040                         priv->sh->pdn, log_obj_size);
6041         if (!dcs) {
6042                 rte_errno = ENODATA;
6043                 return NULL;
6044         }
6045         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6046         if (!pool) {
6047                 rte_errno = ENOMEM;
6048                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6049                 return NULL;
6050         }
6051         pool->devx_obj = dcs;
6052         pool->index = pools_mng->n_valid;
6053         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6054                 mlx5_free(pool);
6055                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6056                 return NULL;
6057         }
6058         pools_mng->pools[pool->index] = pool;
6059         pools_mng->n_valid++;
6060         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6061                 pool->mtrs[i].offset = i;
6062                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6064         }
6065         pool->mtrs[0].offset = 0;
6066         *mtr_free = &pool->mtrs[0];
6067         return pool;
6068 }
6069
6070 /**
6071  * Release a flow meter back into the pool.
6072  *
6073  * @param[in] dev
6074  *   Pointer to the Ethernet device structure.
6075  * @param[in] mtr_idx
6076  *   Index to the ASO flow meter.
6077  */
6078 static void
6079 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6080 {
6081         struct mlx5_priv *priv = dev->data->dev_private;
6082         struct mlx5_aso_mtr_pools_mng *pools_mng =
6083                                 &priv->sh->mtrmng->pools_mng;
6084         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6085
6086         MLX5_ASSERT(aso_mtr);
6087         rte_spinlock_lock(&pools_mng->mtrsl);
6088         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6089         aso_mtr->state = ASO_METER_FREE;
6090         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6091         rte_spinlock_unlock(&pools_mng->mtrsl);
6092 }
6093
6094 /**
6095  * Allocate an ASO flow meter.
6096  *
6097  * @param[in] dev
6098  *   Pointer to the Ethernet device structure.
6099  *
6100  * @return
6101  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6102  */
6103 static uint32_t
6104 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6105 {
6106         struct mlx5_priv *priv = dev->data->dev_private;
6107         struct mlx5_aso_mtr *mtr_free = NULL;
6108         struct mlx5_aso_mtr_pools_mng *pools_mng =
6109                                 &priv->sh->mtrmng->pools_mng;
6110         struct mlx5_aso_mtr_pool *pool;
6111         uint32_t mtr_idx = 0;
6112
6113         if (!priv->config.devx) {
6114                 rte_errno = ENOTSUP;
6115                 return 0;
6116         }
6117         /* Get a free meter from the pools management free list. */
6119         rte_spinlock_lock(&pools_mng->mtrsl);
6120         mtr_free = LIST_FIRST(&pools_mng->meters);
6121         if (mtr_free)
6122                 LIST_REMOVE(mtr_free, next);
6123         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6124                 rte_spinlock_unlock(&pools_mng->mtrsl);
6125                 return 0;
6126         }
6127         mtr_free->state = ASO_METER_WAIT;
6128         rte_spinlock_unlock(&pools_mng->mtrsl);
6129         pool = container_of(mtr_free,
6130                         struct mlx5_aso_mtr_pool,
6131                         mtrs[mtr_free->offset]);
6132         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6133         if (!mtr_free->fm.meter_action) {
6134 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6135                 struct rte_flow_error error;
6136                 uint8_t reg_id;
6137
6138                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6139                 mtr_free->fm.meter_action =
6140                         mlx5_glue->dv_create_flow_action_aso
6141                                                 (priv->sh->rx_domain,
6142                                                  pool->devx_obj->obj,
6143                                                  mtr_free->offset,
6144                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6145                                                  reg_id - REG_C_0);
6146 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6147                 if (!mtr_free->fm.meter_action) {
6148                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6149                         return 0;
6150                 }
6151         }
6152         return mtr_idx;
6153 }
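/*
 * The allocation above recovers the enclosing pool from a pointer to one
 * of its mtrs[] elements via container_of(). A minimal sketch of that
 * idiom with illustrative types (not the driver's); the driver indexes
 * the member with the element's own stored offset:
 *
 *        #include <assert.h>
 *        #include <stddef.h>
 *
 *        #define container_of(ptr, type, member) \
 *                ((type *)((char *)(ptr) - offsetof(type, member)))
 *
 *        struct meter { unsigned int offset; };
 *        struct pool { unsigned int index; struct meter mtrs[8]; };
 *
 *        int
 *        main(void)
 *        {
 *                struct pool p = { .index = 3 };
 *                struct meter *m = &p.mtrs[5];
 *
 *                m->offset = 5;
 *                // Subtracting the member offset from the member pointer
 *                // yields the address of the enclosing structure.
 *                assert(container_of(m, struct pool, mtrs[5]) == &p);
 *                return 0;
 *        }
 */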
6154
6155 /**
6156  * Verify that the @p attributes will be correctly understood by the NIC
6157  * and are valid within the given group translation context.
6158  *
6159  * @param[in] dev
6160  *   Pointer to the Ethernet device structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload descriptor, or NULL.
6161  * @param[in] attributes
6162  *   Pointer to the flow attributes.
6163  * @param[in] grp_info
6164  *   Pointer to the group translation attributes.
6165  * @param[out] error
6166  *   Pointer to the error structure.
6167  *
6168  * @return
6169  *   - 0 on success, for a non-root table.
6170  *   - 1 on success, for the root table.
6171  *   - a negative errno value otherwise and rte_errno is set.
6172  */
6173 static int
6174 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6175                             const struct mlx5_flow_tunnel *tunnel,
6176                             const struct rte_flow_attr *attributes,
6177                             const struct flow_grp_info *grp_info,
6178                             struct rte_flow_error *error)
6179 {
6180         struct mlx5_priv *priv = dev->data->dev_private;
6181         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6182         int ret = 0;
6183
6184 #ifndef HAVE_MLX5DV_DR
6185         RTE_SET_USED(tunnel);
6186         RTE_SET_USED(grp_info);
6187         if (attributes->group)
6188                 return rte_flow_error_set(error, ENOTSUP,
6189                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6190                                           NULL,
6191                                           "groups are not supported");
6192 #else
6193         uint32_t table = 0;
6194
6195         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6196                                        grp_info, error);
6197         if (ret)
6198                 return ret;
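        /*
         * Group 0 translates to the root table, where only a restricted
         * feature set is available; the non-zero return value reports
         * this to the caller.
         */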
6199         if (!table)
6200                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6201 #endif
6202         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6203             attributes->priority > lowest_priority)
6204                 return rte_flow_error_set(error, ENOTSUP,
6205                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6206                                           NULL,
6207                                           "priority out of range");
6208         if (attributes->transfer) {
6209                 if (!priv->config.dv_esw_en)
6210                         return rte_flow_error_set
6211                                 (error, ENOTSUP,
6212                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6213                                  "E-Switch dr is not supported");
6214                 if (!(priv->representor || priv->master))
6215                         return rte_flow_error_set
6216                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6217                                  NULL, "E-Switch configuration can only be"
6218                                  " done by a master or a representor device");
6219                 if (attributes->egress)
6220                         return rte_flow_error_set
6221                                 (error, ENOTSUP,
6222                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6223                                  "egress is not supported");
6224         }
6225         if (!(attributes->egress ^ attributes->ingress))
6226                 return rte_flow_error_set(error, ENOTSUP,
6227                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6228                                           "must specify exactly one of "
6229                                           "ingress or egress");
6230         return ret;
6231 }
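/*
 * The final attribute check above uses XOR to require exactly one of the
 * two direction bits. A standalone illustration (names are illustrative;
 * the driver operates on 1-bit attribute fields, so the bare XOR is
 * already safe there):
 *
 *        #include <assert.h>
 *
 *        static int
 *        exactly_one(unsigned int ingress, unsigned int egress)
 *        {
 *                // The !! folds any non-zero value to 1 first.
 *                return !!ingress ^ !!egress;
 *        }
 *
 *        int
 *        main(void)
 *        {
 *                assert(exactly_one(1, 0));
 *                assert(exactly_one(0, 1));
 *                assert(!exactly_one(0, 0));
 *                assert(!exactly_one(1, 1));
 *                return 0;
 *        }
 */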
6232
6233 /**
6234  * Internal validation function. For validating both actions and items.
6235  *
6236  * @param[in] dev
6237  *   Pointer to the rte_eth_dev structure.
6238  * @param[in] attr
6239  *   Pointer to the flow attributes.
6240  * @param[in] items
6241  *   Pointer to the list of items.
6242  * @param[in] actions
6243  *   Pointer to the list of actions.
6244  * @param[in] external
6245  *   Whether this flow rule was created by a request external to the PMD.
6246  * @param[in] hairpin
6247  *   Number of hairpin TX actions; 0 means a classic (non-hairpin) flow.
6248  * @param[out] error
6249  *   Pointer to the error structure.
6250  *
6251  * @return
6252  *   0 on success, a negative errno value otherwise and rte_errno is set.
6253  */
6254 static int
6255 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6256                  const struct rte_flow_item items[],
6257                  const struct rte_flow_action actions[],
6258                  bool external, int hairpin, struct rte_flow_error *error)
6259 {
6260         int ret;
6261         uint64_t action_flags = 0;
6262         uint64_t item_flags = 0;
6263         uint64_t last_item = 0;
6264         uint8_t next_protocol = 0xff;
6265         uint16_t ether_type = 0;
6266         int actions_n = 0;
6267         uint8_t item_ipv6_proto = 0;
6268         int fdb_mirror_limit = 0;
6269         int modify_after_mirror = 0;
6270         const struct rte_flow_item *geneve_item = NULL;
6271         const struct rte_flow_item *gre_item = NULL;
6272         const struct rte_flow_item *gtp_item = NULL;
6273         const struct rte_flow_action_raw_decap *decap;
6274         const struct rte_flow_action_raw_encap *encap;
6275         const struct rte_flow_action_rss *rss = NULL;
6276         const struct rte_flow_action_rss *sample_rss = NULL;
6277         const struct rte_flow_action_count *count = NULL;
6278         const struct rte_flow_action_count *sample_count = NULL;
6279         const struct rte_flow_item_tcp nic_tcp_mask = {
6280                 .hdr = {
6281                         .tcp_flags = 0xFF,
6282                         .src_port = RTE_BE16(UINT16_MAX),
6283                         .dst_port = RTE_BE16(UINT16_MAX),
6284                 }
6285         };
6286         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6287                 .hdr = {
6288                         .src_addr =
6289                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6290                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6291                         .dst_addr =
6292                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6293                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6294                         .vtc_flow = RTE_BE32(0xffffffff),
6295                         .proto = 0xff,
6296                         .hop_limits = 0xff,
6297                 },
6298                 .has_frag_ext = 1,
6299         };
6300         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6301                 .hdr = {
6302                         .common = {
6303                                 .u32 =
6304                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6305                                         .type = 0xFF,
6306                                         }).u32),
6307                         },
6308                         .dummy[0] = 0xffffffff,
6309                 },
6310         };
6311         struct mlx5_priv *priv = dev->data->dev_private;
6312         struct mlx5_dev_config *dev_conf = &priv->config;
6313         uint16_t queue_index = 0xFFFF;
6314         const struct rte_flow_item_vlan *vlan_m = NULL;
6315         uint32_t rw_act_num = 0;
6316         uint64_t is_root;
6317         const struct mlx5_flow_tunnel *tunnel;
6318         struct flow_grp_info grp_info = {
6319                 .external = !!external,
6320                 .transfer = !!attr->transfer,
6321                 .fdb_def_rule = !!priv->fdb_def_rule,
6322         };
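        /* The double negation (!!) folds wider fields into 0/1 flags. */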
6323         const struct rte_eth_hairpin_conf *conf;
6324         bool def_policy = false;
6325
6326         if (items == NULL)
6327                 return rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                           NULL, "item list is NULL");
6328         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
6329                 tunnel = flow_items_to_tunnel(items);
6330                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6331                                 MLX5_FLOW_ACTION_DECAP;
6332         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
6333                 tunnel = flow_actions_to_tunnel(actions);
6334                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6335         } else {
6336                 tunnel = NULL;
6337         }
6338         if (tunnel && priv->representor)
6339                 return rte_flow_error_set(error, ENOTSUP,
6340                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6341                                           "decap not supported "
6342                                           "for VF representor");
6343         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6344                                 (dev, tunnel, attr, items, actions);
6345         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6346         if (ret < 0)
6347                 return ret;
6348         is_root = (uint64_t)ret;
6349         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6350                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6351                 int type = items->type;
6352
6353                 if (!mlx5_flow_os_item_supported(type))
6354                         return rte_flow_error_set(error, ENOTSUP,
6355                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6356                                                   NULL, "item not supported");
6357                 switch (type) {
6358                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
6359                         if (items[0].type != (typeof(items[0].type))
6360                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
6361                                 return rte_flow_error_set
6362                                                 (error, EINVAL,
6363                                                 RTE_FLOW_ERROR_TYPE_ITEM,
6364                                                 NULL, "MLX5 private items "
6365                                                 "must come first");
6366                         break;
6367                 case RTE_FLOW_ITEM_TYPE_VOID:
6368                         break;
6369                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6370                         ret = flow_dv_validate_item_port_id
6371                                         (dev, items, attr, item_flags, error);
6372                         if (ret < 0)
6373                                 return ret;
6374                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6375                         break;
6376                 case RTE_FLOW_ITEM_TYPE_ETH:
6377                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6378                                                           true, error);
6379                         if (ret < 0)
6380                                 return ret;
6381                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6382                                              MLX5_FLOW_LAYER_OUTER_L2;
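                        /*
                         * Matching is done on (spec & mask), so the
                         * effective EtherType must apply the mask before
                         * the byte-order conversion.
                         */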
6383                         if (items->mask != NULL && items->spec != NULL) {
6384                                 ether_type =
6385                                         ((const struct rte_flow_item_eth *)
6386                                          items->spec)->type;
6387                                 ether_type &=
6388                                         ((const struct rte_flow_item_eth *)
6389                                          items->mask)->type;
6390                                 ether_type = rte_be_to_cpu_16(ether_type);
6391                         } else {
6392                                 ether_type = 0;
6393                         }
6394                         break;
6395                 case RTE_FLOW_ITEM_TYPE_VLAN:
6396                         ret = flow_dv_validate_item_vlan(items, item_flags,
6397                                                          dev, error);
6398                         if (ret < 0)
6399                                 return ret;
6400                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6401                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6402                         if (items->mask != NULL && items->spec != NULL) {
6403                                 ether_type =
6404                                         ((const struct rte_flow_item_vlan *)
6405                                          items->spec)->inner_type;
6406                                 ether_type &=
6407                                         ((const struct rte_flow_item_vlan *)
6408                                          items->mask)->inner_type;
6409                                 ether_type = rte_be_to_cpu_16(ether_type);
6410                         } else {
6411                                 ether_type = 0;
6412                         }
6413                         /* Store outer VLAN mask for of_push_vlan action. */
6414                         if (!tunnel)
6415                                 vlan_m = items->mask;
6416                         break;
6417                 case RTE_FLOW_ITEM_TYPE_IPV4:
6418                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6419                                                   &item_flags, &tunnel);
6420                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6421                                                          last_item, ether_type,
6422                                                          error);
6423                         if (ret < 0)
6424                                 return ret;
6425                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6426                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
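                        /*
                         * Remember the (masked) next-protocol value so the
                         * subsequent L4/tunnel item validation can
                         * cross-check it against the item type.
                         */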
6427                         if (items->mask != NULL &&
6428                             ((const struct rte_flow_item_ipv4 *)
6429                              items->mask)->hdr.next_proto_id) {
6430                                 next_protocol =
6431                                         ((const struct rte_flow_item_ipv4 *)
6432                                          (items->spec))->hdr.next_proto_id;
6433                                 next_protocol &=
6434                                         ((const struct rte_flow_item_ipv4 *)
6435                                          (items->mask))->hdr.next_proto_id;
6436                         } else {
6437                                 /* Reset for inner layer. */
6438                                 next_protocol = 0xff;
6439                         }
6440                         break;
6441                 case RTE_FLOW_ITEM_TYPE_IPV6:
6442                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6443                                                   &item_flags, &tunnel);
6444                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6445                                                            last_item,
6446                                                            ether_type,
6447                                                            &nic_ipv6_mask,
6448                                                            error);
6449                         if (ret < 0)
6450                                 return ret;
6451                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6452                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6453                         if (items->mask != NULL &&
6454                             ((const struct rte_flow_item_ipv6 *)
6455                              items->mask)->hdr.proto) {
6456                                 item_ipv6_proto =
6457                                         ((const struct rte_flow_item_ipv6 *)
6458                                          items->spec)->hdr.proto;
6459                                 next_protocol =
6460                                         ((const struct rte_flow_item_ipv6 *)
6461                                          items->spec)->hdr.proto;
6462                                 next_protocol &=
6463                                         ((const struct rte_flow_item_ipv6 *)
6464                                          items->mask)->hdr.proto;
6465                         } else {
6466                                 /* Reset for inner layer. */
6467                                 next_protocol = 0xff;
6468                         }
6469                         break;
6470                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6471                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6472                                                                   item_flags,
6473                                                                   error);
6474                         if (ret < 0)
6475                                 return ret;
6476                         last_item = tunnel ?
6477                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6478                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6479                         if (items->mask != NULL &&
6480                             ((const struct rte_flow_item_ipv6_frag_ext *)
6481                              items->mask)->hdr.next_header) {
6482                                 next_protocol =
6483                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6484                                  items->spec)->hdr.next_header;
6485                                 next_protocol &=
6486                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6487                                  items->mask)->hdr.next_header;
6488                         } else {
6489                                 /* Reset for inner layer. */
6490                                 next_protocol = 0xff;
6491                         }
6492                         break;
6493                 case RTE_FLOW_ITEM_TYPE_TCP:
6494                         ret = mlx5_flow_validate_item_tcp
6495                                                 (items, item_flags,
6496                                                  next_protocol,
6497                                                  &nic_tcp_mask,
6498                                                  error);
6499                         if (ret < 0)
6500                                 return ret;
6501                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6502                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6503                         break;
6504                 case RTE_FLOW_ITEM_TYPE_UDP:
6505                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6506                                                           next_protocol,
6507                                                           error);
6508                         if (ret < 0)
6509                                 return ret;
6510                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6511                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6512                         break;
6513                 case RTE_FLOW_ITEM_TYPE_GRE:
6514                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6515                                                           next_protocol, error);
6516                         if (ret < 0)
6517                                 return ret;
6518                         gre_item = items;
6519                         last_item = MLX5_FLOW_LAYER_GRE;
6520                         break;
6521                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6522                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6523                                                             next_protocol,
6524                                                             error);
6525                         if (ret < 0)
6526                                 return ret;
6527                         last_item = MLX5_FLOW_LAYER_NVGRE;
6528                         break;
6529                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6530                         ret = mlx5_flow_validate_item_gre_key
6531                                 (items, item_flags, gre_item, error);
6532                         if (ret < 0)
6533                                 return ret;
6534                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6535                         break;
6536                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6537                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6538                                                             error);
6539                         if (ret < 0)
6540                                 return ret;
6541                         last_item = MLX5_FLOW_LAYER_VXLAN;
6542                         break;
6543                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6544                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6545                                                                 item_flags, dev,
6546                                                                 error);
6547                         if (ret < 0)
6548                                 return ret;
6549                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6550                         break;
6551                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6552                         ret = mlx5_flow_validate_item_geneve(items,
6553                                                              item_flags, dev,
6554                                                              error);
6555                         if (ret < 0)
6556                                 return ret;
6557                         geneve_item = items;
6558                         last_item = MLX5_FLOW_LAYER_GENEVE;
6559                         break;
6560                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6561                         ret = mlx5_flow_validate_item_geneve_opt(items,
6562                                                                  last_item,
6563                                                                  geneve_item,
6564                                                                  dev,
6565                                                                  error);
6566                         if (ret < 0)
6567                                 return ret;
6568                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6569                         break;
6570                 case RTE_FLOW_ITEM_TYPE_MPLS:
6571                         ret = mlx5_flow_validate_item_mpls(dev, items,
6572                                                            item_flags,
6573                                                            last_item, error);
6574                         if (ret < 0)
6575                                 return ret;
6576                         last_item = MLX5_FLOW_LAYER_MPLS;
6577                         break;
6579                 case RTE_FLOW_ITEM_TYPE_MARK:
6580                         ret = flow_dv_validate_item_mark(dev, items, attr,
6581                                                          error);
6582                         if (ret < 0)
6583                                 return ret;
6584                         last_item = MLX5_FLOW_ITEM_MARK;
6585                         break;
6586                 case RTE_FLOW_ITEM_TYPE_META:
6587                         ret = flow_dv_validate_item_meta(dev, items, attr,
6588                                                          error);
6589                         if (ret < 0)
6590                                 return ret;
6591                         last_item = MLX5_FLOW_ITEM_METADATA;
6592                         break;
6593                 case RTE_FLOW_ITEM_TYPE_ICMP:
6594                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6595                                                            next_protocol,
6596                                                            error);
6597                         if (ret < 0)
6598                                 return ret;
6599                         last_item = MLX5_FLOW_LAYER_ICMP;
6600                         break;
6601                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6602                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6603                                                             next_protocol,
6604                                                             error);
6605                         if (ret < 0)
6606                                 return ret;
6607                         item_ipv6_proto = IPPROTO_ICMPV6;
6608                         last_item = MLX5_FLOW_LAYER_ICMP6;
6609                         break;
6610                 case RTE_FLOW_ITEM_TYPE_TAG:
6611                         ret = flow_dv_validate_item_tag(dev, items,
6612                                                         attr, error);
6613                         if (ret < 0)
6614                                 return ret;
6615                         last_item = MLX5_FLOW_ITEM_TAG;
6616                         break;
6617                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6618                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6619                         break;
6620                 case RTE_FLOW_ITEM_TYPE_GTP:
6621                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
6622                                                         error);
6623                         if (ret < 0)
6624                                 return ret;
6625                         gtp_item = items;
6626                         last_item = MLX5_FLOW_LAYER_GTP;
6627                         break;
6628                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6629                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
6630                                                             gtp_item, attr,
6631                                                             error);
6632                         if (ret < 0)
6633                                 return ret;
6634                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
6635                         break;
6636                 case RTE_FLOW_ITEM_TYPE_ECPRI:
6637                         /* Capacity will be checked in the translate stage. */
6638                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
6639                                                             last_item,
6640                                                             ether_type,
6641                                                             &nic_ecpri_mask,
6642                                                             error);
6643                         if (ret < 0)
6644                                 return ret;
6645                         last_item = MLX5_FLOW_LAYER_ECPRI;
6646                         break;
6647                 default:
6648                         return rte_flow_error_set(error, ENOTSUP,
6649                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6650                                                   NULL, "item not supported");
6651                 }
6652                 item_flags |= last_item;
6653         }
6654         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6655                 int type = actions->type;
6656
6657                 if (!mlx5_flow_os_action_supported(type))
6658                         return rte_flow_error_set(error, ENOTSUP,
6659                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6660                                                   actions,
6661                                                   "action not supported");
6662                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
6663                         return rte_flow_error_set(error, ENOTSUP,
6664                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6665                                                   actions, "too many actions");
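                /*
                 * A meter whose policy carries terminating fate actions
                 * ends packet processing, so no action may follow it.
                 */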
6666                 if (action_flags &
6667                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
6668                         return rte_flow_error_set(error, ENOTSUP,
6669                                 RTE_FLOW_ERROR_TYPE_ACTION,
6670                                 NULL, "meter action with policy "
6671                                 "must be the last action");
6672                 switch (type) {
6673                 case RTE_FLOW_ACTION_TYPE_VOID:
6674                         break;
6675                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
6676                         ret = flow_dv_validate_action_port_id(dev,
6677                                                               action_flags,
6678                                                               actions,
6679                                                               attr,
6680                                                               error);
6681                         if (ret)
6682                                 return ret;
6683                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
6684                         ++actions_n;
6685                         break;
6686                 case RTE_FLOW_ACTION_TYPE_FLAG:
6687                         ret = flow_dv_validate_action_flag(dev, action_flags,
6688                                                            attr, error);
6689                         if (ret < 0)
6690                                 return ret;
6691                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6692                                 /* Count all modify-header actions as one. */
6693                                 if (!(action_flags &
6694                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6695                                         ++actions_n;
6696                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
6697                                                 MLX5_FLOW_ACTION_MARK_EXT;
6698                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6699                                         modify_after_mirror = 1;
6701                         } else {
6702                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
6703                                 ++actions_n;
6704                         }
6705                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
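                        /*
                         * rw_act_num accumulates header-rewrite actions;
                         * the total is checked later against the device's
                         * modify-header limit.
                         */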
6706                         break;
6707                 case RTE_FLOW_ACTION_TYPE_MARK:
6708                         ret = flow_dv_validate_action_mark(dev, actions,
6709                                                            action_flags,
6710                                                            attr, error);
6711                         if (ret < 0)
6712                                 return ret;
6713                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6714                                 /* Count all modify-header actions as one. */
6715                                 if (!(action_flags &
6716                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6717                                         ++actions_n;
6718                                 action_flags |= MLX5_FLOW_ACTION_MARK |
6719                                                 MLX5_FLOW_ACTION_MARK_EXT;
6720                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6721                                         modify_after_mirror = 1;
6722                         } else {
6723                                 action_flags |= MLX5_FLOW_ACTION_MARK;
6724                                 ++actions_n;
6725                         }
6726                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6727                         break;
6728                 case RTE_FLOW_ACTION_TYPE_SET_META:
6729                         ret = flow_dv_validate_action_set_meta(dev, actions,
6730                                                                action_flags,
6731                                                                attr, error);
6732                         if (ret < 0)
6733                                 return ret;
6734                         /* Count all modify-header actions as one action. */
6735                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6736                                 ++actions_n;
6737                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6738                                 modify_after_mirror = 1;
6739                         action_flags |= MLX5_FLOW_ACTION_SET_META;
6740                         rw_act_num += MLX5_ACT_NUM_SET_META;
6741                         break;
6742                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
6743                         ret = flow_dv_validate_action_set_tag(dev, actions,
6744                                                               action_flags,
6745                                                               attr, error);
6746                         if (ret < 0)
6747                                 return ret;
6748                         /* Count all modify-header actions as one action. */
6749                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6750                                 ++actions_n;
6751                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6752                                 modify_after_mirror = 1;
6753                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6754                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6755                         break;
6756                 case RTE_FLOW_ACTION_TYPE_DROP:
6757                         ret = mlx5_flow_validate_action_drop(action_flags,
6758                                                              attr, error);
6759                         if (ret < 0)
6760                                 return ret;
6761                         action_flags |= MLX5_FLOW_ACTION_DROP;
6762                         ++actions_n;
6763                         break;
6764                 case RTE_FLOW_ACTION_TYPE_QUEUE:
6765                         ret = mlx5_flow_validate_action_queue(actions,
6766                                                               action_flags, dev,
6767                                                               attr, error);
6768                         if (ret < 0)
6769                                 return ret;
6770                         queue_index = ((const struct rte_flow_action_queue *)
6771                                                         (actions->conf))->index;
6772                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
6773                         ++actions_n;
6774                         break;
6775                 case RTE_FLOW_ACTION_TYPE_RSS:
6776                         rss = actions->conf;
6777                         ret = mlx5_flow_validate_action_rss(actions,
6778                                                             action_flags, dev,
6779                                                             attr, item_flags,
6780                                                             error);
6781                         if (ret < 0)
6782                                 return ret;
6783                         if (rss && sample_rss &&
6784                             (sample_rss->level != rss->level ||
6785                             sample_rss->types != rss->types))
6786                                 return rte_flow_error_set(error, ENOTSUP,
6787                                         RTE_FLOW_ERROR_TYPE_ACTION,
6788                                         NULL,
6789                                         "cannot use different RSS types "
6790                                         "or levels in the same flow");
6791                         if (rss != NULL && rss->queue_num)
6792                                 queue_index = rss->queue[0];
6793                         action_flags |= MLX5_FLOW_ACTION_RSS;
6794                         ++actions_n;
6795                         break;
6796                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6797                         ret = mlx5_flow_validate_action_default_miss
6798                                         (action_flags, attr, error);
6800                         if (ret < 0)
6801                                 return ret;
6802                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
6803                         ++actions_n;
6804                         break;
6805                 case RTE_FLOW_ACTION_TYPE_COUNT:
6806                         ret = flow_dv_validate_action_count(dev, actions,
6807                                                             action_flags,
6808                                                             error);
6809                         if (ret < 0)
6810                                 return ret;
6811                         count = actions->conf;
6812                         action_flags |= MLX5_FLOW_ACTION_COUNT;
6813                         ++actions_n;
6814                         break;
6815                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
6816                         if (flow_dv_validate_action_pop_vlan(dev,
6817                                                              action_flags,
6818                                                              actions,
6819                                                              item_flags, attr,
6820                                                              error))
6821                                 return -rte_errno;
6822                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6823                                 modify_after_mirror = 1;
6824                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
6825                         ++actions_n;
6826                         break;
6827                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6828                         ret = flow_dv_validate_action_push_vlan(dev,
6829                                                                 action_flags,
6830                                                                 vlan_m,
6831                                                                 actions, attr,
6832                                                                 error);
6833                         if (ret < 0)
6834                                 return ret;
6835                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6836                                 modify_after_mirror = 1;
6837                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
6838                         ++actions_n;
6839                         break;
6840                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
6841                         ret = flow_dv_validate_action_set_vlan_pcp
6842                                                 (action_flags, actions, error);
6843                         if (ret < 0)
6844                                 return ret;
6845                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6846                                 modify_after_mirror = 1;
6847                         /* PCP is counted as part of the push_vlan action. */
6848                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
6849                         break;
6850                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6851                         ret = flow_dv_validate_action_set_vlan_vid
6852                                                 (item_flags, action_flags,
6853                                                  actions, error);
6854                         if (ret < 0)
6855                                 return ret;
6856                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6857                                 modify_after_mirror = 1;
6858                         /* VID is counted as part of the push_vlan action. */
6859                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
6860                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
6861                         break;
6862                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6863                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6864                         ret = flow_dv_validate_action_l2_encap(dev,
6865                                                                action_flags,
6866                                                                actions, attr,
6867                                                                error);
6868                         if (ret < 0)
6869                                 return ret;
6870                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
6871                         ++actions_n;
6872                         break;
6873                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6874                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6875                         ret = flow_dv_validate_action_decap(dev, action_flags,
6876                                                             actions, item_flags,
6877                                                             attr, error);
6878                         if (ret < 0)
6879                                 return ret;
6880                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6881                                 modify_after_mirror = 1;
6882                         action_flags |= MLX5_FLOW_ACTION_DECAP;
6883                         ++actions_n;
6884                         break;
6885                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6886                         ret = flow_dv_validate_action_raw_encap_decap
6887                                 (dev, NULL, actions->conf, attr, &action_flags,
6888                                  &actions_n, actions, item_flags, error);
6889                         if (ret < 0)
6890                                 return ret;
6891                         break;
6892                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6893                         decap = actions->conf;
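                        /*
                         * Peek past any VOID actions: a raw_decap
                         * immediately followed by a raw_encap is validated
                         * as a single decap/encap pair.
                         */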
6894                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
6895                                 ;
6896                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
6897                                 encap = NULL;
6898                                 actions--;
6899                         } else {
6900                                 encap = actions->conf;
6901                         }
6902                         ret = flow_dv_validate_action_raw_encap_decap
6903                                            (dev,
6904                                             decap ? decap : &empty_decap, encap,
6905                                             attr, &action_flags, &actions_n,
6906                                             actions, item_flags, error);
6907                         if (ret < 0)
6908                                 return ret;
6909                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
6910                             (action_flags & MLX5_FLOW_ACTION_DECAP))
6911                                 modify_after_mirror = 1;
6912                         break;
6913                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
6914                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
6915                         ret = flow_dv_validate_action_modify_mac(action_flags,
6916                                                                  actions,
6917                                                                  item_flags,
6918                                                                  error);
6919                         if (ret < 0)
6920                                 return ret;
6921                         /* Count all modify-header actions as one action. */
6922                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6923                                 ++actions_n;
6924                         action_flags |= actions->type ==
6925                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
6926                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
6927                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
6928                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6929                                 modify_after_mirror = 1;
6930                         /*
6931                          * Even though the source and destination MAC
6932                          * addresses overlap within 4-byte aligned words
6933                          * of the header, the convert function handles
6934                          * them separately and creates 4 SW actions: two
6935                          * per address, regardless of how many bytes are set.
6936                          */
6937                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
6938                         break;
6939                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
6940                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
6941                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
6942                                                                   actions,
6943                                                                   item_flags,
6944                                                                   error);
6945                         if (ret < 0)
6946                                 return ret;
6947                         /* Count all modify-header actions as one action. */
6948                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6949                                 ++actions_n;
6950                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6951                                 modify_after_mirror = 1;
6952                         action_flags |= actions->type ==
6953                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
6954                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
6955                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
6956                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
6957                         break;
6958                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
6959                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
6960                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
6961                                                                   actions,
6962                                                                   item_flags,
6963                                                                   error);
6964                         if (ret < 0)
6965                                 return ret;
6966                         if (item_ipv6_proto == IPPROTO_ICMPV6)
6967                                 return rte_flow_error_set(error, ENOTSUP,
6968                                         RTE_FLOW_ERROR_TYPE_ACTION,
6969                                         actions,
6970                                         "cannot modify the IPv6 header "
6971                                         "when the protocol is ICMPv6");
6972                         /* Count all modify-header actions as one action. */
6973                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6974                                 ++actions_n;
6975                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6976                                 modify_after_mirror = 1;
6977                         action_flags |= actions->type ==
6978                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
6979                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
6980                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
6981                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
6982                         break;
6983                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
6984                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
6985                         ret = flow_dv_validate_action_modify_tp(action_flags,
6986                                                                 actions,
6987                                                                 item_flags,
6988                                                                 error);
6989                         if (ret < 0)
6990                                 return ret;
6991                         /* Count all modify-header actions as one action. */
6992                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6993                                 ++actions_n;
6994                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6995                                 modify_after_mirror = 1;
6996                         action_flags |= actions->type ==
6997                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
6998                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
6999                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7000                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7001                         break;
7002                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7003                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7004                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7005                                                                  actions,
7006                                                                  item_flags,
7007                                                                  error);
7008                         if (ret < 0)
7009                                 return ret;
7010                         /* Count all modify-header actions as one action. */
7011                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7012                                 ++actions_n;
7013                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7014                                 modify_after_mirror = 1;
7015                         action_flags |= actions->type ==
7016                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7017                                                 MLX5_FLOW_ACTION_SET_TTL :
7018                                                 MLX5_FLOW_ACTION_DEC_TTL;
7019                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7020                         break;
7021                 case RTE_FLOW_ACTION_TYPE_JUMP:
7022                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7023                                                            action_flags,
7024                                                            attr, external,
7025                                                            error);
7026                         if (ret)
7027                                 return ret;
7028                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7029                             fdb_mirror_limit)
7030                                 return rte_flow_error_set(error, EINVAL,
7031                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7032                                                   NULL,
7033                                                   "sample and jump action combination is not supported");
7034                         ++actions_n;
7035                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7036                         break;
7037                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7038                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7039                         ret = flow_dv_validate_action_modify_tcp_seq
7040                                                                 (action_flags,
7041                                                                  actions,
7042                                                                  item_flags,
7043                                                                  error);
7044                         if (ret < 0)
7045                                 return ret;
7046                         /* Count all modify-header actions as one action. */
7047                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7048                                 ++actions_n;
7049                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7050                                 modify_after_mirror = 1;
7051                         action_flags |= actions->type ==
7052                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7053                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7054                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7055                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7056                         break;
7057                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7058                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7059                         ret = flow_dv_validate_action_modify_tcp_ack
7060                                                                 (action_flags,
7061                                                                  actions,
7062                                                                  item_flags,
7063                                                                  error);
7064                         if (ret < 0)
7065                                 return ret;
7066                         /* Count all modify-header actions as one action. */
7067                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7068                                 ++actions_n;
7069                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7070                                 modify_after_mirror = 1;
7071                         action_flags |= actions->type ==
7072                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7073                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7074                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7075                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7076                         break;
7077                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7078                         break;
7079                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7080                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7081                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7082                         break;
7083                 case RTE_FLOW_ACTION_TYPE_METER:
7084                         ret = mlx5_flow_validate_action_meter(dev,
7085                                                               action_flags,
7086                                                               actions, attr,
7087                                                               &def_policy,
7088                                                               error);
7089                         if (ret < 0)
7090                                 return ret;
7091                         action_flags |= MLX5_FLOW_ACTION_METER;
7092                         if (!def_policy)
7093                                 action_flags |=
7094                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7095                         ++actions_n;
7096                         /* Meter action will add one more TAG action. */
7097                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7098                         break;
7099                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7100                         if (!attr->transfer && !attr->group)
7101                                 return rte_flow_error_set(error, ENOTSUP,
7102                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7103                                                                            NULL,
7104                           "Shared ASO age action is not supported for group 0");
7105                         action_flags |= MLX5_FLOW_ACTION_AGE;
7106                         ++actions_n;
7107                         break;
7108                 case RTE_FLOW_ACTION_TYPE_AGE:
7109                         ret = flow_dv_validate_action_age(action_flags,
7110                                                           actions, dev,
7111                                                           error);
7112                         if (ret < 0)
7113                                 return ret;
7114                         /*
7115                          * Validate that the regular AGE action (using a counter)
7116                          * is mutually exclusive with shared counter actions.
7117                          */
7118                         if (!priv->sh->flow_hit_aso_en) {
7119                                 if (count && count->shared)
7120                                         return rte_flow_error_set
7121                                                 (error, EINVAL,
7122                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7123                                                 NULL,
7124                                                 "old age and shared count combination is not supported");
7125                                 if (sample_count)
7126                                         return rte_flow_error_set
7127                                                 (error, EINVAL,
7128                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7129                                                 NULL,
7130                                                 "old age action and count must be in the same sub flow");
7131                         }
7132                         action_flags |= MLX5_FLOW_ACTION_AGE;
7133                         ++actions_n;
7134                         break;
7135                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7136                         ret = flow_dv_validate_action_modify_ipv4_dscp
7137                                                          (action_flags,
7138                                                           actions,
7139                                                           item_flags,
7140                                                           error);
7141                         if (ret < 0)
7142                                 return ret;
7143                         /* Count all modify-header actions as one action. */
7144                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7145                                 ++actions_n;
7146                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7147                                 modify_after_mirror = 1;
7148                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7149                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7150                         break;
7151                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7152                         ret = flow_dv_validate_action_modify_ipv6_dscp
7153                                                                 (action_flags,
7154                                                                  actions,
7155                                                                  item_flags,
7156                                                                  error);
7157                         if (ret < 0)
7158                                 return ret;
7159                         /* Count all modify-header actions as one action. */
7160                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7161                                 ++actions_n;
7162                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7163                                 modify_after_mirror = 1;
7164                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7165                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7166                         break;
7167                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7168                         ret = flow_dv_validate_action_sample(&action_flags,
7169                                                              actions, dev,
7170                                                              attr, item_flags,
7171                                                              rss, &sample_rss,
7172                                                              &sample_count,
7173                                                              &fdb_mirror_limit,
7174                                                              error);
7175                         if (ret < 0)
7176                                 return ret;
7177                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7178                         ++actions_n;
7179                         break;
7180                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7181                         if (actions[0].type != (typeof(actions[0].type))
7182                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
7183                                 return rte_flow_error_set
7184                                                 (error, EINVAL,
7185                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7186                                                 NULL, "MLX5 private action "
7187                                                 "must be the first");

7189                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
7190                         break;
7191                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7192                         ret = flow_dv_validate_action_modify_field(dev,
7193                                                                    action_flags,
7194                                                                    actions,
7195                                                                    attr,
7196                                                                    error);
7197                         if (ret < 0)
7198                                 return ret;
7199                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7200                                 modify_after_mirror = 1;
7201                         /* Count all modify-header actions as one action. */
7202                         if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
7203                                 ++actions_n;
7204                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7205                         rw_act_num += ret;
7206                         break;
7207                 default:
7208                         return rte_flow_error_set(error, ENOTSUP,
7209                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7210                                                   actions,
7211                                                   "action not supported");
7212                 }
7213         }
7214         /*
7215          * Validate actions in flow rules:
7216          * - Explicit decap action is prohibited by the tunnel offload API.
7217          * - Drop action in tunnel steer rule is prohibited by the API.
7218          * - Application cannot use MARK action because its value can mask
7219          *   tunnel default miss notification.
7220          * - JUMP in tunnel match rule has no support in current PMD
7221          *   implementation.
7222          * - TAG & META are reserved for future use.
7223          */
7224         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7225                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7226                                             MLX5_FLOW_ACTION_MARK     |
7227                                             MLX5_FLOW_ACTION_SET_TAG  |
7228                                             MLX5_FLOW_ACTION_SET_META |
7229                                             MLX5_FLOW_ACTION_DROP;
7230
7231                 if (action_flags & bad_actions_mask)
7232                         return rte_flow_error_set
7233                                         (error, EINVAL,
7234                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7235                                         "Invalid RTE action in tunnel "
7236                                         "set decap rule");
7237                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7238                         return rte_flow_error_set
7239                                         (error, EINVAL,
7240                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7241                                         "tunnel set decap rule must terminate "
7242                                         "with JUMP");
7243                 if (!attr->ingress)
7244                         return rte_flow_error_set
7245                                         (error, EINVAL,
7246                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7247                                         "tunnel flows for ingress traffic only");
7248         }
7249         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7250                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7251                                             MLX5_FLOW_ACTION_MARK    |
7252                                             MLX5_FLOW_ACTION_SET_TAG |
7253                                             MLX5_FLOW_ACTION_SET_META;
7254
7255                 if (action_flags & bad_actions_mask)
7256                         return rte_flow_error_set
7257                                         (error, EINVAL,
7258                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7259                                         "Invalid RTE action in tunnel "
7260                                         "set match rule");
7261         }
7262         /*
7263          * Validate the drop action mutual exclusion with other actions.
7264          * Drop action is mutually-exclusive with any other action, except for
7265          * Count action.
7266          * Drop action compatibility with tunnel offload was already validated.
7267          */
7268         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7269                             MLX5_FLOW_ACTION_TUNNEL_SET));
7270         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7271             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7272                 return rte_flow_error_set(error, EINVAL,
7273                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7274                                           "Drop action is mutually-exclusive "
7275                                           "with any other action, except for "
7276                                           "Count action");
7277         /* Eswitch has a few restrictions on using items and actions. */
7278         if (attr->transfer) {
7279                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7280                     action_flags & MLX5_FLOW_ACTION_FLAG)
7281                         return rte_flow_error_set(error, ENOTSUP,
7282                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7283                                                   NULL,
7284                                                   "unsupported action FLAG");
7285                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7286                     action_flags & MLX5_FLOW_ACTION_MARK)
7287                         return rte_flow_error_set(error, ENOTSUP,
7288                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7289                                                   NULL,
7290                                                   "unsupported action MARK");
7291                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7292                         return rte_flow_error_set(error, ENOTSUP,
7293                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7294                                                   NULL,
7295                                                   "unsupported action QUEUE");
7296                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7297                         return rte_flow_error_set(error, ENOTSUP,
7298                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7299                                                   NULL,
7300                                                   "unsupported action RSS");
7301                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7302                         return rte_flow_error_set(error, EINVAL,
7303                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7304                                                   actions,
7305                                                   "no fate action is found");
7306         } else {
7307                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7308                         return rte_flow_error_set(error, EINVAL,
7309                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7310                                                   actions,
7311                                                   "no fate action is found");
7312         }
7313         /*
7314          * Continue validation for Xcap and VLAN actions.
7315          * If hairpin is working in explicit TX rule mode, there is no actions
7316          * If hairpin is working in explicit TX rule mode, there is no action
7317          * splitting and the validation of a hairpin ingress flow should be
7318          * the same as for other standard flows.
7319         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7320                              MLX5_FLOW_VLAN_ACTIONS)) &&
7321             (queue_index == 0xFFFF ||
7322              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7323              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7324              conf->tx_explicit != 0))) {
7325                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7326                     MLX5_FLOW_XCAP_ACTIONS)
7327                         return rte_flow_error_set(error, ENOTSUP,
7328                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7329                                                   NULL, "encap and decap "
7330                                                   "combination is not supported");
7331                 if (!attr->transfer && attr->ingress) {
7332                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7333                                 return rte_flow_error_set
7334                                                 (error, ENOTSUP,
7335                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7336                                                  NULL, "encap is not supported"
7337                                                  " for ingress traffic");
7338                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7339                                 return rte_flow_error_set
7340                                                 (error, ENOTSUP,
7341                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7342                                                  NULL, "push VLAN action not "
7343                                                  "supported for ingress");
7344                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7345                                         MLX5_FLOW_VLAN_ACTIONS)
7346                                 return rte_flow_error_set
7347                                                 (error, ENOTSUP,
7348                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7349                                                  NULL, "no support for "
7350                                                  "multiple VLAN actions");
7351                 }
7352         }
7353         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7354                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7355                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7356                         attr->ingress)
7357                         return rte_flow_error_set
7358                                 (error, ENOTSUP,
7359                                 RTE_FLOW_ERROR_TYPE_ACTION,
7360                                 NULL, "fate action not supported for "
7361                                 "meter with policy");
7362                 if (attr->egress) {
7363                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7364                                 return rte_flow_error_set
7365                                         (error, ENOTSUP,
7366                                         RTE_FLOW_ERROR_TYPE_ACTION,
7367                                         NULL, "modify header action in egress "
7368                                         "cannot be done before meter action");
7369                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7370                                 return rte_flow_error_set
7371                                         (error, ENOTSUP,
7372                                         RTE_FLOW_ERROR_TYPE_ACTION,
7373                                         NULL, "encap action in egress "
7374                                         "cannot be done before meter action");
7375                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7376                                 return rte_flow_error_set
7377                                         (error, ENOTSUP,
7378                                         RTE_FLOW_ERROR_TYPE_ACTION,
7379                                         NULL, "push vlan action in egress "
7380                                         "cannot be done before meter action");
7381                 }
7382         }
7383         /*
7384          * Hairpin flow will add one more TAG action in TX implicit mode.
7385          * In TX explicit mode, there will be no hairpin flow ID.
7386          */
7387         if (hairpin > 0)
7388                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7389         /* Extra metadata enabled: one more TAG action will be added. */
7390         if (dev_conf->dv_flow_en &&
7391             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7392             mlx5_flow_ext_mreg_supported(dev))
7393                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7394         if (rw_act_num >
7395                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7396                 return rte_flow_error_set(error, ENOTSUP,
7397                                           RTE_FLOW_ERROR_TYPE_ACTION,
7398                                           NULL, "too many header modify"
7399                                           " actions to support");
7400         }
7401         /* Eswitch egress mirror plus modify flows have a limitation on CX5. */
7402         if (fdb_mirror_limit && modify_after_mirror)
7403                 return rte_flow_error_set(error, EINVAL,
7404                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7405                                 "sample before modify action is not supported");
7406         return 0;
7407 }
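
/*
 * A minimal illustrative sketch (not upstream code): a hypothetical
 * application-side action list exercising the budget checks above.
 * METER accounts for one extra TAG rewrite and SET_IPV4_DSCP for one
 * DSCP rewrite, so rw_act_num would be MLX5_ACT_NUM_SET_TAG +
 * MLX5_ACT_NUM_SET_DSCP here. All "example_" names are assumptions
 * made for the sketch only.
 */
static const struct rte_flow_action_meter example_meter_conf = {
	.mtr_id = 1,
};
static const struct rte_flow_action_set_dscp example_dscp_conf = {
	.dscp = 10,
};
static const struct rte_flow_action example_actions[] __rte_unused = {
	{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &example_meter_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP,
	  .conf = &example_dscp_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
	  .conf = &(const struct rte_flow_action_queue){ .index = 0 } },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};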
7408
7409 /**
7410  * Internal preparation function. Allocates the DV flow size,
7411  * which is constant.
7412  *
7413  * @param[in] dev
7414  *   Pointer to the rte_eth_dev structure.
7415  * @param[in] attr
7416  *   Pointer to the flow attributes.
7417  * @param[in] items
7418  *   Pointer to the list of items.
7419  * @param[in] actions
7420  *   Pointer to the list of actions.
7421  * @param[out] error
7422  *   Pointer to the error structure.
7423  *
7424  * @return
7425  *   Pointer to mlx5_flow object on success,
7426  *   otherwise NULL and rte_errno is set.
7427  */
7428 static struct mlx5_flow *
7429 flow_dv_prepare(struct rte_eth_dev *dev,
7430                 const struct rte_flow_attr *attr __rte_unused,
7431                 const struct rte_flow_item items[] __rte_unused,
7432                 const struct rte_flow_action actions[] __rte_unused,
7433                 struct rte_flow_error *error)
7434 {
7435         uint32_t handle_idx = 0;
7436         struct mlx5_flow *dev_flow;
7437         struct mlx5_flow_handle *dev_handle;
7438         struct mlx5_priv *priv = dev->data->dev_private;
7439         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7440
7441         MLX5_ASSERT(wks);
7442         wks->skip_matcher_reg = 0;
7443         /* Check the index to avoid corrupting the memory. */
7444         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7445                 rte_flow_error_set(error, ENOSPC,
7446                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7447                                    "no free temporary device flow");
7448                 return NULL;
7449         }
7450         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7451                                    &handle_idx);
7452         if (!dev_handle) {
7453                 rte_flow_error_set(error, ENOMEM,
7454                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7455                                    "not enough memory to create flow handle");
7456                 return NULL;
7457         }
7458         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7459         dev_flow = &wks->flows[wks->flow_idx++];
7460         memset(dev_flow, 0, sizeof(*dev_flow));
7461         dev_flow->handle = dev_handle;
7462         dev_flow->handle_idx = handle_idx;
7463         /*
7464          * Some old rdma-core releases check the length of the matching
7465          * parameter before continuing. Start with the length that excludes
7466          * the misc4 param; if the flow matches on misc4, the length is
7467          * adjusted accordingly. Each param member is naturally aligned
7468          * to a 64B boundary.
7469          */
7470         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7471                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7472         dev_flow->ingress = attr->ingress;
7473         dev_flow->dv.transfer = attr->transfer;
7474         return dev_flow;
7475 }
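
/*
 * A minimal illustrative sketch (not upstream code): how the initial
 * match-value size set above is derived. The misc4 parameter is
 * excluded up front for compatibility with old rdma-core length checks
 * and is added back only when a flow really matches on misc4 fields.
 */
static __rte_unused size_t
example_initial_match_size(void)
{
	/* Whole PRM fte_match_param minus the trailing misc4 member. */
	return MLX5_ST_SZ_BYTES(fte_match_param) -
	       MLX5_ST_SZ_BYTES(fte_match_set_misc4);
}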
7476
7477 #ifdef RTE_LIBRTE_MLX5_DEBUG
7478 /**
7479  * Sanity check for match mask and value. Similar to check_valid_spec() in
7480  * the kernel driver. If an unmasked bit is present in the value, it
7481  * returns failure.
7481  *
7482  * @param match_mask
7483  *   pointer to match mask buffer.
7484  * @param match_value
7485  *   pointer to match value buffer.
7486  *
7487  * @return
7488  *   0 if valid, -EINVAL otherwise.
7489  */
7490 static int
7491 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7492 {
7493         uint8_t *m = match_mask;
7494         uint8_t *v = match_value;
7495         unsigned int i;
7496
7497         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7498                 if (v[i] & ~m[i]) {
7499                         DRV_LOG(ERR,
7500                                 "match_value differs from match_criteria"
7501                                 " %p[%u] != %p[%u]",
7502                                 match_value, i, match_mask, i);
7503                         return -EINVAL;
7504                 }
7505         }
7506         return 0;
7507 }
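
/*
 * A minimal illustrative sketch (not upstream code): the check above
 * rejects a value byte that sets a bit its mask byte leaves unmasked,
 * e.g. mask 0x0f with value 0x1f on the same byte.
 */
static __rte_unused int
example_spec_check(void)
{
	uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x0f };
	uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x1f };

	/* Bit 4 of value[0] is not covered by mask[0]: returns -EINVAL. */
	return flow_dv_check_valid_spec(mask, value);
}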
7508 #endif
7509
7510 /**
7511  * Add match of ip_version.
7512  *
7513  * @param[in] group
7514  *   Flow group.
7515  * @param[in] headers_v
7516  *   Values header pointer.
7517  * @param[in] headers_m
7518  *   Masks header pointer.
7519  * @param[in] ip_version
7520  *   The IP version to set.
7521  */
7522 static inline void
7523 flow_dv_set_match_ip_version(uint32_t group,
7524                              void *headers_v,
7525                              void *headers_m,
7526                              uint8_t ip_version)
7527 {
7528         if (group == 0)
7529                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7530         else
7531                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7532                          ip_version);
7533         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7534         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7535         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7536 }
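
/*
 * A minimal illustrative sketch (not upstream code): typical usage of
 * the helper above. On the root table (group 0) the mask is widened to
 * 0xf so the IP version is matched exactly regardless of the value.
 */
static __rte_unused void
example_match_outer_ipv4(void *matcher, void *key)
{
	void *hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

	/* Group 0: mask becomes 0xf, value is set to 4 (IPv4). */
	flow_dv_set_match_ip_version(0, hdrs_v, hdrs_m, 4);
}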
7537
7538 /**
7539  * Add Ethernet item to matcher and to the value.
7540  *
7541  * @param[in, out] matcher
7542  *   Flow matcher.
7543  * @param[in, out] key
7544  *   Flow matcher value.
7545  * @param[in] item
7546  *   Flow pattern to translate.
7547  * @param[in] inner
7548  *   Item is inner pattern.
7549  */
7550 static void
7551 flow_dv_translate_item_eth(void *matcher, void *key,
7552                            const struct rte_flow_item *item, int inner,
7553                            uint32_t group)
7554 {
7555         const struct rte_flow_item_eth *eth_m = item->mask;
7556         const struct rte_flow_item_eth *eth_v = item->spec;
7557         const struct rte_flow_item_eth nic_mask = {
7558                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7559                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7560                 .type = RTE_BE16(0xffff),
7561                 .has_vlan = 0,
7562         };
7563         void *hdrs_m;
7564         void *hdrs_v;
7565         char *l24_v;
7566         unsigned int i;
7567
7568         if (!eth_v)
7569                 return;
7570         if (!eth_m)
7571                 eth_m = &nic_mask;
7572         if (inner) {
7573                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7574                                          inner_headers);
7575                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7576         } else {
7577                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7578                                          outer_headers);
7579                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7580         }
7581         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7582                &eth_m->dst, sizeof(eth_m->dst));
7583         /* The value must be in the range of the mask. */
7584         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7585         for (i = 0; i < sizeof(eth_m->dst); ++i)
7586                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7587         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7588                &eth_m->src, sizeof(eth_m->src));
7589         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7590         /* The value must be in the range of the mask. */
7591         for (i = 0; i < sizeof(eth_m->src); ++i)
7592                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7593         /*
7594          * HW supports match on one Ethertype, the Ethertype following the last
7595          * VLAN tag of the packet (see PRM).
7596          * Set match on ethertype only if ETH header is not followed by VLAN.
7597          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7598          * ethertype, and use ip_version field instead.
7599          * eCPRI over Ether layer will use type value 0xAEFE.
7600          */
7601         if (eth_m->type == 0xFFFF) {
7602                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
7603                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7604                 switch (eth_v->type) {
7605                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7606                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7607                         return;
7608                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7609                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7610                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7611                         return;
7612                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7613                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7614                         return;
7615                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7616                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7617                         return;
7618                 default:
7619                         break;
7620                 }
7621         }
7622         if (eth_m->has_vlan) {
7623                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7624                 if (eth_v->has_vlan) {
7625                         /*
7626                          * Here, if the has_more_vlan field in the VLAN item is
7627                          * also not set, only single-tagged packets are matched.
7628                          */
7629                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7630                         return;
7631                 }
7632         }
7633         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7634                  rte_be_to_cpu_16(eth_m->type));
7635         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
7636         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
7637 }
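
/*
 * A minimal illustrative sketch (not upstream code): the MAC handling
 * above always writes the value pre-masked; the "value must be in the
 * range of the mask" rule condensed into one helper.
 */
static __rte_unused void
example_masked_copy(uint8_t *dst, const uint8_t *spec,
		    const uint8_t *mask, size_t len)
{
	size_t i;

	/* Trim every value byte to its mask, as done for DMAC/SMAC. */
	for (i = 0; i < len; ++i)
		dst[i] = spec[i] & mask[i];
}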
7638
7639 /**
7640  * Add VLAN item to matcher and to the value.
7641  *
7642  * @param[in, out] dev_flow
7643  *   Flow descriptor.
7644  * @param[in, out] matcher
7645  *   Flow matcher.
7646  * @param[in, out] key
7647  *   Flow matcher value.
7648  * @param[in] item
7649  *   Flow pattern to translate.
7650  * @param[in] inner
7651  *   Item is inner pattern.
7652  */
7653 static void
7654 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
7655                             void *matcher, void *key,
7656                             const struct rte_flow_item *item,
7657                             int inner, uint32_t group)
7658 {
7659         const struct rte_flow_item_vlan *vlan_m = item->mask;
7660         const struct rte_flow_item_vlan *vlan_v = item->spec;
7661         void *hdrs_m;
7662         void *hdrs_v;
7663         uint16_t tci_m;
7664         uint16_t tci_v;
7665
7666         if (inner) {
7667                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7668                                          inner_headers);
7669                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7670         } else {
7671                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7672                                          outer_headers);
7673                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7674                 /*
7675                  * This is a workaround: masks are not supported,
7676                  * and were pre-validated.
7677                  */
7678                 if (vlan_v)
7679                         dev_flow->handle->vf_vlan.tag =
7680                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
7681         }
7682         /*
7683          * When a VLAN item exists in the flow, mark the packet as tagged,
7684          * even if TCI is not specified.
7685          */
7686         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
7687                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7688                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7689         }
7690         if (!vlan_v)
7691                 return;
7692         if (!vlan_m)
7693                 vlan_m = &rte_flow_item_vlan_mask;
7694         tci_m = rte_be_to_cpu_16(vlan_m->tci);
7695         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
7696         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
7697         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
7698         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
7699         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
7700         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
7701         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
7702         /*
7703          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7704          * ethertype, and use ip_version field instead.
7705          */
7706         if (vlan_m->inner_type == 0xFFFF) {
7707                 switch (vlan_v->inner_type) {
7708                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7709                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7710                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7711                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7712                         return;
7713                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7714                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7715                         return;
7716                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7717                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7718                         return;
7719                 default:
7720                         break;
7721                 }
7722         }
7723         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
7724                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7725                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7726                 /* Only one vlan_tag bit can be set. */
7727                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7728                 return;
7729         }
7730         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7731                  rte_be_to_cpu_16(vlan_m->inner_type));
7732         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
7733                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
7734 }
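
/*
 * A minimal illustrative sketch (not upstream code): the TCI
 * decomposition performed above via MLX5_SET field truncation.
 * E.g. TCI 0xE00A yields prio 7, cfi 0, vid 0x00A.
 */
static __rte_unused void
example_tci_split(uint16_t tci, uint8_t *prio, uint8_t *cfi, uint16_t *vid)
{
	*prio = tci >> 13;		/* PCP, bits 15:13. */
	*cfi = (tci >> 12) & 0x1;	/* CFI/DEI, bit 12. */
	*vid = tci & 0x0fff;		/* VLAN ID, bits 11:0. */
}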
7735
7736 /**
7737  * Add IPV4 item to matcher and to the value.
7738  *
7739  * @param[in, out] matcher
7740  *   Flow matcher.
7741  * @param[in, out] key
7742  *   Flow matcher value.
7743  * @param[in] item
7744  *   Flow pattern to translate.
7745  * @param[in] inner
7746  *   Item is inner pattern.
7747  * @param[in] group
7748  *   The group to insert the rule.
7749  */
7750 static void
7751 flow_dv_translate_item_ipv4(void *matcher, void *key,
7752                             const struct rte_flow_item *item,
7753                             int inner, uint32_t group)
7754 {
7755         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
7756         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
7757         const struct rte_flow_item_ipv4 nic_mask = {
7758                 .hdr = {
7759                         .src_addr = RTE_BE32(0xffffffff),
7760                         .dst_addr = RTE_BE32(0xffffffff),
7761                         .type_of_service = 0xff,
7762                         .next_proto_id = 0xff,
7763                         .time_to_live = 0xff,
7764                 },
7765         };
7766         void *headers_m;
7767         void *headers_v;
7768         char *l24_m;
7769         char *l24_v;
7770         uint8_t tos;
7771
7772         if (inner) {
7773                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7774                                          inner_headers);
7775                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7776         } else {
7777                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7778                                          outer_headers);
7779                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7780         }
7781         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
7782         if (!ipv4_v)
7783                 return;
7784         if (!ipv4_m)
7785                 ipv4_m = &nic_mask;
7786         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7787                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7788         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7789                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7790         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
7791         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
7792         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7793                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7794         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7795                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7796         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
7797         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
7798         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
7799         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
7800                  ipv4_m->hdr.type_of_service);
7801         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
7802         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
7803                  ipv4_m->hdr.type_of_service >> 2);
7804         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
7805         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7806                  ipv4_m->hdr.next_proto_id);
7807         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7808                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
7809         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7810                  ipv4_m->hdr.time_to_live);
7811         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7812                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
7813         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7814                  !!(ipv4_m->hdr.fragment_offset));
7815         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7816                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
7817 }
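
/*
 * A minimal illustrative sketch (not upstream code): the TOS split
 * used above. ip_dscp receives tos >> 2, while the 2-bit ip_ecn field
 * truncates the full TOS byte to its low two bits.
 */
static __rte_unused void
example_tos_split(uint8_t tos, uint8_t *dscp, uint8_t *ecn)
{
	*dscp = tos >> 2;	/* DSCP, bits 7:2. */
	*ecn = tos & 0x3;	/* ECN, bits 1:0. */
}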
7818
7819 /**
7820  * Add IPV6 item to matcher and to the value.
7821  *
7822  * @param[in, out] matcher
7823  *   Flow matcher.
7824  * @param[in, out] key
7825  *   Flow matcher value.
7826  * @param[in] item
7827  *   Flow pattern to translate.
7828  * @param[in] inner
7829  *   Item is inner pattern.
7830  * @param[in] group
7831  *   The group to insert the rule.
7832  */
7833 static void
7834 flow_dv_translate_item_ipv6(void *matcher, void *key,
7835                             const struct rte_flow_item *item,
7836                             int inner, uint32_t group)
7837 {
7838         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
7839         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
7840         const struct rte_flow_item_ipv6 nic_mask = {
7841                 .hdr = {
7842                         .src_addr =
7843                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7844                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7845                         .dst_addr =
7846                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7847                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7848                         .vtc_flow = RTE_BE32(0xffffffff),
7849                         .proto = 0xff,
7850                         .hop_limits = 0xff,
7851                 },
7852         };
7853         void *headers_m;
7854         void *headers_v;
7855         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7856         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7857         char *l24_m;
7858         char *l24_v;
7859         uint32_t vtc_m;
7860         uint32_t vtc_v;
7861         int i;
7862         int size;
7863
7864         if (inner) {
7865                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7866                                          inner_headers);
7867                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7868         } else {
7869                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7870                                          outer_headers);
7871                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7872         }
7873         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
7874         if (!ipv6_v)
7875                 return;
7876         if (!ipv6_m)
7877                 ipv6_m = &nic_mask;
7878         size = sizeof(ipv6_m->hdr.dst_addr);
7879         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7880                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7881         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7882                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7883         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
7884         for (i = 0; i < size; ++i)
7885                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
7886         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7887                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7888         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7889                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7890         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
7891         for (i = 0; i < size; ++i)
7892                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
7893         /* TOS. */
7894         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
7895         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
7896         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
7897         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
7898         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
7899         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
7900         /* Label. */
7901         if (inner) {
7902                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
7903                          vtc_m);
7904                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
7905                          vtc_v);
7906         } else {
7907                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
7908                          vtc_m);
7909                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
7910                          vtc_v);
7911         }
7912         /* Protocol. */
7913         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7914                  ipv6_m->hdr.proto);
7915         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7916                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
7917         /* Hop limit. */
7918         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7919                  ipv6_m->hdr.hop_limits);
7920         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7921                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
7922         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7923                  !!(ipv6_m->has_frag_ext));
7924         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7925                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
7926 }
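
/*
 * A minimal illustrative sketch (not upstream code): the vtc_flow
 * decomposition used above. The traffic class spans bits 27:20, so
 * ECN is vtc >> 20 and DSCP is vtc >> 22 after the 2-bit and 6-bit
 * PRM fields truncate them.
 */
static __rte_unused void
example_vtc_flow_split(uint32_t vtc, uint8_t *dscp, uint8_t *ecn,
		       uint32_t *flow_label)
{
	*ecn = (vtc >> 20) & 0x3;	/* ECN, bits 21:20. */
	*dscp = (vtc >> 22) & 0x3f;	/* DSCP, bits 27:22. */
	*flow_label = vtc & 0xfffff;	/* Flow label, bits 19:0. */
}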
7927
7928 /**
7929  * Add IPV6 fragment extension item to matcher and to the value.
7930  *
7931  * @param[in, out] matcher
7932  *   Flow matcher.
7933  * @param[in, out] key
7934  *   Flow matcher value.
7935  * @param[in] item
7936  *   Flow pattern to translate.
7937  * @param[in] inner
7938  *   Item is inner pattern.
7939  */
7940 static void
7941 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
7942                                      const struct rte_flow_item *item,
7943                                      int inner)
7944 {
7945         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
7946         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
7947         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
7948                 .hdr = {
7949                         .next_header = 0xff,
7950                         .frag_data = RTE_BE16(0xffff),
7951                 },
7952         };
7953         void *headers_m;
7954         void *headers_v;
7955
7956         if (inner) {
7957                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7958                                          inner_headers);
7959                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7960         } else {
7961                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7962                                          outer_headers);
7963                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7964         }
7965         /* IPv6 fragment extension item exists, so packet is IP fragment. */
7966         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7967         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
7968         if (!ipv6_frag_ext_v)
7969                 return;
7970         if (!ipv6_frag_ext_m)
7971                 ipv6_frag_ext_m = &nic_mask;
7972         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7973                  ipv6_frag_ext_m->hdr.next_header);
7974         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7975                  ipv6_frag_ext_v->hdr.next_header &
7976                  ipv6_frag_ext_m->hdr.next_header);
7977 }
7978
7979 /**
7980  * Add TCP item to matcher and to the value.
7981  *
7982  * @param[in, out] matcher
7983  *   Flow matcher.
7984  * @param[in, out] key
7985  *   Flow matcher value.
7986  * @param[in] item
7987  *   Flow pattern to translate.
7988  * @param[in] inner
7989  *   Item is inner pattern.
7990  */
7991 static void
7992 flow_dv_translate_item_tcp(void *matcher, void *key,
7993                            const struct rte_flow_item *item,
7994                            int inner)
7995 {
7996         const struct rte_flow_item_tcp *tcp_m = item->mask;
7997         const struct rte_flow_item_tcp *tcp_v = item->spec;
7998         void *headers_m;
7999         void *headers_v;
8000
8001         if (inner) {
8002                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8003                                          inner_headers);
8004                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8005         } else {
8006                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8007                                          outer_headers);
8008                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8009         }
8010         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8011         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8012         if (!tcp_v)
8013                 return;
8014         if (!tcp_m)
8015                 tcp_m = &rte_flow_item_tcp_mask;
8016         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8017                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8018         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8019                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8020         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8021                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8022         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8023                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8024         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8025                  tcp_m->hdr.tcp_flags);
8026         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8027                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8028 }
8029
8030 /**
8031  * Add UDP item to matcher and to the value.
8032  *
8033  * @param[in, out] matcher
8034  *   Flow matcher.
8035  * @param[in, out] key
8036  *   Flow matcher value.
8037  * @param[in] item
8038  *   Flow pattern to translate.
8039  * @param[in] inner
8040  *   Item is inner pattern.
8041  */
8042 static void
8043 flow_dv_translate_item_udp(void *matcher, void *key,
8044                            const struct rte_flow_item *item,
8045                            int inner)
8046 {
8047         const struct rte_flow_item_udp *udp_m = item->mask;
8048         const struct rte_flow_item_udp *udp_v = item->spec;
8049         void *headers_m;
8050         void *headers_v;
8051
8052         if (inner) {
8053                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8054                                          inner_headers);
8055                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8056         } else {
8057                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8058                                          outer_headers);
8059                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8060         }
8061         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8062         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8063         if (!udp_v)
8064                 return;
8065         if (!udp_m)
8066                 udp_m = &rte_flow_item_udp_mask;
8067         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8068                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8069         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8070                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8071         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8072                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8073         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8074                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8075 }
8076
8077 /**
8078  * Add GRE optional Key item to matcher and to the value.
8079  *
8080  * @param[in, out] matcher
8081  *   Flow matcher.
8082  * @param[in, out] key
8083  *   Flow matcher value.
8084  * @param[in] item
8085  *   Flow pattern to translate.
8088  */
8089 static void
8090 flow_dv_translate_item_gre_key(void *matcher, void *key,
8091                                const struct rte_flow_item *item)
8092 {
8093         const rte_be32_t *key_m = item->mask;
8094         const rte_be32_t *key_v = item->spec;
8095         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8096         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8097         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8098
8099         /* GRE K bit must be on and should already be validated */
8100         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8101         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8102         if (!key_v)
8103                 return;
8104         if (!key_m)
8105                 key_m = &gre_key_default_mask;
8106         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8107                  rte_be_to_cpu_32(*key_m) >> 8);
8108         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8109                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8110         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8111                  rte_be_to_cpu_32(*key_m) & 0xFF);
8112         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8113                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8114 }
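
/*
 * A minimal illustrative sketch (not upstream code): the PRM stores
 * the 32-bit GRE key as 24 high bits (gre_key_h) plus 8 low bits
 * (gre_key_l), matching the shifts above.
 */
static __rte_unused void
example_gre_key_split(uint32_t gre_key, uint32_t *key_h, uint8_t *key_l)
{
	*key_h = gre_key >> 8;		/* Upper 24 bits. */
	*key_l = gre_key & 0xff;	/* Lower 8 bits. */
}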
8115
8116 /**
8117  * Add GRE item to matcher and to the value.
8118  *
8119  * @param[in, out] matcher
8120  *   Flow matcher.
8121  * @param[in, out] key
8122  *   Flow matcher value.
8123  * @param[in] item
8124  *   Flow pattern to translate.
8125  * @param[in] inner
8126  *   Item is inner pattern.
8127  */
8128 static void
8129 flow_dv_translate_item_gre(void *matcher, void *key,
8130                            const struct rte_flow_item *item,
8131                            int inner)
8132 {
8133         const struct rte_flow_item_gre *gre_m = item->mask;
8134         const struct rte_flow_item_gre *gre_v = item->spec;
8135         void *headers_m;
8136         void *headers_v;
8137         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8138         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8139         struct {
8140                 union {
8141                         __extension__
8142                         struct {
8143                                 uint16_t version:3;
8144                                 uint16_t rsvd0:9;
8145                                 uint16_t s_present:1;
8146                                 uint16_t k_present:1;
8147                                 uint16_t rsvd_bit1:1;
8148                                 uint16_t c_present:1;
8149                         };
8150                         uint16_t value;
8151                 };
8152         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8153
8154         if (inner) {
8155                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8156                                          inner_headers);
8157                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8158         } else {
8159                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8160                                          outer_headers);
8161                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8162         }
8163         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8164         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8165         if (!gre_v)
8166                 return;
8167         if (!gre_m)
8168                 gre_m = &rte_flow_item_gre_mask;
8169         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8170                  rte_be_to_cpu_16(gre_m->protocol));
8171         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8172                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8173         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8174         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8175         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8176                  gre_crks_rsvd0_ver_m.c_present);
8177         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8178                  gre_crks_rsvd0_ver_v.c_present &
8179                  gre_crks_rsvd0_ver_m.c_present);
8180         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8181                  gre_crks_rsvd0_ver_m.k_present);
8182         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8183                  gre_crks_rsvd0_ver_v.k_present &
8184                  gre_crks_rsvd0_ver_m.k_present);
8185         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8186                  gre_crks_rsvd0_ver_m.s_present);
8187         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8188                  gre_crks_rsvd0_ver_v.s_present &
8189                  gre_crks_rsvd0_ver_m.s_present);
8190 }
8191
8192 /**
8193  * Add NVGRE item to matcher and to the value.
8194  *
8195  * @param[in, out] matcher
8196  *   Flow matcher.
8197  * @param[in, out] key
8198  *   Flow matcher value.
8199  * @param[in] item
8200  *   Flow pattern to translate.
8201  * @param[in] inner
8202  *   Item is inner pattern.
8203  */
8204 static void
8205 flow_dv_translate_item_nvgre(void *matcher, void *key,
8206                              const struct rte_flow_item *item,
8207                              int inner)
8208 {
8209         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8210         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8211         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8212         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8213         const char *tni_flow_id_m;
8214         const char *tni_flow_id_v;
8215         char *gre_key_m;
8216         char *gre_key_v;
8217         int size;
8218         int i;
8219
8220         /* For NVGRE, GRE header fields must be set with defined values. */
8221         const struct rte_flow_item_gre gre_spec = {
8222                 .c_rsvd0_ver = RTE_BE16(0x2000),
8223                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8224         };
8225         const struct rte_flow_item_gre gre_mask = {
8226                 .c_rsvd0_ver = RTE_BE16(0xB000),
8227                 .protocol = RTE_BE16(UINT16_MAX),
8228         };
8229         const struct rte_flow_item gre_item = {
8230                 .spec = &gre_spec,
8231                 .mask = &gre_mask,
8232                 .last = NULL,
8233         };
8234         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8235         if (!nvgre_v)
8236                 return;
8237         if (!nvgre_m)
8238                 nvgre_m = &rte_flow_item_nvgre_mask;
8239         tni_flow_id_m = (const char *)nvgre_m->tni;
8240         tni_flow_id_v = (const char *)nvgre_v->tni;
8241         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8242         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8243         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8244         memcpy(gre_key_m, tni_flow_id_m, size);
8245         for (i = 0; i < size; ++i)
8246                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8247 }
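
/*
 * Illustrative sketch (not part of the driver): an NVGRE item as a caller
 * might build it. The 24-bit TNI plus the 8-bit flow_id form the 32-bit
 * GRE key that the translation above copies into gre_key_h byte by byte.
 * All values here are hypothetical.
 */
static const struct rte_flow_item_nvgre nvgre_example_spec __rte_unused = {
        .tni = { 0x12, 0x34, 0x56 },    /* virtual subnet identifier */
        .flow_id = 0x00,                /* low byte of the GRE key */
};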
8248
8249 /**
8250  * Add VXLAN item to matcher and to the value.
8251  *
8252  * @param[in, out] matcher
8253  *   Flow matcher.
8254  * @param[in, out] key
8255  *   Flow matcher value.
8256  * @param[in] item
8257  *   Flow pattern to translate.
8258  * @param[in] inner
8259  *   Item is inner pattern.
8260  */
8261 static void
8262 flow_dv_translate_item_vxlan(void *matcher, void *key,
8263                              const struct rte_flow_item *item,
8264                              int inner)
8265 {
8266         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8267         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8268         void *headers_m;
8269         void *headers_v;
8270         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8271         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8272         char *vni_m;
8273         char *vni_v;
8274         uint16_t dport;
8275         int size;
8276         int i;
8277
8278         if (inner) {
8279                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8280                                          inner_headers);
8281                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8282         } else {
8283                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8284                                          outer_headers);
8285                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8286         }
8287         /* The item type is always VXLAN here, use its UDP port. */
8288         dport = MLX5_UDP_PORT_VXLAN;
8289         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8290                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8291                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8292         }
8293         if (!vxlan_v)
8294                 return;
8295         if (!vxlan_m)
8296                 vxlan_m = &rte_flow_item_vxlan_mask;
8297         size = sizeof(vxlan_m->vni);
8298         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8299         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8300         memcpy(vni_m, vxlan_m->vni, size);
8301         for (i = 0; i < size; ++i)
8302                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8303 }
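
/*
 * Minimal sketch (illustrative, hypothetical helper): the matcher stores
 * pre-masked values, so each VNI byte is written as "spec & mask", exactly
 * as the loop above does.
 */
static inline void
vxlan_example_premask(uint8_t *dst_m, uint8_t *dst_v,
                      const uint8_t *spec, const uint8_t *mask, size_t size)
{
        size_t i;

        for (i = 0; i < size; ++i) {
                dst_m[i] = mask[i];
                dst_v[i] = spec[i] & mask[i];
        }
}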
8304
8305 /**
8306  * Add VXLAN-GPE item to matcher and to the value.
8307  *
8308  * @param[in, out] matcher
8309  *   Flow matcher.
8310  * @param[in, out] key
8311  *   Flow matcher value.
8312  * @param[in] item
8313  *   Flow pattern to translate.
8314  * @param[in] inner
8315  *   Item is inner pattern.
8316  */
8317
8318 static void
8319 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8320                                  const struct rte_flow_item *item, int inner)
8321 {
8322         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8323         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8324         void *headers_m;
8325         void *headers_v;
8326         void *misc_m =
8327                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8328         void *misc_v =
8329                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8330         char *vni_m;
8331         char *vni_v;
8332         uint16_t dport;
8333         int size;
8334         int i;
8335         uint8_t flags_m = 0xff;
8336         uint8_t flags_v = 0xc; /* Default: VNI (I) and next-protocol (P) flags set. */
8337
8338         if (inner) {
8339                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8340                                          inner_headers);
8341                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8342         } else {
8343                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8344                                          outer_headers);
8345                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8346         }
8347         /* The item type is always VXLAN-GPE here, use its UDP port. */
8348         dport = MLX5_UDP_PORT_VXLAN_GPE;
8349         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8350                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8351                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8352         }
8353         if (!vxlan_v)
8354                 return;
8355         if (!vxlan_m)
8356                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8357         size = sizeof(vxlan_m->vni);
8358         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8359         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8360         memcpy(vni_m, vxlan_m->vni, size);
8361         for (i = 0; i < size; ++i)
8362                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8363         if (vxlan_m->flags) {
8364                 flags_m = vxlan_m->flags;
8365                 flags_v = vxlan_v->flags;
8366         }
8367         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8368         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8369         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8370                  vxlan_m->protocol);
8371         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8372                  vxlan_v->protocol);
8373 }
8374
8375 /**
8376  * Add Geneve item to matcher and to the value.
8377  *
8378  * @param[in, out] matcher
8379  *   Flow matcher.
8380  * @param[in, out] key
8381  *   Flow matcher value.
8382  * @param[in] item
8383  *   Flow pattern to translate.
8384  * @param[in] inner
8385  *   Item is inner pattern.
8386  */
8387
8388 static void
8389 flow_dv_translate_item_geneve(void *matcher, void *key,
8390                               const struct rte_flow_item *item, int inner)
8391 {
8392         const struct rte_flow_item_geneve *geneve_m = item->mask;
8393         const struct rte_flow_item_geneve *geneve_v = item->spec;
8394         void *headers_m;
8395         void *headers_v;
8396         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8397         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8398         uint16_t dport;
8399         uint16_t gbhdr_m;
8400         uint16_t gbhdr_v;
8401         char *vni_m;
8402         char *vni_v;
8403         size_t size, i;
8404
8405         if (inner) {
8406                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8407                                          inner_headers);
8408                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8409         } else {
8410                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8411                                          outer_headers);
8412                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8413         }
8414         dport = MLX5_UDP_PORT_GENEVE;
8415         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8416                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8417                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8418         }
8419         if (!geneve_v)
8420                 return;
8421         if (!geneve_m)
8422                 geneve_m = &rte_flow_item_geneve_mask;
8423         size = sizeof(geneve_m->vni);
8424         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8425         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8426         memcpy(vni_m, geneve_m->vni, size);
8427         for (i = 0; i < size; ++i)
8428                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8429         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8430                  rte_be_to_cpu_16(geneve_m->protocol));
8431         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8432                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8433         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8434         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8435         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8436                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8437         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8438                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8439         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8440                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8441         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8442                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8443                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8444 }
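
/*
 * Illustrative sketch (assumes the RFC 8926 GENEVE base header layout):
 * the first big-endian 16 bits carry Ver(2) | Opt Len(6) | O(1) | C(1) |
 * Rsvd(6), so after rte_be_to_cpu_16() the option length and OAM flag
 * matched above can be extracted as below. These helpers are hypothetical;
 * the driver uses the MLX5_GENEVE_*_VAL() macros instead.
 */
static inline uint16_t
geneve_example_opt_len(uint16_t gbhdr)
{
        return (gbhdr >> 8) & 0x3f;     /* option length, in 4-byte words */
}

static inline uint16_t
geneve_example_oam(uint16_t gbhdr)
{
        return (gbhdr >> 7) & 0x1;      /* O (OAM) flag */
}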
8445
8446 /**
8447  * Create Geneve TLV option resource.
8448  *
8449  * @param[in, out] dev
8450  *   Pointer to rte_eth_dev structure.
8451  * @param[in] item
8452  *   Pointer to the GENEVE TLV option item; its option
8453  *   class, type and length identify the shared TLV
8454  *   resource on the device.
8455  * @param[out] error
8456  *   Pointer to error structure.
8457  *
8458  * @return
8459  *   0 on success, a negative errno value otherwise.
8460  */
8461
8462 int
8463 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8464                                              const struct rte_flow_item *item,
8465                                              struct rte_flow_error *error)
8466 {
8467         struct mlx5_priv *priv = dev->data->dev_private;
8468         struct mlx5_dev_ctx_shared *sh = priv->sh;
8469         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8470                         sh->geneve_tlv_option_resource;
8471         struct mlx5_devx_obj *obj;
8472         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8473         int ret = 0;
8474
8475         if (!geneve_opt_v)
8476                 return -1;
8477         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8478         if (geneve_opt_resource != NULL) {
8479                 if (geneve_opt_resource->option_class ==
8480                         geneve_opt_v->option_class &&
8481                         geneve_opt_resource->option_type ==
8482                         geneve_opt_v->option_type &&
8483                         geneve_opt_resource->length ==
8484                         geneve_opt_v->option_len) {
8485                         /* GENEVE TLV option object already allocated, reuse it. */
8486                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8487                                            __ATOMIC_RELAXED);
8488                 } else {
8489                         ret = rte_flow_error_set(error, ENOMEM,
8490                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8491                                 "Only one GENEVE TLV option supported");
8492                         goto exit;
8493                 }
8494         } else {
8495                 /* Create a GENEVE TLV object and resource. */
8496                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8497                                 geneve_opt_v->option_class,
8498                                 geneve_opt_v->option_type,
8499                                 geneve_opt_v->option_len);
8500                 if (!obj) {
8501                         ret = rte_flow_error_set(error, ENODATA,
8502                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8503                                 "Failed to create GENEVE TLV Devx object");
8504                         goto exit;
8505                 }
8506                 sh->geneve_tlv_option_resource =
8507                                 mlx5_malloc(MLX5_MEM_ZERO,
8508                                                 sizeof(*geneve_opt_resource),
8509                                                 0, SOCKET_ID_ANY);
8510                 if (!sh->geneve_tlv_option_resource) {
8511                         claim_zero(mlx5_devx_cmd_destroy(obj));
8512                         ret = rte_flow_error_set(error, ENOMEM,
8513                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8514                                 "GENEVE TLV object memory allocation failed");
8515                         goto exit;
8516                 }
8517                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8518                 geneve_opt_resource->obj = obj;
8519                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8520                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8521                 geneve_opt_resource->length = geneve_opt_v->option_len;
8522                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8523                                 __ATOMIC_RELAXED);
8524         }
8525 exit:
8526         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8527         return ret;
8528 }
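
/*
 * Minimal sketch of the matching release path (illustrative; the driver
 * implements the real one elsewhere): drop one reference under the same
 * spinlock and destroy the shared DevX object once the last user is gone.
 * The function name is hypothetical.
 */
static void __rte_unused
geneve_tlv_option_example_release(struct mlx5_dev_ctx_shared *sh)
{
        struct mlx5_geneve_tlv_option_resource *res;

        rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
        res = sh->geneve_tlv_option_resource;
        if (res &&
            __atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) == 0) {
                claim_zero(mlx5_devx_cmd_destroy(res->obj));
                mlx5_free(res);
                sh->geneve_tlv_option_resource = NULL;
        }
        rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
}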
8529
8530 /**
8531  * Add Geneve TLV option item to matcher.
8532  *
8533  * @param[in, out] dev
8534  *   Pointer to rte_eth_dev structure.
8535  * @param[in, out] matcher
8536  *   Flow matcher.
8537  * @param[in, out] key
8538  *   Flow matcher value.
8539  * @param[in] item
8540  *   Flow pattern to translate.
8541  * @param[out] error
8542  *   Pointer to error structure.
8543  */
8544 static int
8545 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8546                                   void *key, const struct rte_flow_item *item,
8547                                   struct rte_flow_error *error)
8548 {
8549         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8550         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8551         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8552         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8553         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8554                         misc_parameters_3);
8555         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8556         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8557         int ret = 0;
8558
8559         if (!geneve_opt_v)
8560                 return -1;
8561         if (!geneve_opt_m)
8562                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8563         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8564                                                            error);
8565         if (ret) {
8566                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8567                 return ret;
8568         }
8569         /*
8570          * Set the option length in the GENEVE header if not requested.
8571          * The GENEVE TLV option length is expressed by the option
8572          * length field in the GENEVE header. If a match on that field
8573          * was not requested but a GENEVE TLV option item is present,
8574          * derive the option length match implicitly from the item.
8575          */
8576         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8577                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8578                          MLX5_GENEVE_OPTLEN_MASK);
8579                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8580                          geneve_opt_v->option_len + 1);
8581         }
8582         /* Set the data. */
8583         if (geneve_opt_v->data) {
8584                 memcpy(&opt_data_key, geneve_opt_v->data,
8585                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8586                                 sizeof(opt_data_key)));
8587                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8588                                 sizeof(opt_data_key));
8589                 memcpy(&opt_data_mask, geneve_opt_m->data,
8590                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8591                                 sizeof(opt_data_mask)));
8592                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8593                                 sizeof(opt_data_mask));
8594                 MLX5_SET(fte_match_set_misc3, misc3_m,
8595                                 geneve_tlv_option_0_data,
8596                                 rte_be_to_cpu_32(opt_data_mask));
8597                 MLX5_SET(fte_match_set_misc3, misc3_v,
8598                                 geneve_tlv_option_0_data,
8599                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8600         }
8601         return ret;
8602 }
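
/*
 * Illustrative sketch: a GENEVE TLV option item as an application might
 * pass it. option_len is expressed in 4-byte words, which is why the code
 * above multiplies it by 4 when copying data and writes "option_len + 1"
 * (one option header word plus the data words) into the header
 * option-length match. All values are hypothetical.
 */
static uint32_t geneve_opt_example_data[1] = { RTE_BE32(0xdeadbeef) };
static const struct rte_flow_item_geneve_opt geneve_opt_example __rte_unused = {
        .option_class = RTE_BE16(0x0102),
        .option_type = 0x42,
        .option_len = 1,        /* one 4-byte data word */
        .data = geneve_opt_example_data,
};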
8603
8604 /**
8605  * Add MPLS item to matcher and to the value.
8606  *
8607  * @param[in, out] matcher
8608  *   Flow matcher.
8609  * @param[in, out] key
8610  *   Flow matcher value.
8611  * @param[in] item
8612  *   Flow pattern to translate.
8613  * @param[in] prev_layer
8614  *   The protocol layer indicated in previous item.
8615  * @param[in] inner
8616  *   Item is inner pattern.
8617  */
8618 static void
8619 flow_dv_translate_item_mpls(void *matcher, void *key,
8620                             const struct rte_flow_item *item,
8621                             uint64_t prev_layer,
8622                             int inner)
8623 {
8624         const uint32_t *in_mpls_m = item->mask;
8625         const uint32_t *in_mpls_v = item->spec;
8626         uint32_t *out_mpls_m = NULL;
8627         uint32_t *out_mpls_v = NULL;
8628         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8629         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8630         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
8631                                      misc_parameters_2);
8632         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8633         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8634         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8635
8636         switch (prev_layer) {
8637         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8638                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
8639                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8640                          MLX5_UDP_PORT_MPLS);
8641                 break;
8642         case MLX5_FLOW_LAYER_GRE:
8643                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
8644                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8645                          RTE_ETHER_TYPE_MPLS);
8646                 break;
8647         default:
8648                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8649                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8650                          IPPROTO_MPLS);
8651                 break;
8652         }
8653         if (!in_mpls_v)
8654                 return;
8655         if (!in_mpls_m)
8656                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
8657         switch (prev_layer) {
8658         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8659                 out_mpls_m =
8660                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8661                                                  outer_first_mpls_over_udp);
8662                 out_mpls_v =
8663                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8664                                                  outer_first_mpls_over_udp);
8665                 break;
8666         case MLX5_FLOW_LAYER_GRE:
8667                 out_mpls_m =
8668                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8669                                                  outer_first_mpls_over_gre);
8670                 out_mpls_v =
8671                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8672                                                  outer_first_mpls_over_gre);
8673                 break;
8674         default:
8675                 /* Inner MPLS not over GRE is not supported. */
8676                 if (!inner) {
8677                         out_mpls_m =
8678                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8679                                                          misc2_m,
8680                                                          outer_first_mpls);
8681                         out_mpls_v =
8682                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8683                                                          misc2_v,
8684                                                          outer_first_mpls);
8685                 }
8686                 break;
8687         }
8688         if (out_mpls_m && out_mpls_v) {
8689                 *out_mpls_m = *in_mpls_m;
8690                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
8691         }
8692 }
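
/*
 * Illustrative sketch (assumes the RFC 3032 label stack entry layout):
 * one MPLS LSE is a single big-endian 32-bit word,
 * Label(20) | TC(3) | S(1) | TTL(8), which is why the translation above
 * copies spec and mask as whole 32-bit words. The helper is hypothetical.
 */
static inline uint32_t
mpls_example_lse(uint32_t label, uint8_t tc, uint8_t s, uint8_t ttl)
{
        return rte_cpu_to_be_32((label << 12) | ((uint32_t)tc << 9) |
                                ((uint32_t)s << 8) | ttl);
}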
8693
8694 /**
8695  * Add metadata register item to matcher
8696  *
8697  * @param[in, out] matcher
8698  *   Flow matcher.
8699  * @param[in, out] key
8700  *   Flow matcher value.
8701  * @param[in] reg_type
8702  *   Type of device metadata register
8703  * @param[in] value
8704  *   Register value
8705  * @param[in] mask
8706  *   Register mask
8707  */
8708 static void
8709 flow_dv_match_meta_reg(void *matcher, void *key,
8710                        enum modify_reg reg_type,
8711                        uint32_t data, uint32_t mask)
8712 {
8713         void *misc2_m =
8714                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
8715         void *misc2_v =
8716                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8717         uint32_t temp;
8718
8719         data &= mask;
8720         switch (reg_type) {
8721         case REG_A:
8722                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
8723                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
8724                 break;
8725         case REG_B:
8726                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
8727                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
8728                 break;
8729         case REG_C_0:
8730                 /*
8731                  * The metadata register C0 field might be divided into
8732                  * source vport index and META item value, we should set
8733                  * this field according to specified mask, not as whole one.
8734                  */
8735                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
8736                 temp |= mask;
8737                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
8738                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
8739                 temp &= ~mask;
8740                 temp |= data;
8741                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
8742                 break;
8743         case REG_C_1:
8744                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
8745                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
8746                 break;
8747         case REG_C_2:
8748                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
8749                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
8750                 break;
8751         case REG_C_3:
8752                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
8753                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
8754                 break;
8755         case REG_C_4:
8756                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
8757                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
8758                 break;
8759         case REG_C_5:
8760                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
8761                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
8762                 break;
8763         case REG_C_6:
8764                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
8765                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
8766                 break;
8767         case REG_C_7:
8768                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
8769                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
8770                 break;
8771         default:
8772                 MLX5_ASSERT(false);
8773                 break;
8774         }
8775 }
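
/*
 * Worked example (illustrative): assume REG_C_0 already matches vport
 * metadata under mask 0xffff0000 and a META item now adds data under mask
 * 0x0000ffff. The read-modify-write above yields a combined mask of
 * 0xffffffff while only the newly masked value bits are replaced, so both
 * users of C0 keep their bits. The hypothetical helper below reproduces
 * the arithmetic on plain integers.
 */
static inline void
reg_c0_example_merge(uint32_t *m, uint32_t *v, uint32_t data, uint32_t mask)
{
        *m |= mask;                        /* extend the match mask */
        *v = (*v & ~mask) | (data & mask); /* replace only the masked bits */
}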
8776
8777 /**
8778  * Add MARK item to matcher
8779  *
8780  * @param[in] dev
8781  *   The device to configure through.
8782  * @param[in, out] matcher
8783  *   Flow matcher.
8784  * @param[in, out] key
8785  *   Flow matcher value.
8786  * @param[in] item
8787  *   Flow pattern to translate.
8788  */
8789 static void
8790 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
8791                             void *matcher, void *key,
8792                             const struct rte_flow_item *item)
8793 {
8794         struct mlx5_priv *priv = dev->data->dev_private;
8795         const struct rte_flow_item_mark *mark;
8796         uint32_t value;
8797         uint32_t mask;
8798
8799         mark = item->mask ? (const void *)item->mask :
8800                             &rte_flow_item_mark_mask;
8801         mask = mark->id & priv->sh->dv_mark_mask;
8802         mark = (const void *)item->spec;
8803         MLX5_ASSERT(mark);
8804         value = mark->id & priv->sh->dv_mark_mask & mask;
8805         if (mask) {
8806                 enum modify_reg reg;
8807
8808                 /* Get the metadata register index for the mark. */
8809                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
8810                 MLX5_ASSERT(reg > 0);
8811                 if (reg == REG_C_0) {
8812                         /* The outer "priv" already points to the device private data. */
8813                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8814                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8815
8816                         mask &= msk_c0;
8817                         mask <<= shl_c0;
8818                         value <<= shl_c0;
8819                 }
8820                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8821         }
8822 }
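
/*
 * Worked example (illustrative): when only part of REG_C_0 is available
 * for MARK, e.g. dv_regc0_mask == 0xffff0000, rte_bsf32() returns 16 and
 * a mark value of 0xab is shifted to 0x00ab0000 before being programmed,
 * matching the shift arithmetic above. The helper is hypothetical.
 */
static inline uint32_t
reg_c0_example_shift(uint32_t value, uint32_t field_mask)
{
        return (value << rte_bsf32(field_mask)) & field_mask;
}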
8823
8824 /**
8825  * Add META item to matcher
8826  *
8827  * @param[in] dev
8828  *   The device to configure through.
8829  * @param[in, out] matcher
8830  *   Flow matcher.
8831  * @param[in, out] key
8832  *   Flow matcher value.
8833  * @param[in] attr
8834  *   Attributes of flow that includes this item.
8835  * @param[in] item
8836  *   Flow pattern to translate.
8837  */
8838 static void
8839 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
8840                             void *matcher, void *key,
8841                             const struct rte_flow_attr *attr,
8842                             const struct rte_flow_item *item)
8843 {
8844         const struct rte_flow_item_meta *meta_m;
8845         const struct rte_flow_item_meta *meta_v;
8846
8847         meta_m = (const void *)item->mask;
8848         if (!meta_m)
8849                 meta_m = &rte_flow_item_meta_mask;
8850         meta_v = (const void *)item->spec;
8851         if (meta_v) {
8852                 int reg;
8853                 uint32_t value = meta_v->data;
8854                 uint32_t mask = meta_m->data;
8855
8856                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
8857                 if (reg < 0)
8858                         return;
8859                 MLX5_ASSERT(reg != REG_NON);
8860                 /*
8861                  * In datapath code there are no endianness
8862                  * conversions, for performance reasons; all
8863                  * pattern conversions are done in rte_flow.
8864                  */
8865                 value = rte_cpu_to_be_32(value);
8866                 mask = rte_cpu_to_be_32(mask);
8867                 if (reg == REG_C_0) {
8868                         struct mlx5_priv *priv = dev->data->dev_private;
8869                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8870                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8871 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
8872                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
8873
8874                         value >>= shr_c0;
8875                         mask >>= shr_c0;
8876 #endif
8877                         value <<= shl_c0;
8878                         mask <<= shl_c0;
8879                         MLX5_ASSERT(msk_c0);
8880                         MLX5_ASSERT(!(~msk_c0 & mask));
8881                 }
8882                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8883         }
8884 }
8885
8886 /**
8887  * Add vport metadata Reg C0 item to matcher
8888  *
8889  * @param[in, out] matcher
8890  *   Flow matcher.
8891  * @param[in, out] key
8892  *   Flow matcher value.
8893  * @param[in] value
8894  *   Register value to match.
8895  * @param[in] mask
8896  *   Register mask.
8895  */
8896 static void
8897 flow_dv_translate_item_meta_vport(void *matcher, void *key,
8898                                   uint32_t value, uint32_t mask)
8899 {
8900         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
8901 }
8902
8903 /**
8904  * Add tag item to matcher
8905  *
8906  * @param[in] dev
8907  *   The device to configure through.
8908  * @param[in, out] matcher
8909  *   Flow matcher.
8910  * @param[in, out] key
8911  *   Flow matcher value.
8912  * @param[in] item
8913  *   Flow pattern to translate.
8914  */
8915 static void
8916 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
8917                                 void *matcher, void *key,
8918                                 const struct rte_flow_item *item)
8919 {
8920         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
8921         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
8922         uint32_t mask, value;
8923
8924         MLX5_ASSERT(tag_v);
8925         value = tag_v->data;
8926         mask = tag_m ? tag_m->data : UINT32_MAX;
8927         if (tag_v->id == REG_C_0) {
8928                 struct mlx5_priv *priv = dev->data->dev_private;
8929                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8930                 uint32_t shl_c0 = rte_bsf32(msk_c0);
8931
8932                 mask &= msk_c0;
8933                 mask <<= shl_c0;
8934                 value <<= shl_c0;
8935         }
8936         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
8937 }
8938
8939 /**
8940  * Add TAG item to matcher
8941  *
8942  * @param[in] dev
8943  *   The device to configure through.
8944  * @param[in, out] matcher
8945  *   Flow matcher.
8946  * @param[in, out] key
8947  *   Flow matcher value.
8948  * @param[in] item
8949  *   Flow pattern to translate.
8950  */
8951 static void
8952 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
8953                            void *matcher, void *key,
8954                            const struct rte_flow_item *item)
8955 {
8956         const struct rte_flow_item_tag *tag_v = item->spec;
8957         const struct rte_flow_item_tag *tag_m = item->mask;
8958         enum modify_reg reg;
8959
8960         MLX5_ASSERT(tag_v);
8961         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
8962         /* Get the metadata register index for the tag. */
8963         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
8964         MLX5_ASSERT(reg > 0);
8965         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
8966 }
8967
8968 /**
8969  * Add source vport match to the specified matcher.
8970  *
8971  * @param[in, out] matcher
8972  *   Flow matcher.
8973  * @param[in, out] key
8974  *   Flow matcher value.
8975  * @param[in] port
8976  *   Source vport value to match.
8977  * @param[in] mask
8978  *   Mask to apply to the vport value.
8979  */
8980 static void
8981 flow_dv_translate_item_source_vport(void *matcher, void *key,
8982                                     int16_t port, uint16_t mask)
8983 {
8984         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8985         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8986
8987         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
8988         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
8989 }
8990
8991 /**
8992  * Translate port-id item to eswitch match on port-id.
8993  *
8994  * @param[in] dev
8995  *   The device to configure through.
8996  * @param[in, out] matcher
8997  *   Flow matcher.
8998  * @param[in, out] key
8999  *   Flow matcher value.
9000  * @param[in] item
9001  *   Flow pattern to translate.
9002  * @param[in] attr
9003  *   Flow attributes.
9004  *
9005  * @return
9006  *   0 on success, a negative errno value otherwise.
9007  */
9008 static int
9009 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9010                                void *key, const struct rte_flow_item *item,
9011                                const struct rte_flow_attr *attr)
9012 {
9013         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9014         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9015         struct mlx5_priv *priv;
9016         uint16_t mask, id;
9017
9018         mask = pid_m ? pid_m->id : 0xffff;
9019         id = pid_v ? pid_v->id : dev->data->port_id;
9020         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9021         if (!priv)
9022                 return -rte_errno;
9023         /*
9024          * Translate to vport field or to metadata, depending on mode.
9025          * Kernel can use either misc.source_port or half of C0 metadata
9026          * register.
9027          */
9028         if (priv->vport_meta_mask) {
9029                 /*
9030                  * Provide the hint for SW steering library
9031                  * to insert the flow into ingress domain and
9032                  * save the extra vport match.
9033                  */
9034                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9035                     priv->pf_bond < 0 && attr->transfer)
9036                         flow_dv_translate_item_source_vport
9037                                 (matcher, key, priv->vport_id, mask);
9038                 /*
9039                  * We should always set the vport metadata register,
9040                  * otherwise the SW steering library can drop
9041                  * the rule if wire vport metadata value is not zero,
9042                  * it depends on kernel configuration.
9043                  */
9044                 flow_dv_translate_item_meta_vport(matcher, key,
9045                                                   priv->vport_meta_tag,
9046                                                   priv->vport_meta_mask);
9047         } else {
9048                 flow_dv_translate_item_source_vport(matcher, key,
9049                                                     priv->vport_id, mask);
9050         }
9051         return 0;
9052 }
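
/*
 * Illustrative sketch: the kind of pattern this translation serves, a
 * transfer (E-Switch) rule matching traffic coming from another DPDK
 * port. The port number is hypothetical.
 */
static const struct rte_flow_item_port_id port_id_example_spec = { .id = 1 };
static const struct rte_flow_item port_id_example_pattern[] __rte_unused = {
        {
                .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
                .spec = &port_id_example_spec,
                .mask = &rte_flow_item_port_id_mask,
        },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};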
9053
9054 /**
9055  * Add ICMP6 item to matcher and to the value.
9056  *
9057  * @param[in, out] matcher
9058  *   Flow matcher.
9059  * @param[in, out] key
9060  *   Flow matcher value.
9061  * @param[in] item
9062  *   Flow pattern to translate.
9063  * @param[in] inner
9064  *   Item is inner pattern.
9065  */
9066 static void
9067 flow_dv_translate_item_icmp6(void *matcher, void *key,
9068                               const struct rte_flow_item *item,
9069                               int inner)
9070 {
9071         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9072         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9073         void *headers_m;
9074         void *headers_v;
9075         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9076                                      misc_parameters_3);
9077         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9078         if (inner) {
9079                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9080                                          inner_headers);
9081                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9082         } else {
9083                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9084                                          outer_headers);
9085                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9086         }
9087         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9088         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9089         if (!icmp6_v)
9090                 return;
9091         if (!icmp6_m)
9092                 icmp6_m = &rte_flow_item_icmp6_mask;
9093         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9094         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9095                  icmp6_v->type & icmp6_m->type);
9096         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9097         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9098                  icmp6_v->code & icmp6_m->code);
9099 }
9100
9101 /**
9102  * Add ICMP item to matcher and to the value.
9103  *
9104  * @param[in, out] matcher
9105  *   Flow matcher.
9106  * @param[in, out] key
9107  *   Flow matcher value.
9108  * @param[in] item
9109  *   Flow pattern to translate.
9110  * @param[in] inner
9111  *   Item is inner pattern.
9112  */
9113 static void
9114 flow_dv_translate_item_icmp(void *matcher, void *key,
9115                             const struct rte_flow_item *item,
9116                             int inner)
9117 {
9118         const struct rte_flow_item_icmp *icmp_m = item->mask;
9119         const struct rte_flow_item_icmp *icmp_v = item->spec;
9120         uint32_t icmp_header_data_m = 0;
9121         uint32_t icmp_header_data_v = 0;
9122         void *headers_m;
9123         void *headers_v;
9124         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9125                                      misc_parameters_3);
9126         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9127         if (inner) {
9128                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9129                                          inner_headers);
9130                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9131         } else {
9132                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9133                                          outer_headers);
9134                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9135         }
9136         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9137         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9138         if (!icmp_v)
9139                 return;
9140         if (!icmp_m)
9141                 icmp_m = &rte_flow_item_icmp_mask;
9142         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9143                  icmp_m->hdr.icmp_type);
9144         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9145                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9146         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9147                  icmp_m->hdr.icmp_code);
9148         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9149                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9150         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9151         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9152         if (icmp_header_data_m) {
9153                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9154                 icmp_header_data_v |=
9155                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9156                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9157                          icmp_header_data_m);
9158                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9159                          icmp_header_data_v & icmp_header_data_m);
9160         }
9161 }
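
/*
 * Illustrative sketch (hypothetical helper): the icmp_header_data match
 * field packs the ICMP identifier into the upper 16 bits and the sequence
 * number into the lower 16 bits in CPU order, mirroring the combining
 * done above.
 */
static inline uint32_t
icmp_example_header_data(rte_be16_t ident, rte_be16_t seq_nb)
{
        return ((uint32_t)rte_be_to_cpu_16(ident) << 16) |
               rte_be_to_cpu_16(seq_nb);
}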
9162
9163 /**
9164  * Add GTP item to matcher and to the value.
9165  *
9166  * @param[in, out] matcher
9167  *   Flow matcher.
9168  * @param[in, out] key
9169  *   Flow matcher value.
9170  * @param[in] item
9171  *   Flow pattern to translate.
9172  * @param[in] inner
9173  *   Item is inner pattern.
9174  */
9175 static void
9176 flow_dv_translate_item_gtp(void *matcher, void *key,
9177                            const struct rte_flow_item *item, int inner)
9178 {
9179         const struct rte_flow_item_gtp *gtp_m = item->mask;
9180         const struct rte_flow_item_gtp *gtp_v = item->spec;
9181         void *headers_m;
9182         void *headers_v;
9183         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9184                                      misc_parameters_3);
9185         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9186         uint16_t dport = RTE_GTPU_UDP_PORT;
9187
9188         if (inner) {
9189                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9190                                          inner_headers);
9191                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9192         } else {
9193                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9194                                          outer_headers);
9195                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9196         }
9197         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9198                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9199                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9200         }
9201         if (!gtp_v)
9202                 return;
9203         if (!gtp_m)
9204                 gtp_m = &rte_flow_item_gtp_mask;
9205         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9206                  gtp_m->v_pt_rsv_flags);
9207         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9208                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9209         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9210         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9211                  gtp_v->msg_type & gtp_m->msg_type);
9212         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9213                  rte_be_to_cpu_32(gtp_m->teid));
9214         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9215                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9216 }
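
/*
 * Illustrative sketch: a GTP-U item matching one tunnel endpoint ID. The
 * TEID stays big-endian inside the item and is byte-swapped only when
 * written into the match parameters, as done above. The value is
 * hypothetical.
 */
static const struct rte_flow_item_gtp gtp_example_spec __rte_unused = {
        .teid = RTE_BE32(1234),
};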
9217
9218 /**
9219  * Add GTP PSC item to matcher.
9220  *
9221  * @param[in, out] matcher
9222  *   Flow matcher.
9223  * @param[in, out] key
9224  *   Flow matcher value.
9225  * @param[in] item
9226  *   Flow pattern to translate.
9227  */
9228 static int
9229 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9230                                const struct rte_flow_item *item)
9231 {
9232         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9233         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9234         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9235                         misc_parameters_3);
9236         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9237         union {
9238                 uint32_t w32;
9239                 struct {
9240                         uint16_t seq_num;
9241                         uint8_t npdu_num;
9242                         uint8_t next_ext_header_type;
9243                 };
9244         } dw_2;
9245         uint8_t gtp_flags;
9246
9247         /* Always set the E-flag match to one, regardless of GTP item settings. */
9248         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9249         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9250         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9251         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9252         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9253         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9254         /* Set the next extension header type. */
9255         dw_2.seq_num = 0;
9256         dw_2.npdu_num = 0;
9257         dw_2.next_ext_header_type = 0xff;
9258         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9259                  rte_cpu_to_be_32(dw_2.w32));
9260         dw_2.seq_num = 0;
9261         dw_2.npdu_num = 0;
9262         dw_2.next_ext_header_type = 0x85;
9263         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9264                  rte_cpu_to_be_32(dw_2.w32));
9265         if (gtp_psc_v) {
9266                 union {
9267                         uint32_t w32;
9268                         struct {
9269                                 uint8_t len;
9270                                 uint8_t type_flags;
9271                                 uint8_t qfi;
9272                                 uint8_t reserved;
9273                         };
9274                 } dw_0;
9275
9276                 /* Set the extension header PDU type and QoS. */
9277                 if (!gtp_psc_m)
9278                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9279                 dw_0.w32 = 0;
9280                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9281                 dw_0.qfi = gtp_psc_m->qfi;
9282                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9283                          rte_cpu_to_be_32(dw_0.w32));
9284                 dw_0.w32 = 0;
9285                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9286                                                         gtp_psc_m->pdu_type);
9287                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9288                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9289                          rte_cpu_to_be_32(dw_0.w32));
9290         }
9291         return 0;
9292 }
9293
9294 /**
9295  * Add eCPRI item to matcher and to the value.
9296  *
9297  * @param[in] dev
9298  *   The device to configure through.
9299  * @param[in, out] matcher
9300  *   Flow matcher.
9301  * @param[in, out] key
9302  *   Flow matcher value.
9303  * @param[in] item
9304  *   Flow pattern to translate.
9305  * @param[in] samples
9306  *   Sample IDs to be used in the matching.
9307  */
9308 static void
9309 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9310                              void *key, const struct rte_flow_item *item)
9311 {
9312         struct mlx5_priv *priv = dev->data->dev_private;
9313         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9314         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9315         struct rte_ecpri_common_hdr common;
9316         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9317                                      misc_parameters_4);
9318         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9319         uint32_t *samples;
9320         void *dw_m;
9321         void *dw_v;
9322
9323         if (!ecpri_v)
9324                 return;
9325         if (!ecpri_m)
9326                 ecpri_m = &rte_flow_item_ecpri_mask;
9327         /*
9328          * At most four DW samples are supported in a single matching now.
9329          * Two are currently used for eCPRI matching:
9330          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
9331          * 2. ID of a message: one or two bytes, mask 0xffff0000 or
9332          *    0xff000000 if any.
9333          */
9334         if (!ecpri_m->hdr.common.u32)
9335                 return;
9336         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9337         /* Need to take the whole DW as the mask to fill the entry. */
9338         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9339                             prog_sample_field_value_0);
9340         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9341                             prog_sample_field_value_0);
9342         /* Already big endian (network order) in the header. */
9343         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9344         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9345         /* Sample#0, used for matching type, offset 0. */
9346         MLX5_SET(fte_match_set_misc4, misc4_m,
9347                  prog_sample_field_id_0, samples[0]);
9348         /* It makes no sense to set the sample ID in the mask field. */
9349         MLX5_SET(fte_match_set_misc4, misc4_v,
9350                  prog_sample_field_id_0, samples[0]);
9351         /*
9352          * Checking if message body part needs to be matched.
9353          * Some wildcard rules only matching type field should be supported.
9354          */
9355         if (ecpri_m->hdr.dummy[0]) {
9356                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9357                 switch (common.type) {
9358                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9359                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9360                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9361                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9362                                             prog_sample_field_value_1);
9363                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9364                                             prog_sample_field_value_1);
9365                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9366                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9367                                             ecpri_m->hdr.dummy[0];
9368                         /* Sample#1, to match message body, offset 4. */
9369                         MLX5_SET(fte_match_set_misc4, misc4_m,
9370                                  prog_sample_field_id_1, samples[1]);
9371                         MLX5_SET(fte_match_set_misc4, misc4_v,
9372                                  prog_sample_field_id_1, samples[1]);
9373                         break;
9374                 default:
9375                         /* Others, do not match any sample ID. */
9376                         break;
9377                 }
9378         }
9379 }
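
/*
 * Illustrative sketch (field layout per the comment above, initializer
 * values hypothetical): an eCPRI mask matching the one-byte message type
 * (0x00ff0000 in network order) and a two-byte message ID in the first
 * body word (0xffff0000).
 */
static const struct rte_flow_item_ecpri ecpri_example_mask __rte_unused = {
        .hdr = {
                .common = { .u32 = RTE_BE32(0x00ff0000) },
                .dummy = { RTE_BE32(0xffff0000) },
        },
};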
9380
9381 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9382
9383 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9384         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9385                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9386
9387 /**
9388  * Calculate flow matcher enable bitmap.
9389  *
9390  * @param match_criteria
9391  *   Pointer to flow matcher criteria.
9392  *
9393  * @return
9394  *   Bitmap of enabled fields.
9395  */
9396 static uint8_t
9397 flow_dv_matcher_enable(uint32_t *match_criteria)
9398 {
9399         uint8_t match_criteria_enable;
9400
9401         match_criteria_enable =
9402                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9403                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9404         match_criteria_enable |=
9405                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9406                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9407         match_criteria_enable |=
9408                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9409                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9410         match_criteria_enable |=
9411                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9412                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9413         match_criteria_enable |=
9414                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9415                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9416         match_criteria_enable |=
9417                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9418                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9419         return match_criteria_enable;
9420 }
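
/*
 * Worked example (illustrative): a matcher populating only the outer
 * L2-L4 headers and misc_parameters (say outer IPv4 plus a VXLAN VNI)
 * gets the OUTER and MISC bits set here, while empty match sets
 * contribute nothing. The hypothetical helper below shows the bit
 * arithmetic for those two sets.
 */
static inline uint8_t
matcher_example_criteria(bool outer, bool misc)
{
        return ((uint8_t)outer << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
               ((uint8_t)misc << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT);
}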
9421
9422 struct mlx5_hlist_entry *
9423 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9424 {
9425         struct mlx5_dev_ctx_shared *sh = list->ctx;
9426         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9427         struct rte_eth_dev *dev = ctx->dev;
9428         struct mlx5_flow_tbl_data_entry *tbl_data;
9429         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9430         struct rte_flow_error *error = ctx->error;
9431         union mlx5_flow_tbl_key key = { .v64 = key64 };
9432         struct mlx5_flow_tbl_resource *tbl;
9433         void *domain;
9434         uint32_t idx = 0;
9435         int ret;
9436
9437         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9438         if (!tbl_data) {
9439                 rte_flow_error_set(error, ENOMEM,
9440                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9441                                    NULL,
9442                                    "cannot allocate flow table data entry");
9443                 return NULL;
9444         }
9445         tbl_data->idx = idx;
9446         tbl_data->tunnel = tt_prm->tunnel;
9447         tbl_data->group_id = tt_prm->group_id;
9448         tbl_data->external = !!tt_prm->external;
9449         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9450         tbl_data->is_egress = !!key.is_egress;
9451         tbl_data->is_transfer = !!key.is_fdb;
9452         tbl_data->dummy = !!key.dummy;
9453         tbl_data->level = key.level;
9454         tbl_data->id = key.id;
9455         tbl = &tbl_data->tbl;
9456         if (key.dummy)
9457                 return &tbl_data->entry;
9458         if (key.is_fdb)
9459                 domain = sh->fdb_domain;
9460         else if (key.is_egress)
9461                 domain = sh->tx_domain;
9462         else
9463                 domain = sh->rx_domain;
9464         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
9465         if (ret) {
9466                 rte_flow_error_set(error, ENOMEM,
9467                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9468                                    NULL, "cannot create flow table object");
9469                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9470                 return NULL;
9471         }
9472         if (key.level != 0) {
9473                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9474                                         (tbl->obj, &tbl_data->jump.action);
9475                 if (ret) {
9476                         rte_flow_error_set(error, ENOMEM,
9477                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9478                                            NULL,
9479                                            "cannot create flow jump action");
9480                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9481                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9482                         return NULL;
9483                 }
9484         }
9485         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
9486               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
9487               key.level, key.id);
9488         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9489                              flow_dv_matcher_create_cb,
9490                              flow_dv_matcher_match_cb,
9491                              flow_dv_matcher_remove_cb);
9492         return &tbl_data->entry;
9493 }
9494
9495 int
9496 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9497                      struct mlx5_hlist_entry *entry, uint64_t key64,
9498                      void *cb_ctx __rte_unused)
9499 {
9500         struct mlx5_flow_tbl_data_entry *tbl_data =
9501                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9502         union mlx5_flow_tbl_key key = { .v64 = key64 };
9503
9504         return tbl_data->level != key.level ||
9505                tbl_data->id != key.id ||
9506                tbl_data->dummy != key.dummy ||
9507                tbl_data->is_transfer != !!key.is_fdb ||
9508                tbl_data->is_egress != !!key.is_egress;
9509 }
9510
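/*
 * A minimal sketch of how the 64-bit key consumed by the create/match
 * callbacks above is composed (field values are illustrative only):
 *
 *	union mlx5_flow_tbl_key key = {
 *		{
 *			.level = 1,
 *			.id = 0,
 *			.dummy = 0,
 *			.is_fdb = 1,
 *			.is_egress = 0,
 *		}
 *	};
 *
 * key.v64 is the value the hash list passes as key64 to
 * flow_dv_tbl_create_cb() and flow_dv_tbl_match_cb().
 */
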
/**
 * Get a flow table.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] table_level
 *   Table level to use.
 * @param[in] egress
 *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] external
 *   Indicates whether the table is created by application request.
 * @param[in] tunnel
 *   Tunnel offload data, NULL if the table is not used for tunnel offload.
 * @param[in] group_id
 *   Original flow group id the table belongs to.
 * @param[in] dummy
 *   Dummy entry for dv API.
 * @param[in] table_id
 *   Table id to use.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Returns the table resource matching the key, NULL in case of failure.
 */
9532 struct mlx5_flow_tbl_resource *
9533 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
9534                          uint32_t table_level, uint8_t egress,
9535                          uint8_t transfer,
9536                          bool external,
9537                          const struct mlx5_flow_tunnel *tunnel,
9538                          uint32_t group_id, uint8_t dummy,
9539                          uint32_t table_id,
9540                          struct rte_flow_error *error)
9541 {
9542         struct mlx5_priv *priv = dev->data->dev_private;
9543         union mlx5_flow_tbl_key table_key = {
9544                 {
9545                         .level = table_level,
9546                         .id = table_id,
9547                         .reserved = 0,
9548                         .dummy = !!dummy,
9549                         .is_fdb = !!transfer,
9550                         .is_egress = !!egress,
9551                 }
9552         };
9553         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
9554                 .tunnel = tunnel,
9555                 .group_id = group_id,
9556                 .external = external,
9557         };
9558         struct mlx5_flow_cb_ctx ctx = {
9559                 .dev = dev,
9560                 .error = error,
9561                 .data = &tt_prm,
9562         };
9563         struct mlx5_hlist_entry *entry;
9564         struct mlx5_flow_tbl_data_entry *tbl_data;
9565
9566         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
9567         if (!entry) {
9568                 rte_flow_error_set(error, ENOMEM,
9569                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9570                                    "cannot get table");
9571                 return NULL;
9572         }
9573         DRV_LOG(DEBUG, "table_level %u table_id %u "
9574                 "tunnel %u group %u registered.",
9575                 table_level, table_id,
9576                 tunnel ? tunnel->tunnel_id : 0, group_id);
9577         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9578         return &tbl_data->tbl;
9579 }
9580
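/*
 * A minimal usage sketch for the getter above (argument values are
 * illustrative): take a reference on a NIC ingress table at level 1
 * and drop it when the flow using it is destroyed.
 *
 *	struct mlx5_flow_tbl_resource *tbl;
 *
 *	tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *				       0, 0, 0, error);
 *	if (!tbl)
 *		return -rte_errno;
 *	...
 *	flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
 */
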
9581 void
9582 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
9583                       struct mlx5_hlist_entry *entry)
9584 {
9585         struct mlx5_dev_ctx_shared *sh = list->ctx;
9586         struct mlx5_flow_tbl_data_entry *tbl_data =
9587                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9588
9589         MLX5_ASSERT(entry && sh);
9590         if (tbl_data->jump.action)
9591                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
9592         if (tbl_data->tbl.obj)
9593                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
9594         if (tbl_data->tunnel_offload && tbl_data->external) {
9595                 struct mlx5_hlist_entry *he;
9596                 struct mlx5_hlist *tunnel_grp_hash;
9597                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
9598                 union tunnel_tbl_key tunnel_key = {
9599                         .tunnel_id = tbl_data->tunnel ?
9600                                         tbl_data->tunnel->tunnel_id : 0,
9601                         .group = tbl_data->group_id
9602                 };
9603                 uint32_t table_level = tbl_data->level;
9604
9605                 tunnel_grp_hash = tbl_data->tunnel ?
9606                                         tbl_data->tunnel->groups :
9607                                         thub->groups;
9608                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
9609                 if (he)
9610                         mlx5_hlist_unregister(tunnel_grp_hash, he);
9611                 DRV_LOG(DEBUG,
9612                         "table_level %u id %u tunnel %u group %u released.",
9613                         table_level,
9614                         tbl_data->id,
9615                         tbl_data->tunnel ?
9616                         tbl_data->tunnel->tunnel_id : 0,
9617                         tbl_data->group_id);
9618         }
9619         mlx5_cache_list_destroy(&tbl_data->matchers);
9620         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
9621 }
9622
9623 /**
9624  * Release a flow table.
9625  *
9626  * @param[in] sh
9627  *   Pointer to device shared structure.
9628  * @param[in] tbl
9629  *   Table resource to be released.
9630  *
9631  * @return
9632  *   Returns 0 if the table was released, otherwise 1.
9633  */
9634 static int
9635 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
9636                              struct mlx5_flow_tbl_resource *tbl)
9637 {
9638         struct mlx5_flow_tbl_data_entry *tbl_data;
9639
9640         if (!tbl)
9641                 return 0;
9642         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9643         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
9644 }
9645
9646 int
9647 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
9648                          struct mlx5_cache_entry *entry, void *cb_ctx)
9649 {
9650         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9651         struct mlx5_flow_dv_matcher *ref = ctx->data;
9652         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
9653                                                         entry);
9654
9655         return cur->crc != ref->crc ||
9656                cur->priority != ref->priority ||
9657                memcmp((const void *)cur->mask.buf,
9658                       (const void *)ref->mask.buf, ref->mask.size);
9659 }
9660
9661 struct mlx5_cache_entry *
9662 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
9663                           struct mlx5_cache_entry *entry __rte_unused,
9664                           void *cb_ctx)
9665 {
9666         struct mlx5_dev_ctx_shared *sh = list->ctx;
9667         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9668         struct mlx5_flow_dv_matcher *ref = ctx->data;
9669         struct mlx5_flow_dv_matcher *cache;
9670         struct mlx5dv_flow_matcher_attr dv_attr = {
9671                 .type = IBV_FLOW_ATTR_NORMAL,
9672                 .match_mask = (void *)&ref->mask,
9673         };
9674         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
9675                                                             typeof(*tbl), tbl);
9676         int ret;
9677
9678         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
9679         if (!cache) {
9680                 rte_flow_error_set(ctx->error, ENOMEM,
9681                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9682                                    "cannot create matcher");
9683                 return NULL;
9684         }
9685         *cache = *ref;
9686         dv_attr.match_criteria_enable =
9687                 flow_dv_matcher_enable(cache->mask.buf);
9688         dv_attr.priority = ref->priority;
9689         if (tbl->is_egress)
9690                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
9691         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
9692                                                &cache->matcher_object);
9693         if (ret) {
9694                 mlx5_free(cache);
9695                 rte_flow_error_set(ctx->error, ENOMEM,
9696                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9697                                    "cannot create matcher");
9698                 return NULL;
9699         }
9700         return &cache->entry;
9701 }
9702
/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to the flow matcher reference.
 * @param[in, out] key
 *   Pointer to flow table key.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Tunnel offload data, NULL if the flow is not a tunnel offload flow.
 * @param[in] group_id
 *   Original flow group id.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
9720 static int
9721 flow_dv_matcher_register(struct rte_eth_dev *dev,
9722                          struct mlx5_flow_dv_matcher *ref,
9723                          union mlx5_flow_tbl_key *key,
9724                          struct mlx5_flow *dev_flow,
9725                          const struct mlx5_flow_tunnel *tunnel,
9726                          uint32_t group_id,
9727                          struct rte_flow_error *error)
9728 {
9729         struct mlx5_cache_entry *entry;
9730         struct mlx5_flow_dv_matcher *cache;
9731         struct mlx5_flow_tbl_resource *tbl;
9732         struct mlx5_flow_tbl_data_entry *tbl_data;
9733         struct mlx5_flow_cb_ctx ctx = {
9734                 .error = error,
9735                 .data = ref,
9736         };
9737
9738         /*
9739          * The tunnel offload API requires this registration for cases when
9740          * a tunnel match rule was inserted before the tunnel set rule.
9741          */
9742         tbl = flow_dv_tbl_resource_get(dev, key->level,
9743                                        key->is_egress, key->is_fdb,
9744                                        dev_flow->external, tunnel,
9745                                        group_id, 0, key->id, error);
9746         if (!tbl)
9747                 return -rte_errno;      /* No need to refill the error info */
9748         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9749         ref->tbl = tbl;
9750         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
9751         if (!entry) {
9752                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9753                 return rte_flow_error_set(error, ENOMEM,
9754                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9755                                           "cannot allocate ref memory");
9756         }
9757         cache = container_of(entry, typeof(*cache), entry);
9758         dev_flow->handle->dvh.matcher = cache;
9759         return 0;
9760 }
9761
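/*
 * A hedged sketch of driving the registration above; the matcher mask
 * buffer and size are assumed to be already filled by the translation
 * stage, and the remaining values are illustrative.
 *
 *	struct mlx5_flow_dv_matcher matcher = { .priority = 0 };
 *	union mlx5_flow_tbl_key tbl_key = { { .level = 1 } };
 *
 *	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 *				    matcher.mask.size);
 *	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
 *				     NULL, 0, error))
 *		return -rte_errno;
 */
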
9762 struct mlx5_hlist_entry *
9763 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
9764 {
9765         struct mlx5_dev_ctx_shared *sh = list->ctx;
9766         struct rte_flow_error *error = ctx;
9767         struct mlx5_flow_dv_tag_resource *entry;
9768         uint32_t idx = 0;
9769         int ret;
9770
9771         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
9772         if (!entry) {
9773                 rte_flow_error_set(error, ENOMEM,
9774                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9775                                    "cannot allocate resource memory");
9776                 return NULL;
9777         }
9778         entry->idx = idx;
9779         entry->tag_id = key;
9780         ret = mlx5_flow_os_create_flow_action_tag(key,
9781                                                   &entry->action);
9782         if (ret) {
9783                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
9784                 rte_flow_error_set(error, ENOMEM,
9785                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9786                                    NULL, "cannot create action");
9787                 return NULL;
9788         }
9789         return &entry->entry;
9790 }
9791
9792 int
9793 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
9794                      struct mlx5_hlist_entry *entry, uint64_t key,
9795                      void *cb_ctx __rte_unused)
9796 {
9797         struct mlx5_flow_dv_tag_resource *tag =
9798                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9799
9800         return key != tag->tag_id;
9801 }
9802
9803 /**
9804  * Find existing tag resource or create and register a new one.
9805  *
9806  * @param[in, out] dev
9807  *   Pointer to rte_eth_dev structure.
9808  * @param[in] tag_be24
9809  *   Tag value in big-endian, right-shifted by 8 bits.
9810  * @param[in, out] dev_flow
9811  *   Pointer to the dev_flow.
9812  * @param[out] error
9813  *   Pointer to error structure.
9814  *
9815  * @return
9816  *   0 on success, a negative errno value otherwise and rte_errno is set.
9817  */
9818 static int
9819 flow_dv_tag_resource_register
9820                         (struct rte_eth_dev *dev,
9821                          uint32_t tag_be24,
9822                          struct mlx5_flow *dev_flow,
9823                          struct rte_flow_error *error)
9824 {
9825         struct mlx5_priv *priv = dev->data->dev_private;
9826         struct mlx5_flow_dv_tag_resource *cache_resource;
9827         struct mlx5_hlist_entry *entry;
9828
9829         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
9830         if (entry) {
9831                 cache_resource = container_of
9832                         (entry, struct mlx5_flow_dv_tag_resource, entry);
9833                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
9834                 dev_flow->dv.tag_resource = cache_resource;
9835                 return 0;
9836         }
9837         return -rte_errno;
9838 }
9839
9840 void
9841 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
9842                       struct mlx5_hlist_entry *entry)
9843 {
9844         struct mlx5_dev_ctx_shared *sh = list->ctx;
9845         struct mlx5_flow_dv_tag_resource *tag =
9846                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9847
9848         MLX5_ASSERT(tag && sh && tag->action);
9849         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
9850         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
9851         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
9852 }
9853
9854 /**
9855  * Release the tag.
9856  *
9857  * @param dev
9858  *   Pointer to Ethernet device.
9859  * @param tag_idx
9860  *   Tag index.
9861  *
9862  * @return
9863  *   1 while a reference on it exists, 0 when freed.
9864  */
9865 static int
9866 flow_dv_tag_release(struct rte_eth_dev *dev,
9867                     uint32_t tag_idx)
9868 {
9869         struct mlx5_priv *priv = dev->data->dev_private;
9870         struct mlx5_flow_dv_tag_resource *tag;
9871
9872         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
9873         if (!tag)
9874                 return 0;
9875         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
9876                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
9877         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
9878 }
9879
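/*
 * Tag lifecycle sketch, mirroring the sample translation below
 * (mark_id is a hypothetical application mark value):
 *
 *	uint32_t tag_be = mlx5_flow_mark_set(mark_id);
 *
 *	if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
 *		return -rte_errno;
 *	...
 *	flow_dv_tag_release(dev, dev_flow->handle->dvh.rix_tag);
 */
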
9880 /**
9881  * Translate port ID action to vport.
9882  *
9883  * @param[in] dev
9884  *   Pointer to rte_eth_dev structure.
9885  * @param[in] action
9886  *   Pointer to the port ID action.
9887  * @param[out] dst_port_id
9888  *   The target port ID.
9889  * @param[out] error
9890  *   Pointer to the error structure.
9891  *
9892  * @return
9893  *   0 on success, a negative errno value otherwise and rte_errno is set.
9894  */
9895 static int
9896 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
9897                                  const struct rte_flow_action *action,
9898                                  uint32_t *dst_port_id,
9899                                  struct rte_flow_error *error)
9900 {
9901         uint32_t port;
9902         struct mlx5_priv *priv;
9903         const struct rte_flow_action_port_id *conf =
9904                         (const struct rte_flow_action_port_id *)action->conf;
9905
9906         port = conf->original ? dev->data->port_id : conf->id;
9907         priv = mlx5_port_to_eswitch_info(port, false);
9908         if (!priv)
9909                 return rte_flow_error_set(error, rte_errno,
9910                                           RTE_FLOW_ERROR_TYPE_ACTION,
9911                                           NULL,
9912                                           "No eswitch info was found for port");
9913 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
9914         /*
9915          * This parameter is transferred to
9916          * mlx5dv_dr_action_create_dest_ib_port().
9917          */
9918         *dst_port_id = priv->dev_port;
9919 #else
9920         /*
9921          * Legacy mode, no LAG configuration is supported.
9922          * This parameter is transferred to
9923          * mlx5dv_dr_action_create_dest_vport().
9924          */
9925         *dst_port_id = priv->vport_id;
9926 #endif
9927         return 0;
9928 }
9929
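/*
 * For example, an action configuration { .id = 1, .original = 0 }
 * resolves eswitch info for DPDK port 1, while { .original = 1 }
 * resolves the device's own port id regardless of .id (values are
 * illustrative).
 */
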
9930 /**
9931  * Create a counter with aging configuration.
9932  *
9933  * @param[in] dev
9934  *   Pointer to rte_eth_dev structure.
9935  * @param[out] count
9936  *   Pointer to the counter action configuration.
9937  * @param[in] age
9938  *   Pointer to the aging action configuration.
9939  *
9940  * @return
9941  *   Index to flow counter on success, 0 otherwise.
9942  */
9943 static uint32_t
9944 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
9945                                 struct mlx5_flow *dev_flow,
9946                                 const struct rte_flow_action_count *count,
9947                                 const struct rte_flow_action_age *age)
9948 {
9949         uint32_t counter;
9950         struct mlx5_age_param *age_param;
9951
9952         if (count && count->shared)
9953                 counter = flow_dv_counter_get_shared(dev, count->id);
9954         else
9955                 counter = flow_dv_counter_alloc(dev, !!age);
9956         if (!counter || age == NULL)
9957                 return counter;
9958         age_param = flow_dv_counter_idx_get_age(dev, counter);
9959         age_param->context = age->context ? age->context :
9960                 (void *)(uintptr_t)(dev_flow->flow_idx);
9961         age_param->timeout = age->timeout;
9962         age_param->port_id = dev->data->port_id;
9963         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
9964         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
9965         return counter;
9966 }
9967
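/*
 * A brief sketch of creating an aged counter (the timeout value is
 * illustrative; both action structures come from rte_flow.h):
 *
 *	const struct rte_flow_action_count count = { .shared = 0, .id = 0 };
 *	const struct rte_flow_action_age age = { .timeout = 10 };
 *	uint32_t cnt_idx;
 *
 *	cnt_idx = flow_dv_translate_create_counter(dev, dev_flow,
 *						   &count, &age);
 *	if (!cnt_idx)
 *		return -rte_errno;
 */
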
9968 /**
9969  * Add Tx queue matcher.
9970  *
9971  * @param[in] dev
9972  *   Pointer to the dev struct.
9973  * @param[in, out] matcher
9974  *   Flow matcher.
9975  * @param[in, out] key
9976  *   Flow matcher value.
9977  * @param[in] item
9978  *   Flow pattern to translate.
9981  */
9982 static void
9983 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
9984                                 void *matcher, void *key,
9985                                 const struct rte_flow_item *item)
9986 {
9987         const struct mlx5_rte_flow_item_tx_queue *queue_m;
9988         const struct mlx5_rte_flow_item_tx_queue *queue_v;
9989         void *misc_m =
9990                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9991         void *misc_v =
9992                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9993         struct mlx5_txq_ctrl *txq;
9994         uint32_t queue;
9995
9997         queue_m = (const void *)item->mask;
9998         if (!queue_m)
9999                 return;
10000         queue_v = (const void *)item->spec;
10001         if (!queue_v)
10002                 return;
10003         txq = mlx5_txq_get(dev, queue_v->queue);
10004         if (!txq)
10005                 return;
10006         queue = txq->obj->sq->id;
10007         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10008         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10009                  queue & queue_m->queue);
10010         mlx5_txq_release(dev, queue_v->queue);
10011 }
10012
10013 /**
10014  * Set the hash fields according to the @p flow information.
10015  *
10016  * @param[in] dev_flow
10017  *   Pointer to the mlx5_flow.
10018  * @param[in] rss_desc
10019  *   Pointer to the mlx5_flow_rss_desc.
10020  */
10021 static void
10022 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10023                        struct mlx5_flow_rss_desc *rss_desc)
10024 {
10025         uint64_t items = dev_flow->handle->layers;
10026         int rss_inner = 0;
10027         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10028
10029         dev_flow->hash_fields = 0;
10030 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10031         if (rss_desc->level >= 2) {
10032                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10033                 rss_inner = 1;
10034         }
10035 #endif
10036         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10037             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10038                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10039                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10040                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10041                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10042                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10043                         else
10044                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10045                 }
10046         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10047                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10048                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10049                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10050                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10051                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10052                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10053                         else
10054                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10055                 }
10056         }
10057         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10058             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10059                 if (rss_types & ETH_RSS_UDP) {
10060                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10061                                 dev_flow->hash_fields |=
10062                                                 IBV_RX_HASH_SRC_PORT_UDP;
10063                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10064                                 dev_flow->hash_fields |=
10065                                                 IBV_RX_HASH_DST_PORT_UDP;
10066                         else
10067                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10068                 }
10069         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10070                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10071                 if (rss_types & ETH_RSS_TCP) {
10072                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10073                                 dev_flow->hash_fields |=
10074                                                 IBV_RX_HASH_SRC_PORT_TCP;
10075                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10076                                 dev_flow->hash_fields |=
10077                                                 IBV_RX_HASH_DST_PORT_TCP;
10078                         else
10079                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10080                 }
10081         }
10082 }
10083
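/*
 * For example, with an outer IPv4/UDP pattern: ETH_RSS_IPV4 together
 * with ETH_RSS_L3_SRC_ONLY selects IBV_RX_HASH_SRC_IPV4 for the L3
 * part, while ETH_RSS_UDP without any ETH_RSS_L4_*_ONLY flag adds
 * MLX5_UDP_IBV_RX_HASH (source and destination UDP ports) for the L4
 * part.
 */
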
10084 /**
10085  * Prepare an Rx Hash queue.
10086  *
10087  * @param dev
10088  *   Pointer to Ethernet device.
10089  * @param[in] dev_flow
10090  *   Pointer to the mlx5_flow.
10091  * @param[in] rss_desc
10092  *   Pointer to the mlx5_flow_rss_desc.
10093  * @param[out] hrxq_idx
10094  *   Hash Rx queue index.
10095  *
10096  * @return
10097  *   The Rx hash queue object initialized, NULL otherwise and rte_errno is set.
10098  */
10099 static struct mlx5_hrxq *
10100 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10101                      struct mlx5_flow *dev_flow,
10102                      struct mlx5_flow_rss_desc *rss_desc,
10103                      uint32_t *hrxq_idx)
10104 {
10105         struct mlx5_priv *priv = dev->data->dev_private;
10106         struct mlx5_flow_handle *dh = dev_flow->handle;
10107         struct mlx5_hrxq *hrxq;
10108
10109         MLX5_ASSERT(rss_desc->queue_num);
10110         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10111         rss_desc->hash_fields = dev_flow->hash_fields;
10112         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10113         rss_desc->shared_rss = 0;
10114         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10115         if (!*hrxq_idx)
10116                 return NULL;
10117         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10118                               *hrxq_idx);
10119         return hrxq;
10120 }
10121
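/*
 * Sketch of preparing a hash Rx queue for a single-queue fate action,
 * as the sample translation below does (queue index is illustrative):
 *
 *	uint32_t hrxq_idx;
 *	struct mlx5_hrxq *hrxq;
 *
 *	rss_desc->queue_num = 1;
 *	rss_desc->queue[0] = 0;
 *	hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
 *	if (!hrxq)
 *		return -rte_errno;
 */
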
10122 /**
10123  * Release sample sub action resource.
10124  *
10125  * @param[in, out] dev
10126  *   Pointer to rte_eth_dev structure.
10127  * @param[in] act_res
10128  *   Pointer to sample sub action resource.
10129  */
10130 static void
10131 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10132                                    struct mlx5_flow_sub_actions_idx *act_res)
10133 {
10134         if (act_res->rix_hrxq) {
10135                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10136                 act_res->rix_hrxq = 0;
10137         }
10138         if (act_res->rix_encap_decap) {
10139                 flow_dv_encap_decap_resource_release(dev,
10140                                                      act_res->rix_encap_decap);
10141                 act_res->rix_encap_decap = 0;
10142         }
10143         if (act_res->rix_port_id_action) {
10144                 flow_dv_port_id_action_resource_release(dev,
10145                                                 act_res->rix_port_id_action);
10146                 act_res->rix_port_id_action = 0;
10147         }
10148         if (act_res->rix_tag) {
10149                 flow_dv_tag_release(dev, act_res->rix_tag);
10150                 act_res->rix_tag = 0;
10151         }
10152         if (act_res->rix_jump) {
10153                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10154                 act_res->rix_jump = 0;
10155         }
10156 }
10157
10158 int
10159 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10160                         struct mlx5_cache_entry *entry, void *cb_ctx)
10161 {
10162         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10163         struct rte_eth_dev *dev = ctx->dev;
10164         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10165         struct mlx5_flow_dv_sample_resource *cache_resource =
10166                         container_of(entry, typeof(*cache_resource), entry);
10167
10168         if (resource->ratio == cache_resource->ratio &&
10169             resource->ft_type == cache_resource->ft_type &&
10170             resource->ft_id == cache_resource->ft_id &&
10171             resource->set_action == cache_resource->set_action &&
10172             !memcmp((void *)&resource->sample_act,
10173                     (void *)&cache_resource->sample_act,
10174                     sizeof(struct mlx5_flow_sub_actions_list))) {
10175                 /*
10176                  * An existing sample action is reused, so release the
10177                  * references taken for the prepared sub-actions.
10178                  */
10179                 flow_dv_sample_sub_actions_release(dev,
10180                                                 &resource->sample_idx);
10181                 return 0;
10182         }
10183         return 1;
10184 }
10185
10186 struct mlx5_cache_entry *
10187 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
10188                          struct mlx5_cache_entry *entry __rte_unused,
10189                          void *cb_ctx)
10190 {
10191         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10192         struct rte_eth_dev *dev = ctx->dev;
10193         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10194         void **sample_dv_actions = resource->sub_actions;
10195         struct mlx5_flow_dv_sample_resource *cache_resource;
10196         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10197         struct mlx5_priv *priv = dev->data->dev_private;
10198         struct mlx5_dev_ctx_shared *sh = priv->sh;
10199         struct mlx5_flow_tbl_resource *tbl;
10200         uint32_t idx = 0;
10201         const uint32_t next_ft_step = 1;
10202         uint32_t next_ft_id = resource->ft_id + next_ft_step;
10203         uint8_t is_egress = 0;
10204         uint8_t is_transfer = 0;
10205         struct rte_flow_error *error = ctx->error;
10206
10207         /* Register new sample resource. */
10208         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10209         if (!cache_resource) {
10210                 rte_flow_error_set(error, ENOMEM,
10211                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10212                                           NULL,
10213                                           "cannot allocate resource memory");
10214                 return NULL;
10215         }
10216         *cache_resource = *resource;
10217         /* Create normal path table level */
10218         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10219                 is_transfer = 1;
10220         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10221                 is_egress = 1;
10222         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10223                                         is_egress, is_transfer,
10224                                         true, NULL, 0, 0, 0, error);
10225         if (!tbl) {
10226                 rte_flow_error_set(error, ENOMEM,
10227                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10228                                           NULL,
10229                                           "failed to create normal path table "
10230                                           "for sample");
10231                 goto error;
10232         }
10233         cache_resource->normal_path_tbl = tbl;
10234         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10235                 if (!sh->default_miss_action) {
10236                         rte_flow_error_set(error, ENOMEM,
10237                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10238                                                 NULL,
10239                                                 "default miss action was not "
10240                                                 "created");
10241                         goto error;
10242                 }
10243                 sample_dv_actions[resource->sample_act.actions_num++] =
10244                                                 sh->default_miss_action;
10245         }
10246         /* Create a DR sample action */
10247         sampler_attr.sample_ratio = cache_resource->ratio;
10248         sampler_attr.default_next_table = tbl->obj;
10249         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
10250         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
10251                                                         &sample_dv_actions[0];
10252         sampler_attr.action = cache_resource->set_action;
10253         if (mlx5_os_flow_dr_create_flow_action_sampler
10254                         (&sampler_attr, &cache_resource->verbs_action)) {
10255                 rte_flow_error_set(error, ENOMEM,
10256                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10257                                         NULL, "cannot create sample action");
10258                 goto error;
10259         }
10260         cache_resource->idx = idx;
10261         cache_resource->dev = dev;
10262         return &cache_resource->entry;
10263 error:
10264         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
10265                 flow_dv_sample_sub_actions_release(dev,
10266                                                    &cache_resource->sample_idx);
10267         if (cache_resource->normal_path_tbl)
10268                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10269                                 cache_resource->normal_path_tbl);
10270         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10271         return NULL;
10273 }
10274
10275 /**
10276  * Find existing sample resource or create and register a new one.
10277  *
10278  * @param[in, out] dev
10279  *   Pointer to rte_eth_dev structure.
10280  * @param[in] resource
10281  *   Pointer to sample resource.
10282  * @param[in, out] dev_flow
10283  *   Pointer to the dev_flow.
10284  * @param[out] error
10285  *   Pointer to error structure.
10286  *
10287  * @return
10288  *   0 on success, a negative errno value otherwise and rte_errno is set.
10289  */
10290 static int
10291 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10292                          struct mlx5_flow_dv_sample_resource *resource,
10293                          struct mlx5_flow *dev_flow,
10294                          struct rte_flow_error *error)
10295 {
10296         struct mlx5_flow_dv_sample_resource *cache_resource;
10297         struct mlx5_cache_entry *entry;
10298         struct mlx5_priv *priv = dev->data->dev_private;
10299         struct mlx5_flow_cb_ctx ctx = {
10300                 .dev = dev,
10301                 .error = error,
10302                 .data = resource,
10303         };
10304
10305         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10306         if (!entry)
10307                 return -rte_errno;
10308         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10309         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10310         dev_flow->dv.sample_res = cache_resource;
10311         return 0;
10312 }
10313
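/*
 * A hedged usage sketch for the registration above; the resource is
 * assumed to be fully populated (ratio, ft_type, ft_id, sub-actions)
 * by flow_dv_translate_action_sample() before the call.
 *
 *	if (flow_dv_sample_resource_register(dev, &res, dev_flow, error))
 *		return -rte_errno;
 *
 * On success dev_flow->dv.sample_res points at the cached resource and
 * dev_flow->handle->dvh.rix_sample holds its index.
 */
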
10314 int
10315 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10316                             struct mlx5_cache_entry *entry, void *cb_ctx)
10317 {
10318         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10319         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10320         struct rte_eth_dev *dev = ctx->dev;
10321         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10322                         container_of(entry, typeof(*cache_resource), entry);
10323         uint32_t idx = 0;
10324
10325         if (resource->num_of_dest == cache_resource->num_of_dest &&
10326             resource->ft_type == cache_resource->ft_type &&
10327             !memcmp((void *)cache_resource->sample_act,
10328                     (void *)resource->sample_act,
10329                    (resource->num_of_dest *
10330                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10331                 /*
10332                  * An existing dest array action is reused, so release the
10333                  * references taken for the prepared sub-actions.
10334                  */
10335                 for (idx = 0; idx < resource->num_of_dest; idx++)
10336                         flow_dv_sample_sub_actions_release(dev,
10337                                         &resource->sample_idx[idx]);
10338                 return 0;
10339         }
10340         return 1;
10341 }
10342
10343 struct mlx5_cache_entry *
10344 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10345                          struct mlx5_cache_entry *entry __rte_unused,
10346                          void *cb_ctx)
10347 {
10348         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10349         struct rte_eth_dev *dev = ctx->dev;
10350         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10351         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10352         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10353         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10354         struct mlx5_priv *priv = dev->data->dev_private;
10355         struct mlx5_dev_ctx_shared *sh = priv->sh;
10356         struct mlx5_flow_sub_actions_list *sample_act;
10357         struct mlx5dv_dr_domain *domain;
10358         uint32_t idx = 0, res_idx = 0;
10359         struct rte_flow_error *error = ctx->error;
10360         uint64_t action_flags;
10361         int ret;
10362
10363         /* Register new destination array resource. */
10364         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10365                                             &res_idx);
10366         if (!cache_resource) {
10367                 rte_flow_error_set(error, ENOMEM,
10368                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10369                                           NULL,
10370                                           "cannot allocate resource memory");
10371                 return NULL;
10372         }
10373         *cache_resource = *resource;
10374         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10375                 domain = sh->fdb_domain;
10376         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10377                 domain = sh->rx_domain;
10378         else
10379                 domain = sh->tx_domain;
10380         for (idx = 0; idx < resource->num_of_dest; idx++) {
10381                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10382                                  mlx5_malloc(MLX5_MEM_ZERO,
10383                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10384                                  0, SOCKET_ID_ANY);
10385                 if (!dest_attr[idx]) {
10386                         rte_flow_error_set(error, ENOMEM,
10387                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10388                                            NULL,
10389                                            "cannot allocate resource memory");
10390                         goto error;
10391                 }
10392                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10393                 sample_act = &resource->sample_act[idx];
10394                 action_flags = sample_act->action_flags;
10395                 switch (action_flags) {
10396                 case MLX5_FLOW_ACTION_QUEUE:
10397                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10398                         break;
10399                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10400                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10401                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10402                         dest_attr[idx]->dest_reformat->reformat =
10403                                         sample_act->dr_encap_action;
10404                         dest_attr[idx]->dest_reformat->dest =
10405                                         sample_act->dr_port_id_action;
10406                         break;
10407                 case MLX5_FLOW_ACTION_PORT_ID:
10408                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10409                         break;
10410                 case MLX5_FLOW_ACTION_JUMP:
10411                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10412                         break;
10413                 default:
10414                         rte_flow_error_set(error, EINVAL,
10415                                            RTE_FLOW_ERROR_TYPE_ACTION,
10416                                            NULL,
10417                                            "unsupported actions type");
10418                         goto error;
10419                 }
10420         }
10421         /* Create a dest array action */
10422         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10423                                                 (domain,
10424                                                  cache_resource->num_of_dest,
10425                                                  dest_attr,
10426                                                  &cache_resource->action);
10427         if (ret) {
10428                 rte_flow_error_set(error, ENOMEM,
10429                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10430                                    NULL,
10431                                    "cannot create destination array action");
10432                 goto error;
10433         }
10434         cache_resource->idx = res_idx;
10435         cache_resource->dev = dev;
10436         for (idx = 0; idx < resource->num_of_dest; idx++)
10437                 mlx5_free(dest_attr[idx]);
10438         return &cache_resource->entry;
10439 error:
10440         for (idx = 0; idx < resource->num_of_dest; idx++) {
10441                 flow_dv_sample_sub_actions_release(dev,
10442                                 &cache_resource->sample_idx[idx]);
10443                 if (dest_attr[idx])
10444                         mlx5_free(dest_attr[idx]);
10445         }
10446
10447         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10448         return NULL;
10449 }
10450
10451 /**
10452  * Find existing destination array resource or create and register a new one.
10453  *
10454  * @param[in, out] dev
10455  *   Pointer to rte_eth_dev structure.
10456  * @param[in] resource
10457  *   Pointer to destination array resource.
10458  * @param[in, out] dev_flow
10459  *   Pointer to the dev_flow.
10460  * @param[out] error
10461  *   Pointer to error structure.
10462  *
10463  * @return
10464  *   0 on success, a negative errno value otherwise and rte_errno is set.
10465  */
10466 static int
10467 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10468                          struct mlx5_flow_dv_dest_array_resource *resource,
10469                          struct mlx5_flow *dev_flow,
10470                          struct rte_flow_error *error)
10471 {
10472         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10473         struct mlx5_priv *priv = dev->data->dev_private;
10474         struct mlx5_cache_entry *entry;
10475         struct mlx5_flow_cb_ctx ctx = {
10476                 .dev = dev,
10477                 .error = error,
10478                 .data = resource,
10479         };
10480
10481         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10482         if (!entry)
10483                 return -rte_errno;
10484         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10485         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10486         dev_flow->dv.dest_array_res = cache_resource;
10487         return 0;
10488 }
10489
10490 /**
10491  * Convert Sample action to DV specification.
10492  *
10493  * @param[in] dev
10494  *   Pointer to rte_eth_dev structure.
10495  * @param[in] action
10496  *   Pointer to sample action structure.
10497  * @param[in, out] dev_flow
10498  *   Pointer to the mlx5_flow.
10499  * @param[in] attr
10500  *   Pointer to the flow attributes.
10501  * @param[in, out] num_of_dest
10502  *   Pointer to the num of destination.
10503  * @param[in, out] sample_actions
10504  *   Pointer to sample actions list.
10505  * @param[in, out] res
10506  *   Pointer to sample resource.
10507  * @param[out] error
10508  *   Pointer to the error structure.
10509  *
10510  * @return
10511  *   0 on success, a negative errno value otherwise and rte_errno is set.
10512  */
10513 static int
10514 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10515                                 const struct rte_flow_action_sample *action,
10516                                 struct mlx5_flow *dev_flow,
10517                                 const struct rte_flow_attr *attr,
10518                                 uint32_t *num_of_dest,
10519                                 void **sample_actions,
10520                                 struct mlx5_flow_dv_sample_resource *res,
10521                                 struct rte_flow_error *error)
10522 {
10523         struct mlx5_priv *priv = dev->data->dev_private;
10524         const struct rte_flow_action *sub_actions;
10525         struct mlx5_flow_sub_actions_list *sample_act;
10526         struct mlx5_flow_sub_actions_idx *sample_idx;
10527         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10528         struct rte_flow *flow = dev_flow->flow;
10529         struct mlx5_flow_rss_desc *rss_desc;
10530         uint64_t action_flags = 0;
10531
10532         MLX5_ASSERT(wks);
10533         rss_desc = &wks->rss_desc;
10534         sample_act = &res->sample_act;
10535         sample_idx = &res->sample_idx;
10536         res->ratio = action->ratio;
10537         sub_actions = action->actions;
10538         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
10539                 int type = sub_actions->type;
10540                 uint32_t pre_rix = 0;
10541                 void *pre_r;
10542                 switch (type) {
10543                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10544                 {
10545                         const struct rte_flow_action_queue *queue;
10546                         struct mlx5_hrxq *hrxq;
10547                         uint32_t hrxq_idx;
10548
10549                         queue = sub_actions->conf;
10550                         rss_desc->queue_num = 1;
10551                         rss_desc->queue[0] = queue->index;
10552                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10553                                                     rss_desc, &hrxq_idx);
10554                         if (!hrxq)
10555                                 return rte_flow_error_set
10556                                         (error, rte_errno,
10557                                          RTE_FLOW_ERROR_TYPE_ACTION,
10558                                          NULL,
10559                                          "cannot create fate queue");
10560                         sample_act->dr_queue_action = hrxq->action;
10561                         sample_idx->rix_hrxq = hrxq_idx;
10562                         sample_actions[sample_act->actions_num++] =
10563                                                 hrxq->action;
10564                         (*num_of_dest)++;
10565                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
10566                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10567                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10568                         dev_flow->handle->fate_action =
10569                                         MLX5_FLOW_FATE_QUEUE;
10570                         break;
10571                 }
10572                 case RTE_FLOW_ACTION_TYPE_RSS:
10573                 {
10574                         struct mlx5_hrxq *hrxq;
10575                         uint32_t hrxq_idx;
10576                         const struct rte_flow_action_rss *rss;
10577                         const uint8_t *rss_key;
10578
10579                         rss = sub_actions->conf;
10580                         memcpy(rss_desc->queue, rss->queue,
10581                                rss->queue_num * sizeof(uint16_t));
10582                         rss_desc->queue_num = rss->queue_num;
10583                         /* NULL RSS key indicates default RSS key. */
10584                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
10585                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
10586                         /*
10587                          * rss->level and rss->types should be set in advance
10588                          * when expanding items for RSS.
10589                          */
10590                         flow_dv_hashfields_set(dev_flow, rss_desc);
10591                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10592                                                     rss_desc, &hrxq_idx);
10593                         if (!hrxq)
10594                                 return rte_flow_error_set
10595                                         (error, rte_errno,
10596                                          RTE_FLOW_ERROR_TYPE_ACTION,
10597                                          NULL,
10598                                          "cannot create fate queue");
10599                         sample_act->dr_queue_action = hrxq->action;
10600                         sample_idx->rix_hrxq = hrxq_idx;
10601                         sample_actions[sample_act->actions_num++] =
10602                                                 hrxq->action;
10603                         (*num_of_dest)++;
10604                         action_flags |= MLX5_FLOW_ACTION_RSS;
10605                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10606                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10607                         dev_flow->handle->fate_action =
10608                                         MLX5_FLOW_FATE_QUEUE;
10609                         break;
10610                 }
10611                 case RTE_FLOW_ACTION_TYPE_MARK:
10612                 {
10613                         uint32_t tag_be = mlx5_flow_mark_set
10614                                 (((const struct rte_flow_action_mark *)
10615                                 (sub_actions->conf))->id);
10616
10617                         dev_flow->handle->mark = 1;
10618                         pre_rix = dev_flow->handle->dvh.rix_tag;
10619                         /* Save the mark resource before sample */
10620                         pre_r = dev_flow->dv.tag_resource;
10621                         if (flow_dv_tag_resource_register(dev, tag_be,
10622                                                   dev_flow, error))
10623                                 return -rte_errno;
10624                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10625                         sample_act->dr_tag_action =
10626                                 dev_flow->dv.tag_resource->action;
10627                         sample_idx->rix_tag =
10628                                 dev_flow->handle->dvh.rix_tag;
10629                         sample_actions[sample_act->actions_num++] =
10630                                                 sample_act->dr_tag_action;
10631                         /* Recover the mark resource after sample */
10632                         dev_flow->dv.tag_resource = pre_r;
10633                         dev_flow->handle->dvh.rix_tag = pre_rix;
10634                         action_flags |= MLX5_FLOW_ACTION_MARK;
10635                         break;
10636                 }
10637                 case RTE_FLOW_ACTION_TYPE_COUNT:
10638                 {
10639                         if (!flow->counter) {
10640                                 flow->counter =
10641                                         flow_dv_translate_create_counter(dev,
10642                                                 dev_flow, sub_actions->conf,
10643                                                 0);
10644                                 if (!flow->counter)
10645                                         return rte_flow_error_set
10646                                                 (error, rte_errno,
10647                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10648                                                 NULL,
10649                                                 "cannot create counter"
10650                                                 " object.");
10651                         }
10652                         sample_act->dr_cnt_action =
10653                                   (flow_dv_counter_get_by_idx(dev,
10654                                   flow->counter, NULL))->action;
10655                         sample_actions[sample_act->actions_num++] =
10656                                                 sample_act->dr_cnt_action;
10657                         action_flags |= MLX5_FLOW_ACTION_COUNT;
10658                         break;
10659                 }
10660                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
10661                 {
10662                         struct mlx5_flow_dv_port_id_action_resource
10663                                         port_id_resource;
10664                         uint32_t port_id = 0;
10665
10666                         memset(&port_id_resource, 0, sizeof(port_id_resource));
10667                         /* Save the port id resource before sample */
10668                         pre_rix = dev_flow->handle->rix_port_id_action;
10669                         pre_r = dev_flow->dv.port_id_action;
10670                         if (flow_dv_translate_action_port_id(dev, sub_actions,
10671                                                              &port_id, error))
10672                                 return -rte_errno;
10673                         port_id_resource.port_id = port_id;
10674                         if (flow_dv_port_id_action_resource_register
10675                             (dev, &port_id_resource, dev_flow, error))
10676                                 return -rte_errno;
10677                         sample_act->dr_port_id_action =
10678                                 dev_flow->dv.port_id_action->action;
10679                         sample_idx->rix_port_id_action =
10680                                 dev_flow->handle->rix_port_id_action;
10681                         sample_actions[sample_act->actions_num++] =
10682                                                 sample_act->dr_port_id_action;
10683                         /* Recover the port id resource after sample */
10684                         dev_flow->dv.port_id_action = pre_r;
10685                         dev_flow->handle->rix_port_id_action = pre_rix;
10686                         (*num_of_dest)++;
10687                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10688                         break;
10689                 }
10690                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
10691                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
10692                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10693                         /* Save the encap resource before sample */
10694                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
10695                         pre_r = dev_flow->dv.encap_decap;
10696                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
10697                                                            dev_flow,
10698                                                            attr->transfer,
10699                                                            error))
10700                                 return -rte_errno;
10701                         sample_act->dr_encap_action =
10702                                 dev_flow->dv.encap_decap->action;
10703                         sample_idx->rix_encap_decap =
10704                                 dev_flow->handle->dvh.rix_encap_decap;
10705                         sample_actions[sample_act->actions_num++] =
10706                                                 sample_act->dr_encap_action;
10707                         /* Recover the encap resource after sample */
10708                         dev_flow->dv.encap_decap = pre_r;
10709                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
10710                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10711                         break;
10712                 default:
10713                         return rte_flow_error_set(error, EINVAL,
10714                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10715                                 NULL,
10716                                 "unsupported sampler sub-action");
10717                 }
10718         }
10719         sample_act->action_flags = action_flags;
10720         res->ft_id = dev_flow->dv.group;
10721         if (attr->transfer) {
10722                 union {
10723                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
10724                         uint64_t set_action;
10725                 } action_ctx = { .set_action = 0 };
10726
10727                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10728                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
10729                          MLX5_MODIFICATION_TYPE_SET);
10730                 MLX5_SET(set_action_in, action_ctx.action_in, field,
10731                          MLX5_MODI_META_REG_C_0);
10732                 MLX5_SET(set_action_in, action_ctx.action_in, data,
10733                          priv->vport_meta_tag);
10734                 res->set_action = action_ctx.set_action;
10735         } else if (attr->ingress) {
10736                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10737         } else {
10738                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
10739         }
10740         return 0;
10741 }
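
/*
 * Editor's note, a sketch rather than upstream code: the sub-action
 * helpers above publish their result through the shared dev_flow
 * handle, so each sample sub-action stashes the current handle fields,
 * registers its own resource, records it in sample_idx/sample_act and
 * then restores the handle for the normal path:
 *
 *   pre_rix = dev_flow->handle->rix_port_id_action;        (save)
 *   pre_r = dev_flow->dv.port_id_action;
 *   ...register the resource, copy it into sample_idx...
 *   dev_flow->dv.port_id_action = pre_r;                   (restore)
 *   dev_flow->handle->rix_port_id_action = pre_rix;
 */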
10742
10743 /**
10744  * Convert Sample action to DV specification.
10745  *
10746  * @param[in] dev
10747  *   Pointer to rte_eth_dev structure.
10748  * @param[in, out] dev_flow
10749  *   Pointer to the mlx5_flow.
10750  * @param[in] num_of_dest
10751  *   The number of destinations.
10752  * @param[in, out] res
10753  *   Pointer to sample resource.
10754  * @param[in, out] mdest_res
10755  *   Pointer to destination array resource.
10756  * @param[in] sample_actions
10757  *   Pointer to sample path actions list.
10758  * @param[in] action_flags
10759  *   Holds the actions detected until now.
10760  * @param[out] error
10761  *   Pointer to the error structure.
10762  *
10763  * @return
10764  *   0 on success, a negative errno value otherwise and rte_errno is set.
10765  */
10766 static int
10767 flow_dv_create_action_sample(struct rte_eth_dev *dev,
10768                              struct mlx5_flow *dev_flow,
10769                              uint32_t num_of_dest,
10770                              struct mlx5_flow_dv_sample_resource *res,
10771                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
10772                              void **sample_actions,
10773                              uint64_t action_flags,
10774                              struct rte_flow_error *error)
10775 {
10776         /* update normal path action resources at the last index of array */
10777         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
10778         struct mlx5_flow_sub_actions_list *sample_act =
10779                                         &mdest_res->sample_act[dest_index];
10780         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10781         struct mlx5_flow_rss_desc *rss_desc;
10782         uint32_t normal_idx = 0;
10783         struct mlx5_hrxq *hrxq;
10784         uint32_t hrxq_idx;
10785
10786         MLX5_ASSERT(wks);
10787         rss_desc = &wks->rss_desc;
10788         if (num_of_dest > 1) {
10789                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
10790                         /* Handle QP action for mirroring */
10791                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10792                                                     rss_desc, &hrxq_idx);
10793                         if (!hrxq)
10794                                 return rte_flow_error_set
10795                                      (error, rte_errno,
10796                                       RTE_FLOW_ERROR_TYPE_ACTION,
10797                                       NULL,
10798                                       "cannot create rx queue");
10799                         normal_idx++;
10800                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
10801                         sample_act->dr_queue_action = hrxq->action;
10802                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10803                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10804                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
10805                 }
10806                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
10807                         normal_idx++;
10808                         mdest_res->sample_idx[dest_index].rix_encap_decap =
10809                                 dev_flow->handle->dvh.rix_encap_decap;
10810                         sample_act->dr_encap_action =
10811                                 dev_flow->dv.encap_decap->action;
10812                         dev_flow->handle->dvh.rix_encap_decap = 0;
10813                 }
10814                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
10815                         normal_idx++;
10816                         mdest_res->sample_idx[dest_index].rix_port_id_action =
10817                                 dev_flow->handle->rix_port_id_action;
10818                         sample_act->dr_port_id_action =
10819                                 dev_flow->dv.port_id_action->action;
10820                         dev_flow->handle->rix_port_id_action = 0;
10821                 }
10822                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
10823                         normal_idx++;
10824                         mdest_res->sample_idx[dest_index].rix_jump =
10825                                 dev_flow->handle->rix_jump;
10826                         sample_act->dr_jump_action =
10827                                 dev_flow->dv.jump->action;
10828                         dev_flow->handle->rix_jump = 0;
10829                 }
10830                 sample_act->actions_num = normal_idx;
10831                 /* update sample path action resources at the first index of array */
10832                 mdest_res->ft_type = res->ft_type;
10833                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
10834                                 sizeof(struct mlx5_flow_sub_actions_idx));
10835                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
10836                                 sizeof(struct mlx5_flow_sub_actions_list));
10837                 mdest_res->num_of_dest = num_of_dest;
10838                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
10839                                                          dev_flow, error))
10840                         return rte_flow_error_set(error, EINVAL,
10841                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10842                                                   NULL, "can't create sample "
10843                                                   "action");
10844         } else {
10845                 res->sub_actions = sample_actions;
10846                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
10847                         return rte_flow_error_set(error, EINVAL,
10848                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10849                                                   NULL,
10850                                                   "can't create sample action");
10851         }
10852         return 0;
10853 }
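
/*
 * Editor's sketch of the destination array layout built above for
 * mirroring (num_of_dest > 1); indices follow the comments in the
 * code:
 *
 *   mdest_res->sample_act[0]                     - sample branch,
 *                                                  copied from *res
 *   mdest_res->sample_act[MLX5_MAX_DEST_NUM - 1] - normal branch
 *                                                  (queue/encap/port_id/jump)
 *   mdest_res->num_of_dest                       - total destinations
 *
 * With a single destination only the plain sample resource is
 * registered and res->sub_actions points at the sample_actions list.
 */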
10854
10855 /**
10856  * Remove an ASO age action from age actions list.
10857  *
10858  * @param[in] dev
10859  *   Pointer to the Ethernet device structure.
10860  * @param[in] age
10861  *   Pointer to the aso age action handler.
10862  */
10863 static void
10864 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
10865                                 struct mlx5_aso_age_action *age)
10866 {
10867         struct mlx5_age_info *age_info;
10868         struct mlx5_age_param *age_param = &age->age_params;
10869         struct mlx5_priv *priv = dev->data->dev_private;
10870         uint16_t expected = AGE_CANDIDATE;
10871
10872         age_info = GET_PORT_AGE_INFO(priv);
10873         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
10874                                          AGE_FREE, false, __ATOMIC_RELAXED,
10875                                          __ATOMIC_RELAXED)) {
10876                 /*
10877                  * We need the lock even if the age timeout expired,
10878                  * since the age action may still be in process.
10879                  */
10880                 rte_spinlock_lock(&age_info->aged_sl);
10881                 LIST_REMOVE(age, next);
10882                 rte_spinlock_unlock(&age_info->aged_sl);
10883                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
10884         }
10885 }
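
/*
 * Editor's note: the age action state transitions assumed from the
 * code above are AGE_CANDIDATE -> aged-out (linked on
 * age_info->aged_sl) -> AGE_FREE.  The compare-exchange resolves the
 * common case in one step:
 *
 *   uint16_t expected = AGE_CANDIDATE;
 *   if (__atomic_compare_exchange_n(&age_param->state, &expected,
 *                                   AGE_FREE, false, __ATOMIC_RELAXED,
 *                                   __ATOMIC_RELAXED))
 *           nothing to unlink, the action never aged out;
 *   otherwise unlink under aged_sl and only then mark AGE_FREE.
 */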
10886
10887 /**
10888  * Release an ASO age action.
10889  *
10890  * @param[in] dev
10891  *   Pointer to the Ethernet device structure.
10892  * @param[in] age_idx
10893  *   Index of ASO age action to release.
10897  *
10898  * @return
10899  *   0 when age action was removed, otherwise the number of references.
10900  */
10901 static int
10902 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
10903 {
10904         struct mlx5_priv *priv = dev->data->dev_private;
10905         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10906         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
10907         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
10908
10909         if (!ret) {
10910                 flow_dv_aso_age_remove_from_age(dev, age);
10911                 rte_spinlock_lock(&mng->free_sl);
10912                 LIST_INSERT_HEAD(&mng->free, age, next);
10913                 rte_spinlock_unlock(&mng->free_sl);
10914         }
10915         return ret;
10916 }
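
/*
 * Editor's usage sketch: the release above pairs with a relaxed
 * refcount increment taken by each user, as in the translate path
 * below (variable names illustrative):
 *
 *   __atomic_fetch_add(&age->refcnt, 1, __ATOMIC_RELAXED);
 *   ...
 *   if (!flow_dv_aso_age_release(dev, age_idx))
 *           the action was recycled onto mng->free;
 */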
10917
10918 /**
10919  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
10920  *
10921  * @param[in] dev
10922  *   Pointer to the Ethernet device structure.
10923  *
10924  * @return
10925  *   0 on success, otherwise negative errno value and rte_errno is set.
10926  */
10927 static int
10928 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
10929 {
10930         struct mlx5_priv *priv = dev->data->dev_private;
10931         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10932         void *old_pools = mng->pools;
10933         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
10934         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
10935         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
10936
10937         if (!pools) {
10938                 rte_errno = ENOMEM;
10939                 return -ENOMEM;
10940         }
10941         if (old_pools) {
10942                 memcpy(pools, old_pools,
10943                        mng->n * sizeof(struct mlx5_aso_age_pool *));
10944                 mlx5_free(old_pools);
10945         } else {
10946                 /* First ASO flow hit allocation - starting ASO data-path. */
10947                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
10948
10949                 if (ret) {
10950                         mlx5_free(pools);
10951                         return ret;
10952                 }
10953         }
10954         mng->n = resize;
10955         mng->pools = pools;
10956         return 0;
10957 }
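
/*
 * Editor's example: the pools array grows linearly in steps of
 * MLX5_CNT_CONTAINER_RESIZE entries, so assuming a step of 64
 * (illustrative value) the capacities go 64, 128, 192, ...  A new pool
 * is created only when pool->index == mng->n, hence at most one resize
 * per pool creation.
 */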
10958
10959 /**
10960  * Create and initialize a new ASO aging pool.
10961  *
10962  * @param[in] dev
10963  *   Pointer to the Ethernet device structure.
10964  * @param[out] age_free
10965  *   Where to put the pointer of a new age action.
10966  *
10967  * @return
10968  *   The age actions pool pointer and @p age_free is set on success,
10969  *   NULL otherwise and rte_errno is set.
10970  */
10971 static struct mlx5_aso_age_pool *
10972 flow_dv_age_pool_create(struct rte_eth_dev *dev,
10973                         struct mlx5_aso_age_action **age_free)
10974 {
10975         struct mlx5_priv *priv = dev->data->dev_private;
10976         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10977         struct mlx5_aso_age_pool *pool = NULL;
10978         struct mlx5_devx_obj *obj = NULL;
10979         uint32_t i;
10980
10981         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
10982                                                     priv->sh->pdn);
10983         if (!obj) {
10984                 rte_errno = ENODATA;
10985                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
10986                 return NULL;
10987         }
10988         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
10989         if (!pool) {
10990                 claim_zero(mlx5_devx_cmd_destroy(obj));
10991                 rte_errno = ENOMEM;
10992                 return NULL;
10993         }
10994         pool->flow_hit_aso_obj = obj;
10995         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
10996         rte_spinlock_lock(&mng->resize_sl);
10997         pool->index = mng->next;
10998         /* Resize pools array if there is no room for the new pool in it. */
10999         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11000                 claim_zero(mlx5_devx_cmd_destroy(obj));
11001                 mlx5_free(pool);
11002                 rte_spinlock_unlock(&mng->resize_sl);
11003                 return NULL;
11004         }
11005         mng->pools[pool->index] = pool;
11006         mng->next++;
11007         rte_spinlock_unlock(&mng->resize_sl);
11008         /* Assign the first action in the new pool, the rest go to free list. */
11009         *age_free = &pool->actions[0];
11010         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11011                 pool->actions[i].offset = i;
11012                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11013         }
11014         return pool;
11015 }
11016
11017 /**
11018  * Allocate an ASO aging bit.
11019  *
11020  * @param[in] dev
11021  *   Pointer to the Ethernet device structure.
11022  * @param[out] error
11023  *   Pointer to the error structure.
11024  *
11025  * @return
11026  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11027  */
11028 static uint32_t
11029 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11030 {
11031         struct mlx5_priv *priv = dev->data->dev_private;
11032         const struct mlx5_aso_age_pool *pool;
11033         struct mlx5_aso_age_action *age_free = NULL;
11034         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11035
11036         MLX5_ASSERT(mng);
11037         /* Try to get the next free age action bit. */
11038         rte_spinlock_lock(&mng->free_sl);
11039         age_free = LIST_FIRST(&mng->free);
11040         if (age_free) {
11041                 LIST_REMOVE(age_free, next);
11042         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11043                 rte_spinlock_unlock(&mng->free_sl);
11044                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11045                                    NULL, "failed to create ASO age pool");
11046                 return 0; /* 0 is an error. */
11047         }
11048         rte_spinlock_unlock(&mng->free_sl);
11049         pool = container_of
11050           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11051                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11052                                                                        actions);
11053         if (!age_free->dr_action) {
11054                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11055                                                  error);
11056
11057                 if (reg_c < 0) {
11058                         rte_flow_error_set(error, rte_errno,
11059                                            RTE_FLOW_ERROR_TYPE_ACTION,
11060                                            NULL, "failed to get reg_c "
11061                                            "for ASO flow hit");
11062                         return 0; /* 0 is an error. */
11063                 }
11064 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11065                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11066                                 (priv->sh->rx_domain,
11067                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11068                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11069                                  (reg_c - REG_C_0));
11070 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11071                 if (!age_free->dr_action) {
11072                         rte_errno = errno;
11073                         rte_spinlock_lock(&mng->free_sl);
11074                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11075                         rte_spinlock_unlock(&mng->free_sl);
11076                         rte_flow_error_set(error, rte_errno,
11077                                            RTE_FLOW_ERROR_TYPE_ACTION,
11078                                            NULL, "failed to create ASO "
11079                                            "flow hit action");
11080                         return 0; /* 0 is an error. */
11081                 }
11082         }
11083         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11084         return pool->index | ((age_free->offset + 1) << 16);
11085 }
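
/*
 * Editor's sketch of the index encoding returned above: the pool index
 * sits in the low 16 bits and (offset + 1) in the upper bits, which
 * keeps 0 reserved as the error value.  A decoder, presumably the
 * equivalent of what flow_aso_age_get_by_idx() does, looks like:
 *
 *   uint16_t pool_idx = age_idx & 0xffff;
 *   uint16_t offset = (age_idx >> 16) - 1;
 *   age = &mng->pools[pool_idx]->actions[offset];
 */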
11086
11087 /**
11088  * Create an age action using the ASO mechanism.
11089  *
11090  * @param[in] dev
11091  *   Pointer to rte_eth_dev structure.
11092  * @param[in] age
11093  *   Pointer to the aging action configuration.
11094  * @param[out] error
11095  *   Pointer to the error structure.
11096  *
11097  * @return
11098  *   Index to the ASO age action on success, 0 otherwise and rte_errno is set.
11099  */
11100 static uint32_t
11101 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
11102                                  const struct rte_flow_action_age *age,
11103                                  struct rte_flow_error *error)
11104 {
11105         uint32_t age_idx = 0;
11106         struct mlx5_aso_age_action *aso_age;
11107
11108         age_idx = flow_dv_aso_age_alloc(dev, error);
11109         if (!age_idx)
11110                 return 0;
11111         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11112         aso_age->age_params.context = age->context;
11113         aso_age->age_params.timeout = age->timeout;
11114         aso_age->age_params.port_id = dev->data->port_id;
11115         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11116                          __ATOMIC_RELAXED);
11117         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11118                          __ATOMIC_RELAXED);
11119         return age_idx;
11120 }
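
/*
 * Editor's usage sketch with hypothetical values: translating an AGE
 * action through ASO reduces to allocating the bit and storing the
 * user parameters:
 *
 *   const struct rte_flow_action_age conf = {
 *           .timeout = 30,             (seconds)
 *           .context = user_ctx,       (echoed back on aging events)
 *   };
 *   uint32_t idx = flow_dv_translate_create_aso_age(dev, &conf, error);
 *   if (idx)
 *           dr_action = flow_aso_age_get_by_idx(dev, idx)->dr_action;
 */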
11121
11122 /**
11123  * Fill the flow with DV spec, lock free
11124  * (mutex should be acquired by caller).
11125  *
11126  * @param[in] dev
11127  *   Pointer to rte_eth_dev structure.
11128  * @param[in, out] dev_flow
11129  *   Pointer to the sub flow.
11130  * @param[in] attr
11131  *   Pointer to the flow attributes.
11132  * @param[in] items
11133  *   Pointer to the list of items.
11134  * @param[in] actions
11135  *   Pointer to the list of actions.
11136  * @param[out] error
11137  *   Pointer to the error structure.
11138  *
11139  * @return
11140  *   0 on success, a negative errno value otherwise and rte_errno is set.
11141  */
11142 static int
11143 flow_dv_translate(struct rte_eth_dev *dev,
11144                   struct mlx5_flow *dev_flow,
11145                   const struct rte_flow_attr *attr,
11146                   const struct rte_flow_item items[],
11147                   const struct rte_flow_action actions[],
11148                   struct rte_flow_error *error)
11149 {
11150         struct mlx5_priv *priv = dev->data->dev_private;
11151         struct mlx5_dev_config *dev_conf = &priv->config;
11152         struct rte_flow *flow = dev_flow->flow;
11153         struct mlx5_flow_handle *handle = dev_flow->handle;
11154         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11155         struct mlx5_flow_rss_desc *rss_desc;
11156         uint64_t item_flags = 0;
11157         uint64_t last_item = 0;
11158         uint64_t action_flags = 0;
11159         struct mlx5_flow_dv_matcher matcher = {
11160                 .mask = {
11161                         .size = sizeof(matcher.mask.buf) -
11162                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
11163                 },
11164         };
11165         int actions_n = 0;
11166         bool actions_end = false;
11167         union {
11168                 struct mlx5_flow_dv_modify_hdr_resource res;
11169                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
11170                             sizeof(struct mlx5_modification_cmd) *
11171                             (MLX5_MAX_MODIFY_NUM + 1)];
11172         } mhdr_dummy;
11173         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
11174         const struct rte_flow_action_count *count = NULL;
11175         const struct rte_flow_action_age *age = NULL;
11176         union flow_dv_attr flow_attr = { .attr = 0 };
11177         uint32_t tag_be;
11178         union mlx5_flow_tbl_key tbl_key;
11179         uint32_t modify_action_position = UINT32_MAX;
11180         void *match_mask = matcher.mask.buf;
11181         void *match_value = dev_flow->dv.value.buf;
11182         uint8_t next_protocol = 0xff;
11183         struct rte_vlan_hdr vlan = { 0 };
11184         struct mlx5_flow_dv_dest_array_resource mdest_res;
11185         struct mlx5_flow_dv_sample_resource sample_res;
11186         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11187         const struct rte_flow_action_sample *sample = NULL;
11188         struct mlx5_flow_sub_actions_list *sample_act;
11189         uint32_t sample_act_pos = UINT32_MAX;
11190         uint32_t num_of_dest = 0;
11191         int tmp_actions_n = 0;
11192         uint32_t table;
11193         int ret = 0;
11194         const struct mlx5_flow_tunnel *tunnel;
11195         struct flow_grp_info grp_info = {
11196                 .external = !!dev_flow->external,
11197                 .transfer = !!attr->transfer,
11198                 .fdb_def_rule = !!priv->fdb_def_rule,
11199                 .skip_scale = dev_flow->skip_scale &
11200                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
11201         };
11202
11203         if (!wks)
11204                 return rte_flow_error_set(error, ENOMEM,
11205                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11206                                           NULL,
11207                                           "failed to push flow workspace");
11208         rss_desc = &wks->rss_desc;
11209         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
11210         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
11211         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
11212                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11213         /* update normal path action resources at the last index of array */
11214         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
11215         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
11216                  flow_items_to_tunnel(items) :
11217                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
11218                  flow_actions_to_tunnel(actions) :
11219                  dev_flow->tunnel;
11222         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
11223                                 (dev, tunnel, attr, items, actions);
11224         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
11225                                        &grp_info, error);
11226         if (ret)
11227                 return ret;
11228         dev_flow->dv.group = table;
11229         if (attr->transfer)
11230                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11231         /* number of actions must be reset to 0 since mhdr_res is on the stack. */
11232         mhdr_res->actions_num = 0;
11233         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
11234                 /*
11235                  * Do not add a decap action if the match rule drops the
11236                  * packet: HW rejects rules combining decap & drop.
11237                  *
11238                  * If the tunnel match rule was inserted before the matching
11239                  * tunnel set rule, the flow table used in the match rule
11240                  * must be registered. The current implementation handles
11241                  * that in flow_dv_match_register() at the function end.
11242                  */
11243                 bool add_decap = true;
11244                 const struct rte_flow_action *ptr = actions;
11245
11246                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
11247                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
11248                                 add_decap = false;
11249                                 break;
11250                         }
11251                 }
11252                 if (add_decap) {
11253                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11254                                                            attr->transfer,
11255                                                            error))
11256                                 return -rte_errno;
11257                         dev_flow->dv.actions[actions_n++] =
11258                                         dev_flow->dv.encap_decap->action;
11259                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11260                 }
11261         }
11262         for (; !actions_end ; actions++) {
11263                 const struct rte_flow_action_queue *queue;
11264                 const struct rte_flow_action_rss *rss;
11265                 const struct rte_flow_action *action = actions;
11266                 const uint8_t *rss_key;
11267                 struct mlx5_flow_tbl_resource *tbl;
11268                 struct mlx5_aso_age_action *age_act;
11269                 uint32_t port_id = 0;
11270                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
11271                 int action_type = actions->type;
11272                 const struct rte_flow_action *found_action = NULL;
11273                 uint32_t jump_group = 0;
11274                 struct mlx5_flow_counter *cnt;
11275
11276                 if (!mlx5_flow_os_action_supported(action_type))
11277                         return rte_flow_error_set(error, ENOTSUP,
11278                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11279                                                   actions,
11280                                                   "action not supported");
11281                 switch (action_type) {
11282                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
11283                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
11284                         break;
11285                 case RTE_FLOW_ACTION_TYPE_VOID:
11286                         break;
11287                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11288                         if (flow_dv_translate_action_port_id(dev, action,
11289                                                              &port_id, error))
11290                                 return -rte_errno;
11291                         port_id_resource.port_id = port_id;
11292                         MLX5_ASSERT(!handle->rix_port_id_action);
11293                         if (flow_dv_port_id_action_resource_register
11294                             (dev, &port_id_resource, dev_flow, error))
11295                                 return -rte_errno;
11296                         dev_flow->dv.actions[actions_n++] =
11297                                         dev_flow->dv.port_id_action->action;
11298                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11299                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
11300                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11301                         num_of_dest++;
11302                         break;
11303                 case RTE_FLOW_ACTION_TYPE_FLAG:
11304                         action_flags |= MLX5_FLOW_ACTION_FLAG;
11305                         dev_flow->handle->mark = 1;
11306                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11307                                 struct rte_flow_action_mark mark = {
11308                                         .id = MLX5_FLOW_MARK_DEFAULT,
11309                                 };
11310
11311                                 if (flow_dv_convert_action_mark(dev, &mark,
11312                                                                 mhdr_res,
11313                                                                 error))
11314                                         return -rte_errno;
11315                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11316                                 break;
11317                         }
11318                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
11319                         /*
11320                          * Only one FLAG or MARK is supported per device flow
11321                          * right now. So the pointer to the tag resource must be
11322                          * zero before the register process.
11323                          */
11324                         MLX5_ASSERT(!handle->dvh.rix_tag);
11325                         if (flow_dv_tag_resource_register(dev, tag_be,
11326                                                           dev_flow, error))
11327                                 return -rte_errno;
11328                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11329                         dev_flow->dv.actions[actions_n++] =
11330                                         dev_flow->dv.tag_resource->action;
11331                         break;
11332                 case RTE_FLOW_ACTION_TYPE_MARK:
11333                         action_flags |= MLX5_FLOW_ACTION_MARK;
11334                         dev_flow->handle->mark = 1;
11335                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11336                                 const struct rte_flow_action_mark *mark =
11337                                         (const struct rte_flow_action_mark *)
11338                                                 actions->conf;
11339
11340                                 if (flow_dv_convert_action_mark(dev, mark,
11341                                                                 mhdr_res,
11342                                                                 error))
11343                                         return -rte_errno;
11344                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11345                                 break;
11346                         }
11347                         /* Fall-through */
11348                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
11349                         /* Legacy (non-extensive) MARK action. */
11350                         tag_be = mlx5_flow_mark_set
11351                               (((const struct rte_flow_action_mark *)
11352                                (actions->conf))->id);
11353                         MLX5_ASSERT(!handle->dvh.rix_tag);
11354                         if (flow_dv_tag_resource_register(dev, tag_be,
11355                                                           dev_flow, error))
11356                                 return -rte_errno;
11357                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11358                         dev_flow->dv.actions[actions_n++] =
11359                                         dev_flow->dv.tag_resource->action;
11360                         break;
11361                 case RTE_FLOW_ACTION_TYPE_SET_META:
11362                         if (flow_dv_convert_action_set_meta
11363                                 (dev, mhdr_res, attr,
11364                                  (const struct rte_flow_action_set_meta *)
11365                                   actions->conf, error))
11366                                 return -rte_errno;
11367                         action_flags |= MLX5_FLOW_ACTION_SET_META;
11368                         break;
11369                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
11370                         if (flow_dv_convert_action_set_tag
11371                                 (dev, mhdr_res,
11372                                  (const struct rte_flow_action_set_tag *)
11373                                   actions->conf, error))
11374                                 return -rte_errno;
11375                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11376                         break;
11377                 case RTE_FLOW_ACTION_TYPE_DROP:
11378                         action_flags |= MLX5_FLOW_ACTION_DROP;
11379                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
11380                         break;
11381                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11382                         queue = actions->conf;
11383                         rss_desc->queue_num = 1;
11384                         rss_desc->queue[0] = queue->index;
11385                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11386                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11387                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
11388                         num_of_dest++;
11389                         break;
11390                 case RTE_FLOW_ACTION_TYPE_RSS:
11391                         rss = actions->conf;
11392                         memcpy(rss_desc->queue, rss->queue,
11393                                rss->queue_num * sizeof(uint16_t));
11394                         rss_desc->queue_num = rss->queue_num;
11395                         /* NULL RSS key indicates default RSS key. */
11396                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11397                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11398                         /*
11399                          * rss->level and rss->types should be set in advance
11400                          * when expanding items for RSS.
11401                          */
11402                         action_flags |= MLX5_FLOW_ACTION_RSS;
11403                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
11404                                 MLX5_FLOW_FATE_SHARED_RSS :
11405                                 MLX5_FLOW_FATE_QUEUE;
11406                         break;
11407                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
11408                         flow->age = (uint32_t)(uintptr_t)(action->conf);
11409                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
11410                         __atomic_fetch_add(&age_act->refcnt, 1,
11411                                            __ATOMIC_RELAXED);
11412                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
11413                         action_flags |= MLX5_FLOW_ACTION_AGE;
11414                         break;
11415                 case RTE_FLOW_ACTION_TYPE_AGE:
11416                         if (priv->sh->flow_hit_aso_en && attr->group) {
11417                                 /*
11418                                  * Create one shared age action, to be used
11419                                  * by all sub-flows.
11420                                  */
11421                                 if (!flow->age) {
11422                                         flow->age =
11423                                                 flow_dv_translate_create_aso_age
11424                                                         (dev, action->conf,
11425                                                          error);
11426                                         if (!flow->age)
11427                                                 return rte_flow_error_set
11428                                                 (error, rte_errno,
11429                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11430                                                  NULL,
11431                                                  "can't create ASO age action");
11432                                 }
11433                                 dev_flow->dv.actions[actions_n++] =
11434                                           (flow_aso_age_get_by_idx
11435                                                 (dev, flow->age))->dr_action;
11436                                 action_flags |= MLX5_FLOW_ACTION_AGE;
11437                                 break;
11438                         }
11439                         /* Fall-through */
11440                 case RTE_FLOW_ACTION_TYPE_COUNT:
11441                         if (!dev_conf->devx) {
11442                                 return rte_flow_error_set
11443                                               (error, ENOTSUP,
11444                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11445                                                NULL,
11446                                                "count action not supported");
11447                         }
11448                         /* Save information first, will apply later. */
11449                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
11450                                 count = action->conf;
11451                         else
11452                                 age = action->conf;
11453                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11454                         break;
11455                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
11456                         cnt = flow_dv_counter_get_by_idx(dev,
11457                                 (uint32_t)(uintptr_t)action->conf, NULL);
11458                         MLX5_ASSERT(cnt != NULL);
11459                         dev_flow->dv.actions[actions_n++] = cnt->action;
11460                         break;
11461                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
11462                         dev_flow->dv.actions[actions_n++] =
11463                                                 priv->sh->pop_vlan_action;
11464                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
11465                         break;
11466                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
11467                         if (!(action_flags &
11468                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
11469                                 flow_dev_get_vlan_info_from_items(items, &vlan);
11470                         vlan.eth_proto = rte_be_to_cpu_16
11471                              ((((const struct rte_flow_action_of_push_vlan *)
11472                                                    actions->conf)->ethertype));
11473                         found_action = mlx5_flow_find_action
11474                                         (actions + 1,
11475                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
11476                         if (found_action)
11477                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11478                         found_action = mlx5_flow_find_action
11479                                         (actions + 1,
11480                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
11481                         if (found_action)
11482                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11483                         if (flow_dv_create_action_push_vlan
11484                                             (dev, attr, &vlan, dev_flow, error))
11485                                 return -rte_errno;
11486                         dev_flow->dv.actions[actions_n++] =
11487                                         dev_flow->dv.push_vlan_res->action;
11488                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
11489                         break;
11490                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
11491                         /* The OF_PUSH_VLAN action already handled this action. */
11492                         MLX5_ASSERT(action_flags &
11493                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
11494                         break;
11495                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
11496                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
11497                                 break;
11498                         flow_dev_get_vlan_info_from_items(items, &vlan);
11499                         mlx5_update_vlan_vid_pcp(actions, &vlan);
11500                         /* If no VLAN push - this is a modify header action */
11501                         if (flow_dv_convert_action_modify_vlan_vid
11502                                                 (mhdr_res, actions, error))
11503                                 return -rte_errno;
11504                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
11505                         break;
11506                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11507                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11508                         if (flow_dv_create_action_l2_encap(dev, actions,
11509                                                            dev_flow,
11510                                                            attr->transfer,
11511                                                            error))
11512                                 return -rte_errno;
11513                         dev_flow->dv.actions[actions_n++] =
11514                                         dev_flow->dv.encap_decap->action;
11515                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11516                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11517                                 sample_act->action_flags |=
11518                                                         MLX5_FLOW_ACTION_ENCAP;
11519                         break;
11520                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
11521                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
11522                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11523                                                            attr->transfer,
11524                                                            error))
11525                                 return -rte_errno;
11526                         dev_flow->dv.actions[actions_n++] =
11527                                         dev_flow->dv.encap_decap->action;
11528                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11529                         break;
11530                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11531                         /* Handle encap with preceding decap. */
11532                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
11533                                 if (flow_dv_create_action_raw_encap
11534                                         (dev, actions, dev_flow, attr, error))
11535                                         return -rte_errno;
11536                                 dev_flow->dv.actions[actions_n++] =
11537                                         dev_flow->dv.encap_decap->action;
11538                         } else {
11539                                 /* Handle encap without preceding decap. */
11540                                 if (flow_dv_create_action_l2_encap
11541                                     (dev, actions, dev_flow, attr->transfer,
11542                                      error))
11543                                         return -rte_errno;
11544                                 dev_flow->dv.actions[actions_n++] =
11545                                         dev_flow->dv.encap_decap->action;
11546                         }
11547                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11548                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11549                                 sample_act->action_flags |=
11550                                                         MLX5_FLOW_ACTION_ENCAP;
11551                         break;
11552                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11553                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
11554                                 ;
11555                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
11556                                 if (flow_dv_create_action_l2_decap
11557                                     (dev, dev_flow, attr->transfer, error))
11558                                         return -rte_errno;
11559                                 dev_flow->dv.actions[actions_n++] =
11560                                         dev_flow->dv.encap_decap->action;
11561                         }
11562                         /* If decap is followed by encap, handle it at encap. */
11563                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11564                         break;
11565                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
11566                         dev_flow->dv.actions[actions_n++] =
11567                                 (void *)(uintptr_t)action->conf;
11568                         action_flags |= MLX5_FLOW_ACTION_JUMP;
11569                         break;
11570                 case RTE_FLOW_ACTION_TYPE_JUMP:
11571                         jump_group = ((const struct rte_flow_action_jump *)
11572                                                         action->conf)->group;
11573                         grp_info.std_tbl_fix = 0;
11574                         if (dev_flow->skip_scale &
11575                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
11576                                 grp_info.skip_scale = 1;
11577                         else
11578                                 grp_info.skip_scale = 0;
11579                         ret = mlx5_flow_group_to_table(dev, tunnel,
11580                                                        jump_group,
11581                                                        &table,
11582                                                        &grp_info, error);
11583                         if (ret)
11584                                 return ret;
11585                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
11586                                                        attr->transfer,
11587                                                        !!dev_flow->external,
11588                                                        tunnel, jump_group, 0,
11589                                                        0, error);
11590                         if (!tbl)
11591                                 return rte_flow_error_set
11592                                                 (error, errno,
11593                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11594                                                  NULL,
11595                                                  "cannot create jump action.");
11596                         if (flow_dv_jump_tbl_resource_register
11597                             (dev, tbl, dev_flow, error)) {
11598                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11599                                 return rte_flow_error_set
11600                                                 (error, errno,
11601                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11602                                                  NULL,
11603                                                  "cannot create jump action.");
11604                         }
11605                         dev_flow->dv.actions[actions_n++] =
11606                                         dev_flow->dv.jump->action;
11607                         action_flags |= MLX5_FLOW_ACTION_JUMP;
11608                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
11609                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
11610                         num_of_dest++;
11611                         break;
11612                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
11613                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
11614                         if (flow_dv_convert_action_modify_mac
11615                                         (mhdr_res, actions, error))
11616                                 return -rte_errno;
11617                         action_flags |= actions->type ==
11618                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
11619                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
11620                                         MLX5_FLOW_ACTION_SET_MAC_DST;
11621                         break;
11622                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
11623                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
11624                         if (flow_dv_convert_action_modify_ipv4
11625                                         (mhdr_res, actions, error))
11626                                 return -rte_errno;
11627                         action_flags |= actions->type ==
11628                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
11629                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
11630                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
11631                         break;
11632                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
11633                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
11634                         if (flow_dv_convert_action_modify_ipv6
11635                                         (mhdr_res, actions, error))
11636                                 return -rte_errno;
11637                         action_flags |= actions->type ==
11638                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
11639                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
11640                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
11641                         break;
11642                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
11643                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
11644                         if (flow_dv_convert_action_modify_tp
11645                                         (mhdr_res, actions, items,
11646                                          &flow_attr, dev_flow, !!(action_flags &
11647                                          MLX5_FLOW_ACTION_DECAP), error))
11648                                 return -rte_errno;
11649                         action_flags |= actions->type ==
11650                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
11651                                         MLX5_FLOW_ACTION_SET_TP_SRC :
11652                                         MLX5_FLOW_ACTION_SET_TP_DST;
11653                         break;
11654                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
11655                         if (flow_dv_convert_action_modify_dec_ttl
11656                                         (mhdr_res, items, &flow_attr, dev_flow,
11657                                          !!(action_flags &
11658                                          MLX5_FLOW_ACTION_DECAP), error))
11659                                 return -rte_errno;
11660                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
11661                         break;
11662                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
11663                         if (flow_dv_convert_action_modify_ttl
11664                                         (mhdr_res, actions, items, &flow_attr,
11665                                          dev_flow, !!(action_flags &
11666                                          MLX5_FLOW_ACTION_DECAP), error))
11667                                 return -rte_errno;
11668                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
11669                         break;
11670                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
11671                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
11672                         if (flow_dv_convert_action_modify_tcp_seq
11673                                         (mhdr_res, actions, error))
11674                                 return -rte_errno;
11675                         action_flags |= actions->type ==
11676                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
11677                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
11678                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
11679                         break;
11680
11681                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
11682                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
11683                         if (flow_dv_convert_action_modify_tcp_ack
11684                                         (mhdr_res, actions, error))
11685                                 return -rte_errno;
11686                         action_flags |= actions->type ==
11687                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
11688                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
11689                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
11690                         break;
11691                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
11692                         if (flow_dv_convert_action_set_reg
11693                                         (mhdr_res, actions, error))
11694                                 return -rte_errno;
11695                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11696                         break;
11697                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
11698                         if (flow_dv_convert_action_copy_mreg
11699                                         (dev, mhdr_res, actions, error))
11700                                 return -rte_errno;
11701                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11702                         break;
11703                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
11704                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
11705                         dev_flow->handle->fate_action =
11706                                         MLX5_FLOW_FATE_DEFAULT_MISS;
11707                         break;
11708                 case RTE_FLOW_ACTION_TYPE_METER:
11709                         if (!wks->fm)
11710                                 return rte_flow_error_set(error, rte_errno,
11711                                         RTE_FLOW_ERROR_TYPE_ACTION,
11712                                         NULL, "Failed to get meter in flow.");
11713                         /* Set the meter action. */
11714                         dev_flow->dv.actions[actions_n++] =
11715                                 wks->fm->meter_action;
11716                         action_flags |= MLX5_FLOW_ACTION_METER;
11717                         break;
11718                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
11719                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
11720                                                               actions, error))
11721                                 return -rte_errno;
11722                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
11723                         break;
11724                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
11725                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
11726                                                               actions, error))
11727                                 return -rte_errno;
11728                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
11729                         break;
11730                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
11731                         sample_act_pos = actions_n;
11732                         sample = (const struct rte_flow_action_sample *)
11733                                  action->conf;
11734                         actions_n++;
11735                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
11736                         /* Put the encap action into the group when working with port id. */
11737                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
11738                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
11739                                 sample_act->action_flags |=
11740                                                         MLX5_FLOW_ACTION_ENCAP;
11741                         break;
11742                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
11743                         if (flow_dv_convert_action_modify_field
11744                                         (dev, mhdr_res, actions, attr, error))
11745                                 return -rte_errno;
11746                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
11747                         break;
11748                 case RTE_FLOW_ACTION_TYPE_END:
11749                         actions_end = true;
11750                         if (mhdr_res->actions_num) {
11751                                 /* Create the modify header action if needed. */
11752                                 if (flow_dv_modify_hdr_resource_register
11753                                         (dev, mhdr_res, dev_flow, error))
11754                                         return -rte_errno;
11755                                 dev_flow->dv.actions[modify_action_position] =
11756                                         handle->dvh.modify_hdr->action;
11757                         }
11758                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
11759                                 /*
11760                                  * Create one count action, to be used
11761                                  * by all sub-flows.
11762                                  */
11763                                 if (!flow->counter) {
11764                                         flow->counter =
11765                                                 flow_dv_translate_create_counter
11766                                                         (dev, dev_flow, count,
11767                                                          age);
11768                                         if (!flow->counter)
11769                                                 return rte_flow_error_set
11770                                                 (error, rte_errno,
11771                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11772                                                  NULL, "cannot create counter"
11773                                                  " object.");
11774                                 }
11775                                 dev_flow->dv.actions[actions_n] =
11776                                           (flow_dv_counter_get_by_idx(dev,
11777                                           flow->counter, NULL))->action;
11778                                 actions_n++;
11779                         }
11780                 default:
11781                         break;
11782                 }
11783                 if (mhdr_res->actions_num &&
11784                     modify_action_position == UINT32_MAX)
11785                         modify_action_position = actions_n++;
11786         }
11787         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
11788                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
11789                 int item_type = items->type;
11790
11791                 if (!mlx5_flow_os_item_supported(item_type))
11792                         return rte_flow_error_set(error, ENOTSUP,
11793                                                   RTE_FLOW_ERROR_TYPE_ITEM,
11794                                                   NULL, "item not supported");
11795                 switch (item_type) {
11796                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
11797                         flow_dv_translate_item_port_id
11798                                 (dev, match_mask, match_value, items, attr);
11799                         last_item = MLX5_FLOW_ITEM_PORT_ID;
11800                         break;
11801                 case RTE_FLOW_ITEM_TYPE_ETH:
11802                         flow_dv_translate_item_eth(match_mask, match_value,
11803                                                    items, tunnel,
11804                                                    dev_flow->dv.group);
11805                         matcher.priority = action_flags &
11806                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
11807                                         !dev_flow->external ?
11808                                         MLX5_PRIORITY_MAP_L3 :
11809                                         MLX5_PRIORITY_MAP_L2;
11810                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
11811                                              MLX5_FLOW_LAYER_OUTER_L2;
11812                         break;
11813                 case RTE_FLOW_ITEM_TYPE_VLAN:
11814                         flow_dv_translate_item_vlan(dev_flow,
11815                                                     match_mask, match_value,
11816                                                     items, tunnel,
11817                                                     dev_flow->dv.group);
11818                         matcher.priority = MLX5_PRIORITY_MAP_L2;
11819                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
11820                                               MLX5_FLOW_LAYER_INNER_VLAN) :
11821                                              (MLX5_FLOW_LAYER_OUTER_L2 |
11822                                               MLX5_FLOW_LAYER_OUTER_VLAN);
11823                         break;
11824                 case RTE_FLOW_ITEM_TYPE_IPV4:
11825                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11826                                                   &item_flags, &tunnel);
11827                         flow_dv_translate_item_ipv4(match_mask, match_value,
11828                                                     items, tunnel,
11829                                                     dev_flow->dv.group);
11830                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11831                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
11832                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
11833                         if (items->mask != NULL &&
11834                             ((const struct rte_flow_item_ipv4 *)
11835                              items->mask)->hdr.next_proto_id) {
11836                                 next_protocol =
11837                                         ((const struct rte_flow_item_ipv4 *)
11838                                          (items->spec))->hdr.next_proto_id;
11839                                 next_protocol &=
11840                                         ((const struct rte_flow_item_ipv4 *)
11841                                          (items->mask))->hdr.next_proto_id;
11842                         } else {
11843                                 /* Reset for inner layer. */
11844                                 next_protocol = 0xff;
11845                         }
11846                         break;
11847                 case RTE_FLOW_ITEM_TYPE_IPV6:
11848                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11849                                                   &item_flags, &tunnel);
11850                         flow_dv_translate_item_ipv6(match_mask, match_value,
11851                                                     items, tunnel,
11852                                                     dev_flow->dv.group);
11853                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11854                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
11855                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
11856                         if (items->mask != NULL &&
11857                             ((const struct rte_flow_item_ipv6 *)
11858                              items->mask)->hdr.proto) {
11859                                 next_protocol =
11860                                         ((const struct rte_flow_item_ipv6 *)
11861                                          items->spec)->hdr.proto;
11862                                 next_protocol &=
11863                                         ((const struct rte_flow_item_ipv6 *)
11864                                          items->mask)->hdr.proto;
11865                         } else {
11866                                 /* Reset for inner layer. */
11867                                 next_protocol = 0xff;
11868                         }
11869                         break;
11870                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
11871                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
11872                                                              match_value,
11873                                                              items, tunnel);
11874                         last_item = tunnel ?
11875                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
11876                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
11877                         if (items->mask != NULL &&
11878                             ((const struct rte_flow_item_ipv6_frag_ext *)
11879                              items->mask)->hdr.next_header) {
11880                                 next_protocol =
11881                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11882                                  items->spec)->hdr.next_header;
11883                                 next_protocol &=
11884                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11885                                  items->mask)->hdr.next_header;
11886                         } else {
11887                                 /* Reset for inner layer. */
11888                                 next_protocol = 0xff;
11889                         }
11890                         break;
11891                 case RTE_FLOW_ITEM_TYPE_TCP:
11892                         flow_dv_translate_item_tcp(match_mask, match_value,
11893                                                    items, tunnel);
11894                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11895                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
11896                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
11897                         break;
11898                 case RTE_FLOW_ITEM_TYPE_UDP:
11899                         flow_dv_translate_item_udp(match_mask, match_value,
11900                                                    items, tunnel);
11901                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11902                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
11903                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
11904                         break;
11905                 case RTE_FLOW_ITEM_TYPE_GRE:
11906                         flow_dv_translate_item_gre(match_mask, match_value,
11907                                                    items, tunnel);
11908                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11909                         last_item = MLX5_FLOW_LAYER_GRE;
11910                         break;
11911                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
11912                         flow_dv_translate_item_gre_key(match_mask,
11913                                                        match_value, items);
11914                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
11915                         break;
11916                 case RTE_FLOW_ITEM_TYPE_NVGRE:
11917                         flow_dv_translate_item_nvgre(match_mask, match_value,
11918                                                      items, tunnel);
11919                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11920                         last_item = MLX5_FLOW_LAYER_GRE;
11921                         break;
11922                 case RTE_FLOW_ITEM_TYPE_VXLAN:
11923                         flow_dv_translate_item_vxlan(match_mask, match_value,
11924                                                      items, tunnel);
11925                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11926                         last_item = MLX5_FLOW_LAYER_VXLAN;
11927                         break;
11928                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
11929                         flow_dv_translate_item_vxlan_gpe(match_mask,
11930                                                          match_value, items,
11931                                                          tunnel);
11932                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11933                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
11934                         break;
11935                 case RTE_FLOW_ITEM_TYPE_GENEVE:
11936                         flow_dv_translate_item_geneve(match_mask, match_value,
11937                                                       items, tunnel);
11938                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11939                         last_item = MLX5_FLOW_LAYER_GENEVE;
11940                         break;
11941                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
11942                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
11943                                                           match_value,
11944                                                           items, error);
11945                         if (ret)
11946                                 return rte_flow_error_set(error, -ret,
11947                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11948                                         "cannot create GENEVE TLV option");
11949                         flow->geneve_tlv_option = 1;
11950                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
11951                         break;
11952                 case RTE_FLOW_ITEM_TYPE_MPLS:
11953                         flow_dv_translate_item_mpls(match_mask, match_value,
11954                                                     items, last_item, tunnel);
11955                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11956                         last_item = MLX5_FLOW_LAYER_MPLS;
11957                         break;
11958                 case RTE_FLOW_ITEM_TYPE_MARK:
11959                         flow_dv_translate_item_mark(dev, match_mask,
11960                                                     match_value, items);
11961                         last_item = MLX5_FLOW_ITEM_MARK;
11962                         break;
11963                 case RTE_FLOW_ITEM_TYPE_META:
11964                         flow_dv_translate_item_meta(dev, match_mask,
11965                                                     match_value, attr, items);
11966                         last_item = MLX5_FLOW_ITEM_METADATA;
11967                         break;
11968                 case RTE_FLOW_ITEM_TYPE_ICMP:
11969                         flow_dv_translate_item_icmp(match_mask, match_value,
11970                                                     items, tunnel);
11971                         last_item = MLX5_FLOW_LAYER_ICMP;
11972                         break;
11973                 case RTE_FLOW_ITEM_TYPE_ICMP6:
11974                         flow_dv_translate_item_icmp6(match_mask, match_value,
11975                                                       items, tunnel);
11976                         last_item = MLX5_FLOW_LAYER_ICMP6;
11977                         break;
11978                 case RTE_FLOW_ITEM_TYPE_TAG:
11979                         flow_dv_translate_item_tag(dev, match_mask,
11980                                                    match_value, items);
11981                         last_item = MLX5_FLOW_ITEM_TAG;
11982                         break;
11983                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
11984                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
11985                                                         match_value, items);
11986                         last_item = MLX5_FLOW_ITEM_TAG;
11987                         break;
11988                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
11989                         flow_dv_translate_item_tx_queue(dev, match_mask,
11990                                                         match_value,
11991                                                         items);
11992                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
11993                         break;
11994                 case RTE_FLOW_ITEM_TYPE_GTP:
11995                         flow_dv_translate_item_gtp(match_mask, match_value,
11996                                                    items, tunnel);
11997                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11998                         last_item = MLX5_FLOW_LAYER_GTP;
11999                         break;
12000                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12001                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12002                                                           match_value,
12003                                                           items);
12004                         if (ret)
12005                                 return rte_flow_error_set(error, -ret,
12006                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12007                                         "cannot create GTP PSC item");
12008                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12009                         break;
12010                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12011                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
12012                                 /* Create it only the first time it is used. */
12013                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12014                                 if (ret)
12015                                         return rte_flow_error_set
12016                                                 (error, -ret,
12017                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12018                                                 NULL,
12019                                                 "cannot create eCPRI parser");
12020                         }
12021                         /* Adjust the matcher mask size and device flow value size. */
12022                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12023                         dev_flow->dv.value.size =
12024                                         MLX5_ST_SZ_BYTES(fte_match_param);
12025                         flow_dv_translate_item_ecpri(dev, match_mask,
12026                                                      match_value, items);
12027                         /* No other protocol should follow eCPRI layer. */
12028                         last_item = MLX5_FLOW_LAYER_ECPRI;
12029                         break;
12030                 default:
12031                         break;
12032                 }
12033                 item_flags |= last_item;
12034         }
12035         /*
12036          * When E-Switch mode is enabled, we have two cases where we need to
12037          * set the source port manually.
12038          * The first one is the case of a NIC steering rule, and the second is
12039          * an E-Switch rule where no port_id item was found. In both cases
12040          * the source port is set according to the current port in use.
12041          */
12042         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
12043             (priv->representor || priv->master)) {
12044                 if (flow_dv_translate_item_port_id(dev, match_mask,
12045                                                    match_value, NULL, attr))
12046                         return -rte_errno;
12047         }
12048 #ifdef RTE_LIBRTE_MLX5_DEBUG
12049         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
12050                                               dev_flow->dv.value.buf));
12051 #endif
12052         /*
12053          * Layers may already be initialized from the prefix flow if this dev_flow
12054          * is the suffix flow.
12055          */
12056         handle->layers |= item_flags;
12057         if (action_flags & MLX5_FLOW_ACTION_RSS)
12058                 flow_dv_hashfields_set(dev_flow, rss_desc);
12059         /* If the sample action contains an RSS action, the sample/mirror
12060          * resource should be registered after the hash fields are updated.
12061          */
12062         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
12063                 ret = flow_dv_translate_action_sample(dev,
12064                                                       sample,
12065                                                       dev_flow, attr,
12066                                                       &num_of_dest,
12067                                                       sample_actions,
12068                                                       &sample_res,
12069                                                       error);
12070                 if (ret < 0)
12071                         return ret;
12072                 ret = flow_dv_create_action_sample(dev,
12073                                                    dev_flow,
12074                                                    num_of_dest,
12075                                                    &sample_res,
12076                                                    &mdest_res,
12077                                                    sample_actions,
12078                                                    action_flags,
12079                                                    error);
12080                 if (ret < 0)
12081                         return rte_flow_error_set
12082                                                 (error, rte_errno,
12083                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12084                                                 NULL,
12085                                                 "cannot create sample action");
12086                 if (num_of_dest > 1) {
12087                         dev_flow->dv.actions[sample_act_pos] =
12088                         dev_flow->dv.dest_array_res->action;
12089                 } else {
12090                         dev_flow->dv.actions[sample_act_pos] =
12091                         dev_flow->dv.sample_res->verbs_action;
12092                 }
12093         }
12094         /*
12095          * For multiple destinations (sample action with ratio=1), the encap
12096          * action and the port id action are combined into a group action,
12097          * so the original actions must be removed from the flow and only
12098          * the sample action used instead.
12099          */
12100         if (num_of_dest > 1 &&
12101             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
12102                 int i;
12103                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12104
12105                 for (i = 0; i < actions_n; i++) {
12106                         if ((sample_act->dr_encap_action &&
12107                                 sample_act->dr_encap_action ==
12108                                 dev_flow->dv.actions[i]) ||
12109                                 (sample_act->dr_port_id_action &&
12110                                 sample_act->dr_port_id_action ==
12111                                 dev_flow->dv.actions[i]) ||
12112                                 (sample_act->dr_jump_action &&
12113                                 sample_act->dr_jump_action ==
12114                                 dev_flow->dv.actions[i]))
12115                                 continue;
12116                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
12117                 }
12118                 memcpy((void *)dev_flow->dv.actions,
12119                                 (void *)temp_actions,
12120                                 tmp_actions_n * sizeof(void *));
12121                 actions_n = tmp_actions_n;
12122         }
12123         dev_flow->dv.actions_n = actions_n;
12124         dev_flow->act_flags = action_flags;
12125         if (wks->skip_matcher_reg)
12126                 return 0;
12127         /* Register matcher. */
12128         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
12129                                     matcher.mask.size);
12130         matcher.priority = mlx5_get_matcher_priority(dev, attr,
12131                                         matcher.priority);
12132         /* The reserved field does not need to be set to 0 here. */
12133         tbl_key.is_fdb = attr->transfer;
12134         tbl_key.is_egress = attr->egress;
12135         tbl_key.level = dev_flow->dv.group;
12136         tbl_key.id = dev_flow->dv.table_id;
12137         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
12138                                      tunnel, attr->group, error))
12139                 return -rte_errno;
12140         return 0;
12141 }
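
/*
 * Note on the translation loop above (an illustrative sketch, not driver
 * logic): the first action that accumulates modify-header commands
 * reserves one slot in dev_flow->dv.actions (modify_action_position),
 * and the slot is only filled with the registered modify_hdr action once
 * RTE_FLOW_ACTION_TYPE_END is reached. A minimal model of the same
 * bookkeeping pattern, with hypothetical names:
 *
 *   uint32_t pos = UINT32_MAX;               // nothing reserved yet
 *   ...
 *   if (mhdr->actions_num && pos == UINT32_MAX)
 *       pos = actions_n++;                   // reserve the slot once
 *   ...
 *   if (mhdr->actions_num)                   // at the END action
 *       actions[pos] = mhdr_action;          // fill the reserved slot
 */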
12142
12143 /**
12144  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields).
12146  *
12147  * @param[in, out] action
12148  *   Shared RSS action holding hash RX queue objects.
12149  * @param[in] hash_fields
12150  *   Defines combination of packet fields to participate in RX hash.
12153  * @param[in] hrxq_idx
12154  *   Hash RX queue index to set.
12155  *
12156  * @return
12157  *   0 on success, otherwise negative errno value.
12158  */
12159 static int
12160 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
12161                               const uint64_t hash_fields,
12162                               uint32_t hrxq_idx)
12163 {
12164         uint32_t *hrxqs = action->hrxq;
12165
12166         switch (hash_fields & ~IBV_RX_HASH_INNER) {
12167         case MLX5_RSS_HASH_IPV4:
12168                 /* fall-through. */
12169         case MLX5_RSS_HASH_IPV4_DST_ONLY:
12170                 /* fall-through. */
12171         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
12172                 hrxqs[0] = hrxq_idx;
12173                 return 0;
12174         case MLX5_RSS_HASH_IPV4_TCP:
12175                 /* fall-through. */
12176         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
12177                 /* fall-through. */
12178         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
12179                 hrxqs[1] = hrxq_idx;
12180                 return 0;
12181         case MLX5_RSS_HASH_IPV4_UDP:
12182                 /* fall-through. */
12183         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
12184                 /* fall-through. */
12185         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
12186                 hrxqs[2] = hrxq_idx;
12187                 return 0;
12188         case MLX5_RSS_HASH_IPV6:
12189                 /* fall-through. */
12190         case MLX5_RSS_HASH_IPV6_DST_ONLY:
12191                 /* fall-through. */
12192         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
12193                 hrxqs[3] = hrxq_idx;
12194                 return 0;
12195         case MLX5_RSS_HASH_IPV6_TCP:
12196                 /* fall-through. */
12197         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
12198                 /* fall-through. */
12199         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
12200                 hrxqs[4] = hrxq_idx;
12201                 return 0;
12202         case MLX5_RSS_HASH_IPV6_UDP:
12203                 /* fall-through. */
12204         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
12205                 /* fall-through. */
12206         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
12207                 hrxqs[5] = hrxq_idx;
12208                 return 0;
12209         case MLX5_RSS_HASH_NONE:
12210                 hrxqs[6] = hrxq_idx;
12211                 return 0;
12212         default:
12213                 return -1;
12214         }
12215 }
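
/*
 * For reference, the slot layout implied by the switch above (the array
 * is action->hrxq; the SRC_ONLY/DST_ONLY variants share their base slot,
 * and IBV_RX_HASH_INNER is masked out before the dispatch):
 *
 *   hrxqs[0] - MLX5_RSS_HASH_IPV4
 *   hrxqs[1] - MLX5_RSS_HASH_IPV4_TCP
 *   hrxqs[2] - MLX5_RSS_HASH_IPV4_UDP
 *   hrxqs[3] - MLX5_RSS_HASH_IPV6
 *   hrxqs[4] - MLX5_RSS_HASH_IPV6_TCP
 *   hrxqs[5] - MLX5_RSS_HASH_IPV6_UDP
 *   hrxqs[6] - MLX5_RSS_HASH_NONE
 */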
12216
12217 /**
12218  * Look up hash RX queue by hash fields (see enum ibv_rx_hash_fields).
12220  *
12221  * @param[in] dev
12222  *   Pointer to the Ethernet device structure.
12223  * @param[in] idx
12224  *   Shared RSS action ID holding hash RX queue objects.
12225  * @param[in] hash_fields
12226  *   Defines combination of packet fields to participate in RX hash.
12229  *
12230  * @return
12231  *   Valid hash RX queue index, otherwise 0.
12232  */
12233 static uint32_t
12234 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
12235                                  const uint64_t hash_fields)
12236 {
12237         struct mlx5_priv *priv = dev->data->dev_private;
12238         struct mlx5_shared_action_rss *shared_rss =
12239             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12240         const uint32_t *hrxqs = shared_rss->hrxq;
12241
12242         switch (hash_fields & ~IBV_RX_HASH_INNER) {
12243         case MLX5_RSS_HASH_IPV4:
12244                 /* fall-through. */
12245         case MLX5_RSS_HASH_IPV4_DST_ONLY:
12246                 /* fall-through. */
12247         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
12248                 return hrxqs[0];
12249         case MLX5_RSS_HASH_IPV4_TCP:
12250                 /* fall-through. */
12251         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
12252                 /* fall-through. */
12253         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
12254                 return hrxqs[1];
12255         case MLX5_RSS_HASH_IPV4_UDP:
12256                 /* fall-through. */
12257         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
12258                 /* fall-through. */
12259         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
12260                 return hrxqs[2];
12261         case MLX5_RSS_HASH_IPV6:
12262                 /* fall-through. */
12263         case MLX5_RSS_HASH_IPV6_DST_ONLY:
12264                 /* fall-through. */
12265         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
12266                 return hrxqs[3];
12267         case MLX5_RSS_HASH_IPV6_TCP:
12268                 /* fall-through. */
12269         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
12270                 /* fall-through. */
12271         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
12272                 return hrxqs[4];
12273         case MLX5_RSS_HASH_IPV6_UDP:
12274                 /* fall-through. */
12275         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
12276                 /* fall-through. */
12277         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
12278                 return hrxqs[5];
12279         case MLX5_RSS_HASH_NONE:
12280                 return hrxqs[6];
12281         default:
12282                 return 0;
12283         }
12285 }
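
/*
 * Usage sketch (hypothetical values, for illustration only): an index
 * stored by __flow_dv_action_rss_hrxq_set() is returned by the matching
 * lookup for the same hash-fields value, e.g.:
 *
 *   __flow_dv_action_rss_hrxq_set(act, MLX5_RSS_HASH_IPV4_TCP, idx);
 *   // given 'srss', the shared RSS ipool index of 'act':
 *   // __flow_dv_action_rss_hrxq_lookup(dev, srss,
 *   //                                  MLX5_RSS_HASH_IPV4_TCP) == idx
 */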
12286
12287 /**
12288  * Apply the flow to the NIC. Lock free
12289  * (the mutex should be acquired by the caller).
12290  *
12291  * @param[in] dev
12292  *   Pointer to the Ethernet device structure.
12293  * @param[in, out] flow
12294  *   Pointer to flow structure.
12295  * @param[out] error
12296  *   Pointer to error structure.
12297  *
12298  * @return
12299  *   0 on success, a negative errno value otherwise and rte_errno is set.
12300  */
12301 static int
12302 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
12303               struct rte_flow_error *error)
12304 {
12305         struct mlx5_flow_dv_workspace *dv;
12306         struct mlx5_flow_handle *dh;
12307         struct mlx5_flow_handle_dv *dv_h;
12308         struct mlx5_flow *dev_flow;
12309         struct mlx5_priv *priv = dev->data->dev_private;
12310         uint32_t handle_idx;
12311         int n;
12312         int err;
12313         int idx;
12314         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12315         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
12316
12317         MLX5_ASSERT(wks);
12318         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
12319                 dev_flow = &wks->flows[idx];
12320                 dv = &dev_flow->dv;
12321                 dh = dev_flow->handle;
12322                 dv_h = &dh->dvh;
12323                 n = dv->actions_n;
12324                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
12325                         if (dv->transfer) {
12326                                 MLX5_ASSERT(priv->sh->dr_drop_action);
12327                                 dv->actions[n++] = priv->sh->dr_drop_action;
12328                         } else {
12329 #ifdef HAVE_MLX5DV_DR
12330                                 /* DR supports drop action placeholder. */
12331                                 MLX5_ASSERT(priv->sh->dr_drop_action);
12332                                 dv->actions[n++] = priv->sh->dr_drop_action;
12333 #else
12334                                 /* For DV we use the explicit drop queue. */
12335                                 MLX5_ASSERT(priv->drop_queue.hrxq);
12336                                 dv->actions[n++] =
12337                                                 priv->drop_queue.hrxq->action;
12338 #endif
12339                         }
12340                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
12341                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
12342                         struct mlx5_hrxq *hrxq;
12343                         uint32_t hrxq_idx;
12344
12345                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
12346                                                     &hrxq_idx);
12347                         if (!hrxq) {
12348                                 rte_flow_error_set
12349                                         (error, rte_errno,
12350                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12351                                          "cannot get hash queue");
12352                                 goto error;
12353                         }
12354                         dh->rix_hrxq = hrxq_idx;
12355                         dv->actions[n++] = hrxq->action;
12356                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12357                         struct mlx5_hrxq *hrxq = NULL;
12358                         uint32_t hrxq_idx;
12359
12360                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
12361                                                 rss_desc->shared_rss,
12362                                                 dev_flow->hash_fields);
12363                         if (hrxq_idx)
12364                                 hrxq = mlx5_ipool_get
12365                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
12366                                          hrxq_idx);
12367                         if (!hrxq) {
12368                                 rte_flow_error_set
12369                                         (error, rte_errno,
12370                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12371                                          "cannot get hash queue");
12372                                 goto error;
12373                         }
12374                         dh->rix_srss = rss_desc->shared_rss;
12375                         dv->actions[n++] = hrxq->action;
12376                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
12377                         if (!priv->sh->default_miss_action) {
12378                                 rte_flow_error_set
12379                                         (error, rte_errno,
12380                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12381                                          "default miss action not created.");
12382                                 goto error;
12383                         }
12384                         dv->actions[n++] = priv->sh->default_miss_action;
12385                 }
12386                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
12387                                                (void *)&dv->value, n,
12388                                                dv->actions, &dh->drv_flow);
12389                 if (err) {
12390                         rte_flow_error_set(error, errno,
12391                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12392                                            NULL,
12393                                            "hardware refuses to create flow");
12394                         goto error;
12395                 }
12396                 if (priv->vmwa_context &&
12397                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
12398                         /*
12399                          * The rule contains the VLAN pattern.
12400                          * For VF we are going to create a VLAN
12401                          * interface to make the hypervisor set the
12402                          * correct e-Switch vport context.
12403                          */
12404                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
12405                 }
12406         }
12407         return 0;
12408 error:
12409         err = rte_errno; /* Save rte_errno before cleanup. */
12410         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
12411                        handle_idx, dh, next) {
12412                 /* hrxq is a union; don't clear it if the flag is not set. */
12413                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
12414                         mlx5_hrxq_release(dev, dh->rix_hrxq);
12415                         dh->rix_hrxq = 0;
12416                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12417                         dh->rix_srss = 0;
12418                 }
12419                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12420                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12421         }
12422         rte_errno = err; /* Restore rte_errno. */
12423         return -rte_errno;
12424 }
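
/*
 * The error path of flow_dv_apply() uses the common DPDK idiom of saving
 * rte_errno before cleanup and restoring it afterwards, since release
 * helpers may overwrite it. A minimal sketch of the pattern, with a
 * hypothetical cleanup helper:
 *
 *   int err = rte_errno;      // save the original failure cause
 *   cleanup_partial_state();  // may clobber rte_errno internally
 *   rte_errno = err;          // restore it for the caller
 *   return -rte_errno;
 */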
12425
12426 void
12427 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
12428                           struct mlx5_cache_entry *entry)
12429 {
12430         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
12431                                                           entry);
12432
12433         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
12434         mlx5_free(cache);
12435 }
12436
12437 /**
12438  * Release the flow matcher.
12439  *
12440  * @param dev
12441  *   Pointer to Ethernet device.
12442  * @param handle
12443  *   Pointer to mlx5_flow_handle.
12444  *
12445  * @return
12446  *   1 while a reference on it exists, 0 when freed.
12447  */
12448 static int
12449 flow_dv_matcher_release(struct rte_eth_dev *dev,
12450                         struct mlx5_flow_handle *handle)
12451 {
12452         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
12453         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
12454                                                             typeof(*tbl), tbl);
12455         int ret;
12456
12457         MLX5_ASSERT(matcher->matcher_object);
12458         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
12459         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
12460         return ret;
12461 }
12462
12463 /**
12464  * Release encap_decap resource.
12465  *
12466  * @param list
12467  *   Pointer to the hash list.
12468  * @param entry
12469  *   Pointer to the existing resource entry object.
12470  */
12471 void
12472 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
12473                               struct mlx5_hlist_entry *entry)
12474 {
12475         struct mlx5_dev_ctx_shared *sh = list->ctx;
12476         struct mlx5_flow_dv_encap_decap_resource *res =
12477                 container_of(entry, typeof(*res), entry);
12478
12479         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12480         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
12481 }
12482
12483 /**
12484  * Release an encap/decap resource.
12485  *
12486  * @param dev
12487  *   Pointer to Ethernet device.
12488  * @param encap_decap_idx
12489  *   Index of encap decap resource.
12490  *
12491  * @return
12492  *   1 while a reference on it exists, 0 when freed.
12493  */
12494 static int
12495 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
12496                                      uint32_t encap_decap_idx)
12497 {
12498         struct mlx5_priv *priv = dev->data->dev_private;
12499         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
12500
12501         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
12502                                         encap_decap_idx);
12503         if (!cache_resource)
12504                 return 0;
12505         MLX5_ASSERT(cache_resource->action);
12506         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
12507                                      &cache_resource->entry);
12508 }
12509
12510 /**
12511  * Release a jump to table action resource.
12512  *
12513  * @param dev
12514  *   Pointer to Ethernet device.
12515  * @param rix_jump
12516  *   Index to the jump action resource.
12517  *
12518  * @return
12519  *   1 while a reference on it exists, 0 when freed.
12520  */
12521 static int
12522 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
12523                                   uint32_t rix_jump)
12524 {
12525         struct mlx5_priv *priv = dev->data->dev_private;
12526         struct mlx5_flow_tbl_data_entry *tbl_data;
12527
12528         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
12529                                   rix_jump);
12530         if (!tbl_data)
12531                 return 0;
12532         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
12533 }
12534
12535 void
12536 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
12537                          struct mlx5_hlist_entry *entry)
12538 {
12539         struct mlx5_flow_dv_modify_hdr_resource *res =
12540                 container_of(entry, typeof(*res), entry);
12541
12542         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12543         mlx5_free(entry);
12544 }
12545
12546 /**
12547  * Release a modify-header resource.
12548  *
12549  * @param dev
12550  *   Pointer to Ethernet device.
12551  * @param handle
12552  *   Pointer to mlx5_flow_handle.
12553  *
12554  * @return
12555  *   1 while a reference on it exists, 0 when freed.
12556  */
12557 static int
12558 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
12559                                     struct mlx5_flow_handle *handle)
12560 {
12561         struct mlx5_priv *priv = dev->data->dev_private;
12562         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
12563
12564         MLX5_ASSERT(entry->action);
12565         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
12566 }
12567
12568 void
12569 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
12570                           struct mlx5_cache_entry *entry)
12571 {
12572         struct mlx5_dev_ctx_shared *sh = list->ctx;
12573         struct mlx5_flow_dv_port_id_action_resource *cache =
12574                         container_of(entry, typeof(*cache), entry);
12575
12576         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12577         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
12578 }
12579
12580 /**
12581  * Release port ID action resource.
12582  *
12583  * @param dev
12584  *   Pointer to Ethernet device.
12585  * @param handle
12586  *   Pointer to mlx5_flow_handle.
12587  *
12588  * @return
12589  *   1 while a reference on it exists, 0 when freed.
12590  */
12591 static int
12592 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
12593                                         uint32_t port_id)
12594 {
12595         struct mlx5_priv *priv = dev->data->dev_private;
12596         struct mlx5_flow_dv_port_id_action_resource *cache;
12597
12598         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
12599         if (!cache)
12600                 return 0;
12601         MLX5_ASSERT(cache->action);
12602         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
12603                                      &cache->entry);
12604 }
12605
12606 /**
12607  * Release shared RSS action resource.
12608  *
12609  * @param dev
12610  *   Pointer to Ethernet device.
12611  * @param srss
12612  *   Shared RSS action index.
12613  */
12614 static void
12615 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
12616 {
12617         struct mlx5_priv *priv = dev->data->dev_private;
12618         struct mlx5_shared_action_rss *shared_rss;
12619
12620         shared_rss = mlx5_ipool_get
12621                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
12622         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
12623 }
12624
12625 void
12626 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
12627                             struct mlx5_cache_entry *entry)
12628 {
12629         struct mlx5_dev_ctx_shared *sh = list->ctx;
12630         struct mlx5_flow_dv_push_vlan_action_resource *cache =
12631                         container_of(entry, typeof(*cache), entry);
12632
12633         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12634         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
12635 }
12636
12637 /**
12638  * Release the push VLAN action resource.
12639  *
12640  * @param dev
12641  *   Pointer to Ethernet device.
12642  * @param handle
12643  *   Pointer to mlx5_flow_handle.
12644  *
12645  * @return
12646  *   1 while a reference on it exists, 0 when freed.
12647  */
12648 static int
12649 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
12650                                           struct mlx5_flow_handle *handle)
12651 {
12652         struct mlx5_priv *priv = dev->data->dev_private;
12653         struct mlx5_flow_dv_push_vlan_action_resource *cache;
12654         uint32_t idx = handle->dvh.rix_push_vlan;
12655
12656         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
12657         if (!cache)
12658                 return 0;
12659         MLX5_ASSERT(cache->action);
12660         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
12661                                      &cache->entry);
12662 }
12663
12664 /**
12665  * Release the fate resource.
12666  *
12667  * @param dev
12668  *   Pointer to Ethernet device.
12669  * @param handle
12670  *   Pointer to mlx5_flow_handle.
12671  */
12672 static void
12673 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
12674                                struct mlx5_flow_handle *handle)
12675 {
12676         if (!handle->rix_fate)
12677                 return;
12678         switch (handle->fate_action) {
12679         case MLX5_FLOW_FATE_QUEUE:
12680                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
12681                         mlx5_hrxq_release(dev, handle->rix_hrxq);
12682                 break;
12683         case MLX5_FLOW_FATE_JUMP:
12684                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
12685                 break;
12686         case MLX5_FLOW_FATE_PORT_ID:
12687                 flow_dv_port_id_action_resource_release(dev,
12688                                 handle->rix_port_id_action);
12689                 break;
12690         default:
12691                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
12692                 break;
12693         }
12694         handle->rix_fate = 0;
12695 }
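
/*
 * Note: the rix_hrxq/rix_jump/rix_port_id_action/rix_srss indexes appear
 * to overlay the same storage, accessed generically as rix_fate and
 * discriminated by fate_action, so exactly one release routine is
 * dispatched per handle. A generic sketch of the same pattern:
 *
 *   struct fate {
 *       int kind;                                  // selects the member
 *       union { uint32_t hrxq, jump, port_id; } u; // overlaid indexes
 *   };
 *   // switch (f->kind) picks the matching release call, as above.
 */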
12696
12697 void
12698 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
12699                          struct mlx5_cache_entry *entry)
12700 {
12701         struct mlx5_flow_dv_sample_resource *cache_resource =
12702                         container_of(entry, typeof(*cache_resource), entry);
12703         struct rte_eth_dev *dev = cache_resource->dev;
12704         struct mlx5_priv *priv = dev->data->dev_private;
12705
12706         if (cache_resource->verbs_action)
12707                 claim_zero(mlx5_flow_os_destroy_flow_action
12708                                 (cache_resource->verbs_action));
12709         if (cache_resource->normal_path_tbl)
12710                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12711                         cache_resource->normal_path_tbl);
12712         flow_dv_sample_sub_actions_release(dev,
12713                                 &cache_resource->sample_idx);
12714         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12715                         cache_resource->idx);
12716         DRV_LOG(DEBUG, "sample resource %p: removed",
12717                 (void *)cache_resource);
12718 }
12719
12720 /**
12721  * Release a sample resource.
12722  *
12723  * @param dev
12724  *   Pointer to Ethernet device.
12725  * @param handle
12726  *   Pointer to mlx5_flow_handle.
12727  *
12728  * @return
12729  *   1 while a reference on it exists, 0 when freed.
12730  */
12731 static int
12732 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
12733                                      struct mlx5_flow_handle *handle)
12734 {
12735         struct mlx5_priv *priv = dev->data->dev_private;
12736         struct mlx5_flow_dv_sample_resource *cache_resource;
12737
12738         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12739                          handle->dvh.rix_sample);
12740         if (!cache_resource)
12741                 return 0;
12742         MLX5_ASSERT(cache_resource->verbs_action);
12743         return mlx5_cache_unregister(&priv->sh->sample_action_list,
12744                                      &cache_resource->entry);
12745 }
12746
12747 void
12748 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
12749                              struct mlx5_cache_entry *entry)
12750 {
12751         struct mlx5_flow_dv_dest_array_resource *cache_resource =
12752                         container_of(entry, typeof(*cache_resource), entry);
12753         struct rte_eth_dev *dev = cache_resource->dev;
12754         struct mlx5_priv *priv = dev->data->dev_private;
12755         uint32_t i = 0;
12756
12757         MLX5_ASSERT(cache_resource->action);
12758         if (cache_resource->action)
12759                 claim_zero(mlx5_flow_os_destroy_flow_action
12760                                         (cache_resource->action));
12761         for (; i < cache_resource->num_of_dest; i++)
12762                 flow_dv_sample_sub_actions_release(dev,
12763                                 &cache_resource->sample_idx[i]);
12764         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12765                         cache_resource->idx);
12766         DRV_LOG(DEBUG, "destination array resource %p: removed",
12767                 (void *)cache_resource);
12768 }
12769
12770 /**
12771  * Release a destination array resource.
12772  *
12773  * @param dev
12774  *   Pointer to Ethernet device.
12775  * @param handle
12776  *   Pointer to mlx5_flow_handle.
12777  *
12778  * @return
12779  *   1 while a reference on it exists, 0 when freed.
12780  */
12781 static int
12782 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
12783                                     struct mlx5_flow_handle *handle)
12784 {
12785         struct mlx5_priv *priv = dev->data->dev_private;
12786         struct mlx5_flow_dv_dest_array_resource *cache;
12787
12788         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12789                                handle->dvh.rix_dest_array);
12790         if (!cache)
12791                 return 0;
12792         MLX5_ASSERT(cache->action);
12793         return mlx5_cache_unregister(&priv->sh->dest_array_list,
12794                                      &cache->entry);
12795 }
12796
12797 static void
12798 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
12799 {
12800         struct mlx5_priv *priv = dev->data->dev_private;
12801         struct mlx5_dev_ctx_shared *sh = priv->sh;
12802         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
12803                                 sh->geneve_tlv_option_resource;
12804         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
12805         if (geneve_opt_resource) {
12806                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
12807                                          __ATOMIC_RELAXED))) {
12808                         claim_zero(mlx5_devx_cmd_destroy
12809                                         (geneve_opt_resource->obj));
12810                         mlx5_free(sh->geneve_tlv_option_resource);
12811                         sh->geneve_tlv_option_resource = NULL;
12812                 }
12813         }
12814         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
12815 }
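
/*
 * The GENEVE TLV option release above is a lock-protected refcounted
 * singleton: the shared context holds a single option object and the
 * last reference destroys it under the spinlock. A generic sketch of the
 * same pattern (hypothetical names):
 *
 *   rte_spinlock_lock(&sl);
 *   if (res &&
 *       __atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 *       destroy_obj(res->obj);  // last user tears the object down
 *       res = NULL;
 *   }
 *   rte_spinlock_unlock(&sl);
 */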
12816
12817 /**
12818  * Remove the flow from the NIC but keep it in memory.
12819  * Lock free (the mutex should be acquired by the caller).
12820  *
12821  * @param[in] dev
12822  *   Pointer to Ethernet device.
12823  * @param[in, out] flow
12824  *   Pointer to flow structure.
12825  */
12826 static void
12827 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12828 {
12829         struct mlx5_flow_handle *dh;
12830         uint32_t handle_idx;
12831         struct mlx5_priv *priv = dev->data->dev_private;
12832
12833         if (!flow)
12834                 return;
12835         handle_idx = flow->dev_handles;
12836         while (handle_idx) {
12837                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12838                                     handle_idx);
12839                 if (!dh)
12840                         return;
12841                 if (dh->drv_flow) {
12842                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
12843                         dh->drv_flow = NULL;
12844                 }
12845                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
12846                         flow_dv_fate_resource_release(dev, dh);
12847                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12848                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12849                 handle_idx = dh->next.next;
12850         }
12851 }
12852
12853 /**
12854  * Remove the flow from the NIC and from memory.
12855  * Lock free (the mutex should be acquired by the caller).
12856  *
12857  * @param[in] dev
12858  *   Pointer to the Ethernet device structure.
12859  * @param[in, out] flow
12860  *   Pointer to flow structure.
12861  */
12862 static void
12863 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
12864 {
12865         struct mlx5_flow_handle *dev_handle;
12866         struct mlx5_priv *priv = dev->data->dev_private;
12867         struct mlx5_flow_meter_info *fm = NULL;
12868         uint32_t srss = 0;
12869
12870         if (!flow)
12871                 return;
12872         flow_dv_remove(dev, flow);
12873         if (flow->counter) {
12874                 flow_dv_counter_free(dev, flow->counter);
12875                 flow->counter = 0;
12876         }
12877         if (flow->meter) {
12878                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
12879                 if (fm)
12880                         mlx5_flow_meter_detach(priv, fm);
12881                 flow->meter = 0;
12882         }
12883         if (flow->age)
12884                 flow_dv_aso_age_release(dev, flow->age);
12885         if (flow->geneve_tlv_option) {
12886                 flow_dv_geneve_tlv_option_resource_release(dev);
12887                 flow->geneve_tlv_option = 0;
12888         }
12889         while (flow->dev_handles) {
12890                 uint32_t tmp_idx = flow->dev_handles;
12891
12892                 dev_handle = mlx5_ipool_get(priv->sh->ipool
12893                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
12894                 if (!dev_handle)
12895                         return;
12896                 flow->dev_handles = dev_handle->next.next;
12897                 if (dev_handle->dvh.matcher)
12898                         flow_dv_matcher_release(dev, dev_handle);
12899                 if (dev_handle->dvh.rix_sample)
12900                         flow_dv_sample_resource_release(dev, dev_handle);
12901                 if (dev_handle->dvh.rix_dest_array)
12902                         flow_dv_dest_array_resource_release(dev, dev_handle);
12903                 if (dev_handle->dvh.rix_encap_decap)
12904                         flow_dv_encap_decap_resource_release(dev,
12905                                 dev_handle->dvh.rix_encap_decap);
12906                 if (dev_handle->dvh.modify_hdr)
12907                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
12908                 if (dev_handle->dvh.rix_push_vlan)
12909                         flow_dv_push_vlan_action_resource_release(dev,
12910                                                                   dev_handle);
12911                 if (dev_handle->dvh.rix_tag)
12912                         flow_dv_tag_release(dev,
12913                                             dev_handle->dvh.rix_tag);
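                      /*
                       * A shared RSS fate is not released per handle:
                       * remember its index once and drop the single
                       * reference after the loop, when all handles of this
                       * flow have been freed.
                       */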
12914                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
12915                         flow_dv_fate_resource_release(dev, dev_handle);
12916                 else if (!srss)
12917                         srss = dev_handle->rix_srss;
12918                 if (fm && dev_handle->is_meter_flow_id &&
12919                     dev_handle->split_flow_id)
12920                         mlx5_ipool_free(fm->flow_ipool,
12921                                         dev_handle->split_flow_id);
12922                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12923                            tmp_idx);
12924         }
12925         if (srss)
12926                 flow_dv_shared_rss_action_release(dev, srss);
12927 }
12928
12929 /**
12930  * Release array of hash RX queue objects.
12931  * Helper function.
12932  *
12933  * @param[in] dev
12934  *   Pointer to the Ethernet device structure.
12935  * @param[in, out] hrxqs
12936  *   Array of hash RX queue objects.
12937  *
12938  * @return
12939  *   Total number of references to hash RX queue objects in *hrxqs* array
12940  *   after this operation.
12941  */
12942 static int
12943 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
12944                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
12945 {
12946         size_t i;
12947         int remaining = 0;
12948
12949         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
12950                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
12951
12952                 if (!ret)
12953                         (*hrxqs)[i] = 0;
12954                 remaining += ret;
12955         }
12956         return remaining;
12957 }
12958
12959 /**
12960  * Release all hash RX queue objects representing shared RSS action.
12961  *
12962  * @param[in] dev
12963  *   Pointer to the Ethernet device structure.
12964  * @param[in, out] action
12965  *   Shared RSS action to remove hash RX queue objects from.
12966  *
12967  * @return
12968  *   Total number of references to hash RX queue objects stored in *action*
12969  *   after this operation.
12970  *   Expected to be 0 if no external references held.
12971  */
12972 static int
12973 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
12974                                  struct mlx5_shared_action_rss *shared_rss)
12975 {
12976         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
12977 }
12978
12979 /**
12980  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
12981  * user input.
12982  *
12983  * Only one hash value is available for one L3+L4 combination.
12984  * For example:
12985  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY and
12986  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can
12987  * share the same slot in mlx5_rss_hash_fields.
12988  *
12989  * @param[in] rss
12990  *   Pointer to the shared action RSS conf.
12991  * @param[in, out] hash_field
12992  *   Hash field variable to be adjusted.
12993  *
12994  * @return
12995  *   void
12996  */
12997 static void
12998 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
12999                                      uint64_t *hash_field)
13000 {
13001         uint64_t rss_types = rss->origin.types;
13002
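              /*
               * Example: a pre-created hrxq hashing on both IPv4 source and
               * destination addresses (MLX5_RSS_HASH_IPV4) is narrowed down
               * to IBV_RX_HASH_SRC_IPV4 when the user requested
               * ETH_RSS_L3_SRC_ONLY together with an IPv4 layer type.
               */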
13003         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13004         case MLX5_RSS_HASH_IPV4:
13005                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13006                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13007                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13008                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13009                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13010                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13011                         else
13012                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13013                 }
13014                 return;
13015         case MLX5_RSS_HASH_IPV6:
13016                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13017                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13018                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13019                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13020                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13021                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13022                         else
13023                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13024                 }
13025                 return;
13026         case MLX5_RSS_HASH_IPV4_UDP:
13027                 /* fall-through. */
13028         case MLX5_RSS_HASH_IPV6_UDP:
13029                 if (rss_types & ETH_RSS_UDP) {
13030                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13031                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13032                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13033                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13034                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13035                         else
13036                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
13037                 }
13038                 return;
13039         case MLX5_RSS_HASH_IPV4_TCP:
13040                 /* fall-through. */
13041         case MLX5_RSS_HASH_IPV6_TCP:
13042                 if (rss_types & ETH_RSS_TCP) {
13043                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
13044                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13045                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
13046                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13047                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
13048                         else
13049                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
13050                 }
13051                 return;
13052         default:
13053                 return;
13054         }
13055 }
13056
13057 /**
13058  * Set up the shared RSS action.
13059  * Prepare set of hash RX queue objects sufficient to handle all valid
13060  * hash_fields combinations (see enum ibv_rx_hash_fields).
13061  *
13062  * @param[in] dev
13063  *   Pointer to the Ethernet device structure.
13064  * @param[in] action_idx
13065  *   Shared RSS action ipool index.
13066  * @param[in, out] action
13067  *   Partially initialized shared RSS action.
13068  * @param[out] error
13069  *   Perform verbose error reporting if not NULL. Initialized in case of
13070  *   error only.
13071  *
13072  * @return
13073  *   0 on success, otherwise negative errno value.
13074  */
13075 static int
13076 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
13077                            uint32_t action_idx,
13078                            struct mlx5_shared_action_rss *shared_rss,
13079                            struct rte_flow_error *error)
13080 {
13081         struct mlx5_flow_rss_desc rss_desc = { 0 };
13082         size_t i;
13083         int err;
13084
13085         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
13086                 return rte_flow_error_set(error, rte_errno,
13087                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13088                                           "cannot setup indirection table");
13089         }
13090         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
13091         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
13092         rss_desc.const_q = shared_rss->origin.queue;
13093         rss_desc.queue_num = shared_rss->origin.queue_num;
13094         /* Set non-zero value to indicate a shared RSS. */
13095         rss_desc.shared_rss = action_idx;
13096         rss_desc.ind_tbl = shared_rss->ind_tbl;
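              /*
               * Pre-create one hash RX queue object per entry of
               * mlx5_rss_hash_fields, so any hash_fields value requested by
               * a flow rule later can be served without a new allocation.
               */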
13097         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
13098                 uint32_t hrxq_idx;
13099                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
13100                 int tunnel = 0;
13101
13102                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
13103                 if (shared_rss->origin.level > 1) {
13104                         hash_fields |= IBV_RX_HASH_INNER;
13105                         tunnel = 1;
13106                 }
13107                 rss_desc.tunnel = tunnel;
13108                 rss_desc.hash_fields = hash_fields;
13109                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
13110                 if (!hrxq_idx) {
13111                         rte_flow_error_set
13112                                 (error, rte_errno,
13113                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13114                                  "cannot get hash queue");
13115                         goto error_hrxq_new;
13116                 }
13117                 err = __flow_dv_action_rss_hrxq_set
13118                         (shared_rss, hash_fields, hrxq_idx);
13119                 MLX5_ASSERT(!err);
13120         }
13121         return 0;
13122 error_hrxq_new:
13123         err = rte_errno;
13124         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
13125         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
13126                 shared_rss->ind_tbl = NULL;
13127         rte_errno = err;
13128         return -rte_errno;
13129 }
13130
13131 /**
13132  * Create shared RSS action.
13133  *
13134  * @param[in] dev
13135  *   Pointer to the Ethernet device structure.
13136  * @param[in] conf
13137  *   Shared action configuration.
13138  * @param[in] rss
13139  *   RSS action specification used to create shared action.
13140  * @param[out] error
13141  *   Perform verbose error reporting if not NULL. Initialized in case of
13142  *   error only.
13143  *
13144  * @return
13145  *   A valid shared action ID in case of success, 0 otherwise and
13146  *   rte_errno is set.
13147  */
13148 static uint32_t
13149 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
13150                             const struct rte_flow_indir_action_conf *conf,
13151                             const struct rte_flow_action_rss *rss,
13152                             struct rte_flow_error *error)
13153 {
13154         struct mlx5_priv *priv = dev->data->dev_private;
13155         struct mlx5_shared_action_rss *shared_rss = NULL;
13156         void *queue = NULL;
13157         struct rte_flow_action_rss *origin;
13158         const uint8_t *rss_key;
13159         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
13160         uint32_t idx;
13161
13162         RTE_SET_USED(conf);
13163         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
13164                             0, SOCKET_ID_ANY);
13165         shared_rss = mlx5_ipool_zmalloc
13166                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
13167         if (!shared_rss || !queue) {
13168                 rte_flow_error_set(error, ENOMEM,
13169                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13170                                    "cannot allocate resource memory");
13171                 goto error_rss_init;
13172         }
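              /*
               * The ipool index is later combined with the action type kept
               * in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET, so it
               * must stay below that offset.
               */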
13173         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
13174                 rte_flow_error_set(error, E2BIG,
13175                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13176                                    "rss action number out of range");
13177                 goto error_rss_init;
13178         }
13179         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
13180                                           sizeof(*shared_rss->ind_tbl),
13181                                           0, SOCKET_ID_ANY);
13182         if (!shared_rss->ind_tbl) {
13183                 rte_flow_error_set(error, ENOMEM,
13184                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13185                                    "cannot allocate resource memory");
13186                 goto error_rss_init;
13187         }
13188         memcpy(queue, rss->queue, queue_size);
13189         shared_rss->ind_tbl->queues = queue;
13190         shared_rss->ind_tbl->queues_n = rss->queue_num;
13191         origin = &shared_rss->origin;
13192         origin->func = rss->func;
13193         origin->level = rss->level;
13194         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
13195         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
13196         /* NULL RSS key indicates default RSS key. */
13197         rss_key = !rss->key ? rss_hash_default_key : rss->key;
13198         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
13199         origin->key = &shared_rss->key[0];
13200         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
13201         origin->queue = queue;
13202         origin->queue_num = rss->queue_num;
13203         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
13204                 goto error_rss_init;
13205         rte_spinlock_init(&shared_rss->action_rss_sl);
13206         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13207         rte_spinlock_lock(&priv->shared_act_sl);
13208         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13209                      &priv->rss_shared_actions, idx, shared_rss, next);
13210         rte_spinlock_unlock(&priv->shared_act_sl);
13211         return idx;
13212 error_rss_init:
13213         if (shared_rss) {
13214                 if (shared_rss->ind_tbl)
13215                         mlx5_free(shared_rss->ind_tbl);
13216                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13217                                 idx);
13218         }
13219         if (queue)
13220                 mlx5_free(queue);
13221         return 0;
13222 }
13223
13224 /**
13225  * Destroy the shared RSS action.
13226  * Release related hash RX queue objects.
13227  *
13228  * @param[in] dev
13229  *   Pointer to the Ethernet device structure.
13230  * @param[in] idx
13231  *   The shared RSS action object ID to be removed.
13232  * @param[out] error
13233  *   Perform verbose error reporting if not NULL. Initialized in case of
13234  *   error only.
13235  *
13236  * @return
13237  *   0 on success, otherwise negative errno value.
13238  */
13239 static int
13240 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
13241                              struct rte_flow_error *error)
13242 {
13243         struct mlx5_priv *priv = dev->data->dev_private;
13244         struct mlx5_shared_action_rss *shared_rss =
13245             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13246         uint32_t old_refcnt = 1;
13247         int remaining;
13248         uint16_t *queue = NULL;
13249
13250         if (!shared_rss)
13251                 return rte_flow_error_set(error, EINVAL,
13252                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13253                                           "invalid shared action");
13254         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
13255         if (remaining)
13256                 return rte_flow_error_set(error, EBUSY,
13257                                           RTE_FLOW_ERROR_TYPE_ACTION,
13258                                           NULL,
13259                                           "shared rss hrxq has references");
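              /*
               * Drop the last reference atomically: the CAS from 1 to 0
               * fails while any flow still holds a reference, and the
               * destroy is then refused with EBUSY.
               */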
13260         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
13261                                          0, 0, __ATOMIC_ACQUIRE,
13262                                          __ATOMIC_RELAXED))
13263                 return rte_flow_error_set(error, EBUSY,
13264                                           RTE_FLOW_ERROR_TYPE_ACTION,
13265                                           NULL,
13266                                           "shared rss has references");
13267         queue = shared_rss->ind_tbl->queues;
13268         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
13269         if (remaining)
13270                 return rte_flow_error_set(error, EBUSY,
13271                                           RTE_FLOW_ERROR_TYPE_ACTION,
13272                                           NULL,
13273                                           "shared rss indirection table has"
13274                                           " references");
13275         mlx5_free(queue);
13276         rte_spinlock_lock(&priv->shared_act_sl);
13277         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13278                      &priv->rss_shared_actions, idx, shared_rss, next);
13279         rte_spinlock_unlock(&priv->shared_act_sl);
13280         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13281                         idx);
13282         return 0;
13283 }
13284
13285 /**
13286  * Create indirect action, lock free,
13287  * (mutex should be acquired by caller).
13288  * Dispatcher for action type specific call.
13289  *
13290  * @param[in] dev
13291  *   Pointer to the Ethernet device structure.
13292  * @param[in] conf
13293  *   Shared action configuration.
13294  * @param[in] action
13295  *   Action specification used to create indirect action.
13296  * @param[out] error
13297  *   Perform verbose error reporting if not NULL. Initialized in case of
13298  *   error only.
13299  *
13300  * @return
13301  *   A valid indirect action handle in case of success, NULL otherwise and
13302  *   rte_errno is set.
13303  */
13304 static struct rte_flow_action_handle *
13305 flow_dv_action_create(struct rte_eth_dev *dev,
13306                       const struct rte_flow_indir_action_conf *conf,
13307                       const struct rte_flow_action *action,
13308                       struct rte_flow_error *err)
13309 {
13310         uint32_t idx = 0;
13311         uint32_t ret = 0;
13312
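              /*
               * The returned handle is not a pointer: the action type lives
               * in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the
               * type specific ipool index in the bits below it.
               */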
13313         switch (action->type) {
13314         case RTE_FLOW_ACTION_TYPE_RSS:
13315                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
13316                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
13317                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13318                 break;
13319         case RTE_FLOW_ACTION_TYPE_AGE:
13320                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
13321                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
13322                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13323                 if (ret) {
13324                         struct mlx5_aso_age_action *aso_age =
13325                                               flow_aso_age_get_by_idx(dev, ret);
13326
13327                         if (!aso_age->age_params.context)
13328                                 aso_age->age_params.context =
13329                                                          (void *)(uintptr_t)idx;
13330                 }
13331                 break;
13332         default:
13333                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
13334                                    NULL, "action type not supported");
13335                 break;
13336         }
13337         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
13338 }
13339
13340 /**
13341  * Destroy the indirect action.
13342  * Release action related resources on the NIC and the memory.
13343  * Lock free, (mutex should be acquired by caller).
13344  * Dispatcher for action type specific call.
13345  *
13346  * @param[in] dev
13347  *   Pointer to the Ethernet device structure.
13348  * @param[in] handle
13349  *   The indirect action object handle to be removed.
13350  * @param[out] error
13351  *   Perform verbose error reporting if not NULL. Initialized in case of
13352  *   error only.
13353  *
13354  * @return
13355  *   0 on success, otherwise negative errno value.
13356  */
13357 static int
13358 flow_dv_action_destroy(struct rte_eth_dev *dev,
13359                        struct rte_flow_action_handle *handle,
13360                        struct rte_flow_error *error)
13361 {
13362         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13363         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13364         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13365         int ret;
13366
13367         switch (type) {
13368         case MLX5_INDIRECT_ACTION_TYPE_RSS:
13369                 return __flow_dv_action_rss_release(dev, idx, error);
13370         case MLX5_INDIRECT_ACTION_TYPE_AGE:
13371                 ret = flow_dv_aso_age_release(dev, idx);
13372                 if (ret)
13373                         /*
13374                          * In this case, the last flow holding the
13375                          * reference will actually release the age action.
13376                          */
13377                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
13378                                 " released with references %d.", idx, ret);
13379                 return 0;
13380         default:
13381                 return rte_flow_error_set(error, ENOTSUP,
13382                                           RTE_FLOW_ERROR_TYPE_ACTION,
13383                                           NULL,
13384                                           "action type not supported");
13385         }
13386 }
13387
13388 /**
13389  * Update the shared RSS action configuration in place.
13390  *
13391  * @param[in] dev
13392  *   Pointer to the Ethernet device structure.
13393  * @param[in] idx
13394  *   The shared RSS action object ID to be updated.
13395  * @param[in] action_conf
13396  *   RSS action specification used to modify *shared_rss*.
13397  * @param[out] error
13398  *   Perform verbose error reporting if not NULL. Initialized in case of
13399  *   error only.
13400  *
13401  * @return
13402  *   0 on success, otherwise negative errno value.
13403  * @note Currently only updating the RSS queues is supported.
13404  */
13405 static int
13406 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
13407                             const struct rte_flow_action_rss *action_conf,
13408                             struct rte_flow_error *error)
13409 {
13410         struct mlx5_priv *priv = dev->data->dev_private;
13411         struct mlx5_shared_action_rss *shared_rss =
13412             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13413         int ret = 0;
13414         void *queue = NULL;
13415         uint16_t *queue_old = NULL;
13416         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
13417
13418         if (!shared_rss)
13419                 return rte_flow_error_set(error, EINVAL,
13420                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13421                                           "invalid shared action to update");
13422         if (priv->obj_ops.ind_table_modify == NULL)
13423                 return rte_flow_error_set(error, ENOTSUP,
13424                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13425                                           "cannot modify indirection table");
13426         queue = mlx5_malloc(MLX5_MEM_ZERO,
13427                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
13428                             0, SOCKET_ID_ANY);
13429         if (!queue)
13430                 return rte_flow_error_set(error, ENOMEM,
13431                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13432                                           NULL,
13433                                           "cannot allocate resource memory");
13434         memcpy(queue, action_conf->queue, queue_size);
13435         MLX5_ASSERT(shared_rss->ind_tbl);
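              /*
               * Swap the queue array under the action spinlock: on success
               * the old array is freed and the origin updated, on failure
               * the new array is freed and the indirection table is left
               * untouched.
               */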
13436         rte_spinlock_lock(&shared_rss->action_rss_sl);
13437         queue_old = shared_rss->ind_tbl->queues;
13438         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
13439                                         queue, action_conf->queue_num, true);
13440         if (ret) {
13441                 mlx5_free(queue);
13442                 ret = rte_flow_error_set(error, rte_errno,
13443                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13444                                           "cannot update indirection table");
13445         } else {
13446                 mlx5_free(queue_old);
13447                 shared_rss->origin.queue = queue;
13448                 shared_rss->origin.queue_num = action_conf->queue_num;
13449         }
13450         rte_spinlock_unlock(&shared_rss->action_rss_sl);
13451         return ret;
13452 }
13453
13454 /**
13455  * Update the indirect action configuration in place, lock free,
13456  * (mutex should be acquired by caller).
13457  *
13458  * @param[in] dev
13459  *   Pointer to the Ethernet device structure.
13460  * @param[in] handle
13461  *   The indirect action object handle to be updated.
13462  * @param[in] update
13463  *   Action specification used to modify the action pointed by *handle*.
13464  *   *update* may be of the same type as the action pointed to by the
13465  *   *handle* argument, or some other structure such as a wrapper,
13466  *   depending on the indirect action type.
13467  * @param[out] error
13468  *   Perform verbose error reporting if not NULL. Initialized in case of
13469  *   error only.
13470  *
13471  * @return
13472  *   0 on success, otherwise negative errno value.
13473  */
13474 static int
13475 flow_dv_action_update(struct rte_eth_dev *dev,
13476                         struct rte_flow_action_handle *handle,
13477                         const void *update,
13478                         struct rte_flow_error *err)
13479 {
13480         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13481         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13482         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13483         const void *action_conf;
13484
13485         switch (type) {
13486         case MLX5_INDIRECT_ACTION_TYPE_RSS:
13487                 action_conf = ((const struct rte_flow_action *)update)->conf;
13488                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
13489         default:
13490                 return rte_flow_error_set(err, ENOTSUP,
13491                                           RTE_FLOW_ERROR_TYPE_ACTION,
13492                                           NULL,
13493                                           "action type update not supported");
13494         }
13495 }
13496
13497 static int
13498 flow_dv_action_query(struct rte_eth_dev *dev,
13499                      const struct rte_flow_action_handle *handle, void *data,
13500                      struct rte_flow_error *error)
13501 {
13502         struct mlx5_age_param *age_param;
13503         struct rte_flow_query_age *resp;
13504         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13505         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13506         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13507
13508         switch (type) {
13509         case MLX5_INDIRECT_ACTION_TYPE_AGE:
13510                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
13511                 resp = data;
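                      /*
                       * The action is reported as aged out once the ASO age
                       * state reached AGE_TMOUT; the seconds-since-last-hit
                       * counter is only meaningful while the action has not
                       * aged out yet.
                       */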
13512                 resp->aged = __atomic_load_n(&age_param->state,
13513                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
13514                                                                           1 : 0;
13515                 resp->sec_since_last_hit_valid = !resp->aged;
13516                 if (resp->sec_since_last_hit_valid)
13517                         resp->sec_since_last_hit = __atomic_load_n
13518                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
13519                 return 0;
13520         default:
13521                 return rte_flow_error_set(error, ENOTSUP,
13522                                           RTE_FLOW_ERROR_TYPE_ACTION,
13523                                           NULL,
13524                                           "action type query not supported");
13525         }
13526 }
13527
13528 /**
13529  * Destroy the meter sub policy table rules.
13530  * Lock free, (mutex should be acquired by caller).
13531  *
13532  * @param[in] dev
13533  *   Pointer to Ethernet device.
13534  * @param[in] sub_policy
13535  *   Pointer to meter sub policy table.
13536  */
13537 static void
13538 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
13539                              struct mlx5_flow_meter_sub_policy *sub_policy)
13540 {
13541         struct mlx5_flow_tbl_data_entry *tbl;
13542         int i;
13543
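              /* Per color: destroy the rule first, then release its matcher. */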
13544         for (i = 0; i < RTE_COLORS; i++) {
13545                 if (sub_policy->color_rule[i]) {
13546                         claim_zero(mlx5_flow_os_destroy_flow
13547                                 (sub_policy->color_rule[i]));
13548                         sub_policy->color_rule[i] = NULL;
13549                 }
13550                 if (sub_policy->color_matcher[i]) {
13551                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
13552                                 typeof(*tbl), tbl);
13553                         mlx5_cache_unregister(&tbl->matchers,
13554                                       &sub_policy->color_matcher[i]->entry);
13555                         sub_policy->color_matcher[i] = NULL;
13556                 }
13557         }
13558         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
13559                 if (sub_policy->rix_hrxq[i]) {
13560                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
13561                         sub_policy->rix_hrxq[i] = 0;
13562                 }
13563                 if (sub_policy->jump_tbl[i]) {
13564                         flow_dv_tbl_resource_release(MLX5_SH(dev),
13565                                 sub_policy->jump_tbl[i]);
13566                         sub_policy->jump_tbl[i] = NULL;
13567                 }
13568         }
13569         if (sub_policy->tbl_rsc) {
13570                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13571                         sub_policy->tbl_rsc);
13572                 sub_policy->tbl_rsc = NULL;
13573         }
13574 }
13575
13576 /**
13577  * Destroy policy rules, lock free,
13578  * (mutex should be acquired by caller).
13579  * Iterates over all meter domains and their sub-policies.
13580  *
13581  * @param[in] dev
13582  *   Pointer to the Ethernet device structure.
13583  * @param[in] mtr_policy
13584  *   Meter policy struct.
13585  */
13586 static void
13587 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
13588                       struct mlx5_flow_meter_policy *mtr_policy)
13589 {
13590         uint32_t i, j;
13591         struct mlx5_flow_meter_sub_policy *sub_policy;
13592         uint16_t sub_policy_num;
13593
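              /*
               * sub_policy_num packs one counter per meter domain:
               * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits each, extracted with
               * MLX5_MTR_SUB_POLICY_NUM_MASK.
               */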
13594         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
13595                 sub_policy_num = (mtr_policy->sub_policy_num >>
13596                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
13597                         MLX5_MTR_SUB_POLICY_NUM_MASK;
13598                 for (j = 0; j < sub_policy_num; j++) {
13599                         sub_policy = mtr_policy->sub_policys[i][j];
13600                         if (sub_policy)
13601                                 __flow_dv_destroy_sub_policy_rules
13602                                                 (dev, sub_policy);
13603                 }
13604         }
13605 }
13606
13607 /**
13608  * Destroy policy action, lock free,
13609  * (mutex should be acquired by caller).
13610  * Dispatches on the fate action type of each policy color.
13611  *
13612  * @param[in] dev
13613  *   Pointer to the Ethernet device structure.
13614  * @param[in] mtr_policy
13615  *   Meter policy struct.
13616  */
13617 static void
13618 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
13619                       struct mlx5_flow_meter_policy *mtr_policy)
13620 {
13621         struct rte_flow_action *rss_action;
13622         struct mlx5_flow_handle dev_handle;
13623         uint32_t i, j;
13624
13625         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
13626                 if (mtr_policy->act_cnt[i].rix_mark) {
13627                         flow_dv_tag_release(dev,
13628                                 mtr_policy->act_cnt[i].rix_mark);
13629                         mtr_policy->act_cnt[i].rix_mark = 0;
13630                 }
13631                 if (mtr_policy->act_cnt[i].modify_hdr) {
13632                         dev_handle.dvh.modify_hdr =
13633                                 mtr_policy->act_cnt[i].modify_hdr;
13634                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
13635                 }
13636                 switch (mtr_policy->act_cnt[i].fate_action) {
13637                 case MLX5_FLOW_FATE_SHARED_RSS:
13638                         rss_action = mtr_policy->act_cnt[i].rss;
13639                         mlx5_free(rss_action);
13640                         break;
13641                 case MLX5_FLOW_FATE_PORT_ID:
13642                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
13643                                 flow_dv_port_id_action_resource_release(dev,
13644                                 mtr_policy->act_cnt[i].rix_port_id_action);
13645                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
13646                         }
13647                         break;
13648                 case MLX5_FLOW_FATE_DROP:
13649                 case MLX5_FLOW_FATE_JUMP:
13650                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
13651                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
13652                                                 NULL;
13653                         break;
13654                 default:
13655                         /* Queue action: nothing to do. */
13656                         break;
13657                 }
13658         }
13659         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
13660                 mtr_policy->dr_drop_action[j] = NULL;
13661 }
13662
13663 /**
13664  * Create policy action per domain, lock free,
13665  * (mutex should be acquired by caller).
13666  * Dispatcher for action type specific call.
13667  *
13668  * @param[in] dev
13669  *   Pointer to the Ethernet device structure.
13670  * @param[in] mtr_policy
13671  *   Meter policy struct.
13672  * @param[in] action
13673  *   Action specification used to create meter actions.
13674  * @param[out] error
13675  *   Perform verbose error reporting if not NULL. Initialized in case of
13676  *   error only.
13677  *
13678  * @return
13679  *   0 on success, otherwise negative errno value.
13680  */
13681 static int
13682 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
13683                         struct mlx5_flow_meter_policy *mtr_policy,
13684                         const struct rte_flow_action *actions[RTE_COLORS],
13685                         enum mlx5_meter_domain domain,
13686                         struct rte_mtr_error *error)
13687 {
13688         struct mlx5_priv *priv = dev->data->dev_private;
13689         struct rte_flow_error flow_err;
13690         const struct rte_flow_action *act;
13691         uint64_t action_flags = 0;
13692         struct mlx5_flow_handle dh;
13693         struct mlx5_flow dev_flow;
13694         struct mlx5_flow_dv_port_id_action_resource port_id_action;
13695         int i, ret;
13696         uint8_t egress, transfer;
13697         struct mlx5_meter_policy_action_container *act_cnt = NULL;
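              /*
               * Stack buffer large enough for a modify-header resource
               * carrying up to MLX5_MAX_MODIFY_NUM + 1 modification
               * commands, used to build the SET_TAG action without a heap
               * allocation.
               */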
13698         union {
13699                 struct mlx5_flow_dv_modify_hdr_resource res;
13700                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13701                             sizeof(struct mlx5_modification_cmd) *
13702                             (MLX5_MAX_MODIFY_NUM + 1)];
13703         } mhdr_dummy;
13704
13705         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
13706         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
13707         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
13708         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
13709         memset(&port_id_action, 0,
13710                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
13711         dev_flow.handle = &dh;
13712         dev_flow.dv.port_id_action = &port_id_action;
13713         dev_flow.external = true;
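              /*
               * Walk the action list of every policy color; colors beyond
               * MLX5_MTR_RTE_COLORS have no action container, so only DROP
               * is accepted for them below.
               */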
13714         for (i = 0; i < RTE_COLORS; i++) {
13715                 if (i < MLX5_MTR_RTE_COLORS)
13716                         act_cnt = &mtr_policy->act_cnt[i];
13717                 for (act = actions[i];
13718                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
13719                         act++) {
13720                         switch (act->type) {
13721                         case RTE_FLOW_ACTION_TYPE_MARK:
13722                         {
13723                                 uint32_t tag_be = mlx5_flow_mark_set
13724                                         (((const struct rte_flow_action_mark *)
13725                                         (act->conf))->id);
13726
13727                                 if (i >= MLX5_MTR_RTE_COLORS)
13728                                         return -rte_mtr_error_set(error,
13729                                           ENOTSUP,
13730                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13731                                           NULL,
13732                                           "cannot create policy "
13733                                           "mark action for this color");
13734                                 dev_flow.handle->mark = 1;
13735                                 if (flow_dv_tag_resource_register(dev, tag_be,
13736                                                   &dev_flow, &flow_err))
13737                                         return -rte_mtr_error_set(error,
13738                                         ENOTSUP,
13739                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13740                                         NULL,
13741                                         "cannot setup policy mark action");
13742                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
13743                                 act_cnt->rix_mark =
13744                                         dev_flow.handle->dvh.rix_tag;
13745                                 if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
13746                                         dev_flow.handle->rix_hrxq =
13747                         mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
13748                                         flow_drv_rxq_flags_set(dev,
13749                                                 dev_flow.handle);
13750                                 }
13751                                 action_flags |= MLX5_FLOW_ACTION_MARK;
13752                                 break;
13753                         }
13754                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
13755                         {
13756                                 struct mlx5_flow_dv_modify_hdr_resource
13757                                         *mhdr_res = &mhdr_dummy.res;
13758
13759                                 if (i >= MLX5_MTR_RTE_COLORS)
13760                                         return -rte_mtr_error_set(error,
13761                                           ENOTSUP,
13762                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13763                                           NULL,
13764                                           "cannot create policy "
13765                                           "set tag action for this color");
13766                                 memset(mhdr_res, 0, sizeof(*mhdr_res));
13767                                 mhdr_res->ft_type = transfer ?
13768                                         MLX5DV_FLOW_TABLE_TYPE_FDB :
13769                                         egress ?
13770                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
13771                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
13772                                 if (flow_dv_convert_action_set_tag
13773                                 (dev, mhdr_res,
13774                                 (const struct rte_flow_action_set_tag *)
13775                                 act->conf,  &flow_err))
13776                                         return -rte_mtr_error_set(error,
13777                                         ENOTSUP,
13778                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13779                                         NULL, "cannot convert policy "
13780                                         "set tag action");
13781                                 if (!mhdr_res->actions_num)
13782                                         return -rte_mtr_error_set(error,
13783                                         ENOTSUP,
13784                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13785                                         NULL, "cannot find policy "
13786                                         "set tag action");
13787                                 /* create modify action if needed. */
13788                                 dev_flow.dv.group = 1;
13789                                 if (flow_dv_modify_hdr_resource_register
13790                                         (dev, mhdr_res, &dev_flow, &flow_err))
13791                                         return -rte_mtr_error_set(error,
13792                                         ENOTSUP,
13793                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13794                                         NULL, "cannot register policy "
13795                                         "set tag action");
13796                                 act_cnt->modify_hdr =
13797                                 dev_flow.handle->dvh.modify_hdr;
13798                                 if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
13799                                         dev_flow.handle->rix_hrxq =
13800                                 mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
13801                                         flow_drv_rxq_flags_set(dev,
13802                                                 dev_flow.handle);
13803                                 }
13804                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13805                                 break;
13806                         }
13807                         case RTE_FLOW_ACTION_TYPE_DROP:
13808                         {
13809                                 struct mlx5_flow_mtr_mng *mtrmng =
13810                                                 priv->sh->mtrmng;
13811                                 struct mlx5_flow_tbl_data_entry *tbl_data;
13812
13813                                 /*
13814                                  * Create the drop table with
13815                                  * METER DROP level.
13816                                  */
13817                                 if (!mtrmng->drop_tbl[domain]) {
13818                                         mtrmng->drop_tbl[domain] =
13819                                         flow_dv_tbl_resource_get(dev,
13820                                         MLX5_FLOW_TABLE_LEVEL_METER,
13821                                         egress, transfer, false, NULL, 0,
13822                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
13823                                         if (!mtrmng->drop_tbl[domain])
13824                                                 return -rte_mtr_error_set
13825                                         (error, ENOTSUP,
13826                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13827                                         NULL,
13828                                         "Failed to create meter drop table");
13829                                 }
13830                                 tbl_data = container_of
13831                                 (mtrmng->drop_tbl[domain],
13832                                 struct mlx5_flow_tbl_data_entry, tbl);
13833                                 if (i < MLX5_MTR_RTE_COLORS) {
13834                                         act_cnt->dr_jump_action[domain] =
13835                                                 tbl_data->jump.action;
13836                                         act_cnt->fate_action =
13837                                                 MLX5_FLOW_FATE_DROP;
13838                                 }
13839                                 if (i == RTE_COLOR_RED)
13840                                         mtr_policy->dr_drop_action[domain] =
13841                                                 tbl_data->jump.action;
13842                                 action_flags |= MLX5_FLOW_ACTION_DROP;
13843                                 break;
13844                         }
13845                         case RTE_FLOW_ACTION_TYPE_QUEUE:
13846                         {
13847                                 struct mlx5_hrxq *hrxq;
13848                                 uint32_t hrxq_idx;
13849                                 struct mlx5_flow_rss_desc rss_desc;
13850                                 struct mlx5_flow_meter_sub_policy *sub_policy =
13851                                 mtr_policy->sub_policys[domain][0];
13852
13853                                 if (i >= MLX5_MTR_RTE_COLORS)
13854                                         return -rte_mtr_error_set(error,
13855                                         ENOTSUP,
13856                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13857                                         NULL, "cannot create policy "
13858                                         "fate queue for this color");
13859                                 memset(&rss_desc, 0,
13860                                         sizeof(struct mlx5_flow_rss_desc));
13861                                 rss_desc.queue_num = 1;
13862                                 rss_desc.const_q = act->conf;
13863                                 hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
13864                                                     &rss_desc, &hrxq_idx);
13865                                 if (!hrxq)
13866                                         return -rte_mtr_error_set(error,
13867                                         ENOTSUP,
13868                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13869                                         NULL,
13870                                         "cannot create policy fate queue");
13871                                 sub_policy->rix_hrxq[i] = hrxq_idx;
13872                                 act_cnt->fate_action =
13873                                         MLX5_FLOW_FATE_QUEUE;
13874                                 dev_flow.handle->fate_action =
13875                                         MLX5_FLOW_FATE_QUEUE;
13876                                 if (action_flags & MLX5_FLOW_ACTION_MARK ||
13877                                     action_flags & MLX5_FLOW_ACTION_SET_TAG) {
13878                                         dev_flow.handle->rix_hrxq = hrxq_idx;
13879                                         flow_drv_rxq_flags_set(dev,
13880                                                 dev_flow.handle);
13881                                 }
13882                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
13883                                 break;
13884                         }
13885                         case RTE_FLOW_ACTION_TYPE_RSS:
13886                         {
13887                                 int rss_size;
13888
13889                                 if (i >= MLX5_MTR_RTE_COLORS)
13890                                         return -rte_mtr_error_set(error,
13891                                           ENOTSUP,
13892                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13893                                           NULL,
13894                                           "cannot create policy "
13895                                           "rss action for this color");
13896                                 /*
13897                                  * Save RSS conf into policy struct
13898                                  * for translate stage.
13899                                  */
13900                                 rss_size = (int)rte_flow_conv
13901                                         (RTE_FLOW_CONV_OP_ACTION,
13902                                         NULL, 0, act, &flow_err);
13903                                 if (rss_size <= 0)
13904                                         return -rte_mtr_error_set(error,
13905                                           ENOTSUP,
13906                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13907                                           NULL, "wrong RSS action "
13908                                           "struct size");
13909                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
13910                                                 rss_size, 0, SOCKET_ID_ANY);
13911                                 if (!act_cnt->rss)
13912                                         return -rte_mtr_error_set(error,
13913                                           ENOTSUP,
13914                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13915                                           NULL,
13916                                           "failed to allocate RSS action memory");
13917                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
13918                                         act_cnt->rss, rss_size,
13919                                         act, &flow_err);
13920                                 if (ret < 0)
13921                                         return -rte_mtr_error_set(error,
13922                                           ENOTSUP,
13923                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13924                                           NULL, "failed to save "
13925                                           "RSS action into policy struct");
13926                                 act_cnt->fate_action =
13927                                         MLX5_FLOW_FATE_SHARED_RSS;
13928                                 action_flags |= MLX5_FLOW_ACTION_RSS;
13929                                 break;
13930                         }
13931                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
13932                         {
13933                                 struct mlx5_flow_dv_port_id_action_resource
13934                                         port_id_resource;
13935                                 uint32_t port_id = 0;
13936
13937                                 if (i >= MLX5_MTR_RTE_COLORS)
13938                                         return -rte_mtr_error_set(error,
13939                                         ENOTSUP,
13940                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13941                                         NULL, "cannot create policy "
13942                                         "port action for this color");
13943                                 memset(&port_id_resource, 0,
13944                                         sizeof(port_id_resource));
13945                                 if (flow_dv_translate_action_port_id(dev, act,
13946                                                 &port_id, &flow_err))
13947                                         return -rte_mtr_error_set(error,
13948                                         ENOTSUP,
13949                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13950                                         NULL, "cannot translate "
13951                                         "policy port action");
13952                                 port_id_resource.port_id = port_id;
13953                                 if (flow_dv_port_id_action_resource_register
13954                                         (dev, &port_id_resource,
13955                                         &dev_flow, &flow_err))
13956                                         return -rte_mtr_error_set(error,
13957                                         ENOTSUP,
13958                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13959                                         NULL, "cannot setup "
13960                                         "policy port action");
13961                                 act_cnt->rix_port_id_action =
13962                                         dev_flow.handle->rix_port_id_action;
13963                                 act_cnt->fate_action =
13964                                         MLX5_FLOW_FATE_PORT_ID;
13965                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
13966                                 break;
13967                         }
13968                         case RTE_FLOW_ACTION_TYPE_JUMP:
13969                         {
13970                                 uint32_t jump_group = 0;
13971                                 uint32_t table = 0;
13972                                 struct mlx5_flow_tbl_data_entry *tbl_data;
13973                                 struct flow_grp_info grp_info = {
13974                                         .external = !!dev_flow.external,
13975                                         .transfer = !!transfer,
13976                                         .fdb_def_rule = !!priv->fdb_def_rule,
13977                                         .std_tbl_fix = 0,
13978                                         .skip_scale = dev_flow.skip_scale &
13979                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
13980                                 };
13981                                 struct mlx5_flow_meter_sub_policy *sub_policy =
13982                                 mtr_policy->sub_policys[domain][0];
13983
13984                                 if (i >= MLX5_MTR_RTE_COLORS)
13985                                         return -rte_mtr_error_set(error,
13986                                           ENOTSUP,
13987                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13988                                           NULL,
13989                                           "cannot create policy "
13990                                           "jump action for this color");
13991                                 jump_group =
13992                                 ((const struct rte_flow_action_jump *)
13993                                                         act->conf)->group;
13994                                 if (mlx5_flow_group_to_table(dev, NULL,
13995                                                        jump_group,
13996                                                        &table,
13997                                                        &grp_info, &flow_err))
13998                                         return -rte_mtr_error_set(error,
13999                                         ENOTSUP,
14000                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14001                                         NULL, "cannot setup "
14002                                         "policy jump action");
14003                                 sub_policy->jump_tbl[i] =
14004                                 flow_dv_tbl_resource_get(dev,
14005                                         table, egress,
14006                                         transfer,
14007                                         !!dev_flow.external,
14008                                         NULL, jump_group, 0,
14009                                         0, &flow_err);
14010                                 if (!sub_policy->jump_tbl[i])
14011                                         return -rte_mtr_error_set(error,
14013                                         ENOTSUP,
14014                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14015                                         NULL, "cannot create jump action.");
14016                                 tbl_data = container_of
14017                                 (sub_policy->jump_tbl[i],
14018                                 struct mlx5_flow_tbl_data_entry, tbl);
14019                                 act_cnt->dr_jump_action[domain] =
14020                                         tbl_data->jump.action;
14021                                 act_cnt->fate_action =
14022                                         MLX5_FLOW_FATE_JUMP;
14023                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
14024                                 break;
14025                         }
14026                         default:
14027                                 return -rte_mtr_error_set(error, ENOTSUP,
14028                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14029                                           NULL, "action type not supported");
14030                         }
14031                 }
14032         }
14033         return 0;
14034 }
14035
14036 /**
14037  * Create the policy actions per domain, lock free
14038  * (mutex should be acquired by the caller).
14039  * Dispatches to the action type specific creation routine.
14040  *
14041  * @param[in] dev
14042  *   Pointer to the Ethernet device structure.
14043  * @param[in] mtr_policy
14044  *   Meter policy struct.
14045  * @param[in] actions
14046  *   Action specification used to create meter actions.
14047  * @param[out] error
14048  *   Perform verbose error reporting if not NULL. Initialized in case of
14049  *   error only.
14050  *
14051  * @return
14052  *   0 on success, otherwise negative errno value.
14053  */
14054 static int
14055 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
14056                       struct mlx5_flow_meter_policy *mtr_policy,
14057                       const struct rte_flow_action *actions[RTE_COLORS],
14058                       struct rte_mtr_error *error)
14059 {
14060         int ret, i;
14061         uint16_t sub_policy_num;
14062
14063         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14064                 sub_policy_num = (mtr_policy->sub_policy_num >>
14065                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14066                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14067                 if (sub_policy_num) {
14068                         ret = __flow_dv_create_domain_policy_acts(dev,
14069                                 mtr_policy, actions,
14070                                 (enum mlx5_meter_domain)i, error);
14071                         if (ret)
14072                                 return ret;
14073                 }
14074         }
14075         return 0;
14076 }
14077
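/*
 * Layout note with an illustrative helper (not part of the driver code):
 * mtr_policy->sub_policy_num packs one small counter per meter domain,
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits apart and masked by
 * MLX5_MTR_SUB_POLICY_NUM_MASK. A hypothetical accessor for the scheme
 * used by the loop above could look like:
 *
 *	static inline uint16_t
 *	mtr_sub_policy_num(const struct mlx5_flow_meter_policy *policy,
 *			   enum mlx5_meter_domain domain)
 *	{
 *		return (policy->sub_policy_num >>
 *			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 *			MLX5_MTR_SUB_POLICY_NUM_MASK;
 *	}
 */
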
14078 /**
14079  * Query a DV flow rule for its statistics via DevX.
14080  *
14081  * @param[in] dev
14082  *   Pointer to Ethernet device.
14083  * @param[in] flow
14084  *   Pointer to the sub flow.
14085  * @param[out] data
14086  *   Data retrieved by the query.
14087  * @param[out] error
14088  *   Perform verbose error reporting if not NULL.
14089  *
14090  * @return
14091  *   0 on success, a negative errno value otherwise and rte_errno is set.
14092  */
14093 static int
14094 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
14095                     void *data, struct rte_flow_error *error)
14096 {
14097         struct mlx5_priv *priv = dev->data->dev_private;
14098         struct rte_flow_query_count *qc = data;
14099
14100         if (!priv->config.devx)
14101                 return rte_flow_error_set(error, ENOTSUP,
14102                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14103                                           NULL,
14104                                           "counters are not supported");
14105         if (flow->counter) {
14106                 uint64_t pkts, bytes;
14107                 struct mlx5_flow_counter *cnt;
14108                 int err;
14109
14110                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
14111                                                  NULL);
14112                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
14113                                            &bytes);
14113
14114                 if (err)
14115                         return rte_flow_error_set(error, -err,
14116                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14117                                         NULL, "cannot read counters");
14118                 qc->hits_set = 1;
14119                 qc->bytes_set = 1;
14120                 qc->hits = pkts - cnt->hits;
14121                 qc->bytes = bytes - cnt->bytes;
14122                 if (qc->reset) {
14123                         cnt->hits = pkts;
14124                         cnt->bytes = bytes;
14125                 }
14126                 return 0;
14127         }
14128         return rte_flow_error_set(error, EINVAL,
14129                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14130                                   NULL,
14131                                   "counters are not available");
14132 }
14133
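/*
 * Usage sketch (application side, not part of this file), assuming a flow
 * created with a COUNT action; the reported hits/bytes are deltas since
 * the last reset because the query above subtracts the saved baseline:
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error qerr;
 *
 *	if (rte_flow_query(port_id, flow, actions, &qc, &qerr) == 0 &&
 *	    qc.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 */
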
14134 /**
14135  * Query a flow rule AGE action for aging information.
14136  *
14137  * @param[in] dev
14138  *   Pointer to Ethernet device.
14139  * @param[in] flow
14140  *   Pointer to the sub flow.
14141  * @param[out] data
14142  *   Data retrieved by the query.
14143  * @param[out] error
14144  *   Perform verbose error reporting if not NULL.
14145  *
14146  * @return
14147  *   0 on success, a negative errno value otherwise and rte_errno is set.
14148  */
14149 static int
14150 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
14151                   void *data, struct rte_flow_error *error)
14152 {
14153         struct rte_flow_query_age *resp = data;
14154         struct mlx5_age_param *age_param;
14155
14156         if (flow->age) {
14157                 struct mlx5_aso_age_action *act =
14158                                      flow_aso_age_get_by_idx(dev, flow->age);
14159
14160                 age_param = &act->age_params;
14161         } else if (flow->counter) {
14162                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
14163
14164                 if (!age_param || !age_param->timeout)
14165                         return rte_flow_error_set
14166                                         (error, EINVAL,
14167                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14168                                          NULL, "cannot read age data");
14169         } else {
14170                 return rte_flow_error_set(error, EINVAL,
14171                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14172                                           NULL, "age data not available");
14173         }
14174         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
14175                                      AGE_TMOUT ? 1 : 0;
14176         resp->sec_since_last_hit_valid = !resp->aged;
14177         if (resp->sec_since_last_hit_valid)
14178                 resp->sec_since_last_hit = __atomic_load_n
14179                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
14180         return 0;
14181 }
14182
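/*
 * Usage sketch (application side, not part of this file): reading the
 * aging state reported by the query above:
 *
 *	struct rte_flow_query_age age = { 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_AGE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error qerr;
 *
 *	if (rte_flow_query(port_id, flow, actions, &age, &qerr) == 0) {
 *		if (age.aged)
 *			printf("flow aged out\n");
 *		else if (age.sec_since_last_hit_valid)
 *			printf("last hit %u seconds ago\n",
 *			       age.sec_since_last_hit);
 *	}
 */
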
14183 /**
14184  * Query a flow.
14185  *
14186  * @see rte_flow_query()
14187  * @see rte_flow_ops
14188  */
14189 static int
14190 flow_dv_query(struct rte_eth_dev *dev,
14191               struct rte_flow *flow,
14192               const struct rte_flow_action *actions,
14193               void *data,
14194               struct rte_flow_error *error)
14195 {
14196         int ret = -EINVAL;
14197
14198         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
14199                 switch (actions->type) {
14200                 case RTE_FLOW_ACTION_TYPE_VOID:
14201                         break;
14202                 case RTE_FLOW_ACTION_TYPE_COUNT:
14203                         ret = flow_dv_query_count(dev, flow, data, error);
14204                         break;
14205                 case RTE_FLOW_ACTION_TYPE_AGE:
14206                         ret = flow_dv_query_age(dev, flow, data, error);
14207                         break;
14208                 default:
14209                         return rte_flow_error_set(error, ENOTSUP,
14210                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14211                                                   actions,
14212                                                   "action not supported");
14213                 }
14214         }
14215         return ret;
14216 }
14217
14218 /**
14219  * Destroy the meter table set.
14220  * Lock free, (mutex should be acquired by caller).
14221  *
14222  * @param[in] dev
14223  *   Pointer to Ethernet device.
14224  * @param[in] fm
14225  *   Meter information table.
14226  */
14227 static void
14228 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
14229                         struct mlx5_flow_meter_info *fm)
14230 {
14231         struct mlx5_priv *priv = dev->data->dev_private;
14232         int i;
14233
14234         if (!fm || !priv->config.dv_flow_en)
14235                 return;
14236         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14237                 if (fm->drop_rule[i]) {
14238                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
14239                         fm->drop_rule[i] = NULL;
14240                 }
14241         }
14242 }
14243
14244 static void
14245 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
14246 {
14247         struct mlx5_priv *priv = dev->data->dev_private;
14248         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14249         struct mlx5_flow_tbl_data_entry *tbl;
14250         int i, j;
14251
14252         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14253                 if (mtrmng->def_rule[i]) {
14254                         claim_zero(mlx5_flow_os_destroy_flow
14255                                         (mtrmng->def_rule[i]));
14256                         mtrmng->def_rule[i] = NULL;
14257                 }
14258                 if (mtrmng->def_matcher[i]) {
14259                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
14260                                 struct mlx5_flow_tbl_data_entry, tbl);
14261                         mlx5_cache_unregister(&tbl->matchers,
14262                                       &mtrmng->def_matcher[i]->entry);
14263                         mtrmng->def_matcher[i] = NULL;
14264                 }
14265                 for (j = 0; j < MLX5_REG_BITS; j++) {
14266                         if (mtrmng->drop_matcher[i][j]) {
14267                                 tbl =
14268                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
14269                                              struct mlx5_flow_tbl_data_entry,
14270                                              tbl);
14271                                 mlx5_cache_unregister(&tbl->matchers,
14272                                         &mtrmng->drop_matcher[i][j]->entry);
14273                                 mtrmng->drop_matcher[i][j] = NULL;
14274                         }
14275                 }
14276                 if (mtrmng->drop_tbl[i]) {
14277                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14278                                 mtrmng->drop_tbl[i]);
14279                         mtrmng->drop_tbl[i] = NULL;
14280                 }
14281         }
14282 }
14283
14284 /* Number of meter flow actions, count and jump or count and drop. */
14285 #define METER_ACTIONS 2
14286
14287 static void
14288 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
14289                               enum mlx5_meter_domain domain)
14290 {
14291         struct mlx5_priv *priv = dev->data->dev_private;
14292         struct mlx5_flow_meter_def_policy *def_policy =
14293                         priv->sh->mtrmng->def_policy[domain];
14294
14295         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
14296         mlx5_free(def_policy);
14297         priv->sh->mtrmng->def_policy[domain] = NULL;
14298 }
14299
14300 /**
14301  * Destroy the default policy table set.
14302  *
14303  * @param[in] dev
14304  *   Pointer to Ethernet device.
14305  */
14306 static void
14307 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
14308 {
14309         struct mlx5_priv *priv = dev->data->dev_private;
14310         int i;
14311
14312         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
14313                 if (priv->sh->mtrmng->def_policy[i])
14314                         __flow_dv_destroy_domain_def_policy(dev,
14315                                         (enum mlx5_meter_domain)i);
14316         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
14317 }
14318
14319 static int
14320 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
14321                         uint32_t color_reg_c_idx,
14322                         enum rte_color color, void *matcher_object,
14323                         int actions_n, void *actions,
14324                         bool is_default_policy, void **rule,
14325                         const struct rte_flow_attr *attr)
14326 {
14327         int ret;
14328         struct mlx5_flow_dv_match_params value = {
14329                 .size = sizeof(value.buf) -
14330                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14331         };
14332         struct mlx5_flow_dv_match_params matcher = {
14333                 .size = sizeof(matcher.buf) -
14334                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14335         };
14336         struct mlx5_priv *priv = dev->data->dev_private;
14337
14338         if (!is_default_policy && (priv->representor || priv->master)) {
14339                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
14340                                                    value.buf, NULL, attr)) {
14341                         DRV_LOG(ERR,
14342                         "Failed to create meter policy flow with port.");
14343                         return -1;
14344                 }
14345         }
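        /*
         * Match on the meter color carried in the REG_C register:
         * rte_col_2_mlx5_col() maps the RTE color to the value written
         * by the hardware meter and the UINT32_MAX mask makes the
         * match exact.
         */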
14346         flow_dv_match_meta_reg(matcher.buf, value.buf,
14347                                 (enum modify_reg)color_reg_c_idx,
14348                                 rte_col_2_mlx5_col(color),
14349                                 UINT32_MAX);
14350         ret = mlx5_flow_os_create_flow(matcher_object,
14351                         (void *)&value, actions_n, actions, rule);
14352         if (ret) {
14353                 DRV_LOG(ERR, "Failed to create meter policy flow.");
14354                 return -1;
14355         }
14356         return 0;
14357 }
14358
14359 static int
14360 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
14361                         uint32_t color_reg_c_idx,
14362                         uint16_t priority,
14363                         struct mlx5_flow_meter_sub_policy *sub_policy,
14364                         const struct rte_flow_attr *attr,
14365                         bool is_default_policy,
14366                         struct rte_flow_error *error)
14367 {
14368         struct mlx5_cache_entry *entry;
14369         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
14370         struct mlx5_flow_dv_matcher matcher = {
14371                 .mask = {
14372                         .size = sizeof(matcher.mask.buf) -
14373                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14374                 },
14375                 .tbl = tbl_rsc,
14376         };
14377         struct mlx5_flow_dv_match_params value = {
14378                 .size = sizeof(value.buf) -
14379                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14380         };
14381         struct mlx5_flow_cb_ctx ctx = {
14382                 .error = error,
14383                 .data = &matcher,
14384         };
14385         struct mlx5_flow_tbl_data_entry *tbl_data;
14386         struct mlx5_priv *priv = dev->data->dev_private;
14387         uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
14388
14389         if (!is_default_policy && (priv->representor || priv->master)) {
14390                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
14391                                                    value.buf, NULL, attr)) {
14392                         DRV_LOG(ERR,
14393                         "Failed to register meter policy matcher with port.");
14394                         return -1;
14395                 }
14396         }
14397         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
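        /*
         * Only matchers for the colors below RED match on the color
         * register; the RED matcher is created without the color mask
         * so that, at its lower priority, it catches whatever the
         * higher priority color rules did not.
         */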
14398         if (priority < RTE_COLOR_RED)
14399                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14400                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
14401         matcher.priority = priority;
14402         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
14403                                         matcher.mask.size);
14404         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14405         if (!entry) {
14406                 DRV_LOG(ERR, "Failed to register meter policy matcher.");
14407                 return -1;
14408         }
14409         sub_policy->color_matcher[priority] =
14410                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
14411         return 0;
14412 }
14413
14414 /**
14415  * Create the policy rules per domain.
14416  *
14417  * @param[in] dev
14418  *   Pointer to Ethernet device.
14419  * @param[in] sub_policy
14420  *   Pointer to the sub policy table.
14421  * @param[in] egress
14422  *   Direction of the table.
14423  * @param[in] transfer
14424  *   E-Switch or NIC flow.
14425  * @param[in] is_default_policy
14426  *   True for the default policy table set, false otherwise.
14427  * @param[in] acts
14426  *   Pointer to policy action list per color.
14427  *
14428  * @return
14429  *   0 on success, -1 otherwise.
14430  */
14431 static int
14432 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
14433                 struct mlx5_flow_meter_sub_policy *sub_policy,
14434                 uint8_t egress, uint8_t transfer, bool is_default_policy,
14435                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
14436 {
14437         struct rte_flow_error flow_err;
14438         uint32_t color_reg_c_idx;
14439         struct rte_flow_attr attr = {
14440                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
14441                 .priority = 0,
14442                 .ingress = 0,
14443                 .egress = !!egress,
14444                 .transfer = !!transfer,
14445                 .reserved = 0,
14446         };
14447         int i;
14448         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
14449
14450         if (ret < 0)
14451                 return -1;
14452         /* Create policy table with POLICY level. */
14453         if (!sub_policy->tbl_rsc)
14454                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
14455                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
14456                                 egress, transfer, false, NULL, 0, 0,
14457                                 sub_policy->idx, &flow_err);
14458         if (!sub_policy->tbl_rsc) {
14459                 DRV_LOG(ERR,
14460                         "Failed to create meter sub policy table.");
14461                 return -1;
14462         }
14463         /* Prepare matchers. */
14464         color_reg_c_idx = ret;
14465         for (i = 0; i < RTE_COLORS; i++) {
14466                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
14467                         continue;
14468                 attr.priority = i;
14469                 if (!sub_policy->color_matcher[i]) {
14470                         /* Create matchers for Color. */
14471                         if (__flow_dv_create_policy_matcher(dev,
14472                                 color_reg_c_idx, i, sub_policy,
14473                                 &attr, is_default_policy, &flow_err))
14474                                 return -1;
14475                 }
14476                 /* Create flow, matching color. */
14477                 if (acts[i].actions_n)
14478                         if (__flow_dv_create_policy_flow(dev,
14479                                 color_reg_c_idx, (enum rte_color)i,
14480                                 sub_policy->color_matcher[i]->matcher_object,
14481                                 acts[i].actions_n,
14482                                 acts[i].dv_actions,
14483                                 is_default_policy,
14484                                 &sub_policy->color_rule[i],
14485                                 &attr))
14486                                 return -1;
14487         }
14488         return 0;
14489 }
14490
14491 static int
14492 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
14493                         struct mlx5_flow_meter_policy *mtr_policy,
14494                         struct mlx5_flow_meter_sub_policy *sub_policy,
14495                         uint32_t domain)
14496 {
14497         struct mlx5_priv *priv = dev->data->dev_private;
14498         struct mlx5_meter_policy_acts acts[RTE_COLORS];
14499         struct mlx5_flow_dv_tag_resource *tag;
14500         struct mlx5_flow_dv_port_id_action_resource *port_action;
14501         struct mlx5_hrxq *hrxq;
14502         uint8_t egress, transfer;
14503         int i;
14504
14505         for (i = 0; i < RTE_COLORS; i++) {
14506                 acts[i].actions_n = 0;
14507                 if (i == RTE_COLOR_YELLOW)
14508                         continue;
14509                 if (i == RTE_COLOR_RED) {
14510                         /* Only support drop on red. */
14511                         acts[i].dv_actions[0] =
14512                         mtr_policy->dr_drop_action[domain];
14513                         acts[i].actions_n = 1;
14514                         continue;
14515                 }
14516                 if (mtr_policy->act_cnt[i].rix_mark) {
14517                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
14518                                         mtr_policy->act_cnt[i].rix_mark);
14519                         if (!tag) {
14520                                 DRV_LOG(ERR, "Failed to find "
14521                                 "mark action for policy.");
14522                                 return -1;
14523                         }
14524                         acts[i].dv_actions[acts[i].actions_n] =
14525                                                 tag->action;
14526                         acts[i].actions_n++;
14527                 }
14528                 if (mtr_policy->act_cnt[i].modify_hdr) {
14529                         acts[i].dv_actions[acts[i].actions_n] =
14530                         mtr_policy->act_cnt[i].modify_hdr->action;
14531                         acts[i].actions_n++;
14532                 }
14533                 if (mtr_policy->act_cnt[i].fate_action) {
14534                         switch (mtr_policy->act_cnt[i].fate_action) {
14535                         case MLX5_FLOW_FATE_PORT_ID:
14536                                 port_action = mlx5_ipool_get
14537                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
14538                                 mtr_policy->act_cnt[i].rix_port_id_action);
14539                                 if (!port_action) {
14540                                         DRV_LOG(ERR, "Failed to find "
14541                                                 "port action for policy.");
14542                                         return -1;
14543                                 }
14544                                 acts[i].dv_actions[acts[i].actions_n] =
14545                                 port_action->action;
14546                                 acts[i].actions_n++;
14547                                 break;
14548                         case MLX5_FLOW_FATE_DROP:
14549                         case MLX5_FLOW_FATE_JUMP:
14550                                 acts[i].dv_actions[acts[i].actions_n] =
14551                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
14552                                 acts[i].actions_n++;
14553                                 break;
14554                         case MLX5_FLOW_FATE_SHARED_RSS:
14555                         case MLX5_FLOW_FATE_QUEUE:
14556                                 hrxq = mlx5_ipool_get
14557                                 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
14558                                 sub_policy->rix_hrxq[i]);
14559                                 if (!hrxq) {
14560                                         DRV_LOG(ERR, "Failed to find "
14561                                                 "queue action for policy.");
14562                                         return -1;
14563                                 }
14564                                 acts[i].dv_actions[acts[i].actions_n] =
14565                                 hrxq->action;
14566                                 acts[i].actions_n++;
14567                                 break;
14568                         default:
14569                                 /* Other fate actions: nothing to do. */
14570                                 break;
14571                         }
14572                 }
14573         }
14574         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14575         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14576         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
14577                                 egress, transfer, false, acts)) {
14578                 DRV_LOG(ERR,
14579                 "Failed to create policy rules per domain.");
14580                 return -1;
14581         }
14582         return 0;
14583 }
14584
14585 /**
14586  * Create the policy rules.
14587  *
14588  * @param[in] dev
14589  *   Pointer to Ethernet device.
14590  * @param[in,out] mtr_policy
14591  *   Pointer to meter policy table.
14592  *
14593  * @return
14594  *   0 on success, -1 otherwise.
14595  */
14596 static int
14597 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
14598                              struct mlx5_flow_meter_policy *mtr_policy)
14599 {
14600         int i;
14601         uint16_t sub_policy_num;
14602
14603         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14604                 sub_policy_num = (mtr_policy->sub_policy_num >>
14605                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14606                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14607                 if (!sub_policy_num)
14608                         continue;
14609                 /* Prepare actions list and create policy rules. */
14610                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
14611                         mtr_policy->sub_policys[i][0], i)) {
14612                         DRV_LOG(ERR,
14613                         "Failed to create policy action list per domain.");
14614                         return -1;
14615                 }
14616         }
14617         return 0;
14618 }
14619
14620 static int
14621 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
14622 {
14623         struct mlx5_priv *priv = dev->data->dev_private;
14624         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14625         struct mlx5_flow_meter_def_policy *def_policy;
14626         struct mlx5_flow_tbl_resource *jump_tbl;
14627         struct mlx5_flow_tbl_data_entry *tbl_data;
14628         uint8_t egress, transfer;
14629         struct rte_flow_error error;
14630         struct mlx5_meter_policy_acts acts[RTE_COLORS];
14631         int ret;
14632
14633         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14634         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14635         def_policy = mtrmng->def_policy[domain];
14636         if (!def_policy) {
14637                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
14638                         sizeof(struct mlx5_flow_meter_def_policy),
14639                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
14640                 if (!def_policy) {
14641                         DRV_LOG(ERR, "Failed to alloc "
14642                                         "default policy table.");
14643                         goto def_policy_error;
14644                 }
14645                 mtrmng->def_policy[domain] = def_policy;
14646                 /* Create the meter suffix table with SUFFIX level. */
14647                 jump_tbl = flow_dv_tbl_resource_get(dev,
14648                                 MLX5_FLOW_TABLE_LEVEL_METER,
14649                                 egress, transfer, false, NULL, 0,
14650                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
14651                 if (!jump_tbl) {
14652                         DRV_LOG(ERR,
14653                                 "Failed to create meter suffix table.");
14654                         goto def_policy_error;
14655                 }
14656                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
14657                 tbl_data = container_of(jump_tbl,
14658                                 struct mlx5_flow_tbl_data_entry, tbl);
14659                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
14660                                                 tbl_data->jump.action;
14661                 acts[RTE_COLOR_GREEN].dv_actions[0] =
14662                                                 tbl_data->jump.action;
14663                 acts[RTE_COLOR_GREEN].actions_n = 1;
14664                 /* Create jump action to the drop table. */
14665                 if (!mtrmng->drop_tbl[domain]) {
14666                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
14667                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
14668                                 egress, transfer, false, NULL, 0,
14669                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
14670                         if (!mtrmng->drop_tbl[domain]) {
14671                                 DRV_LOG(ERR, "Failed to create "
14672                                 "meter drop table for default policy.");
14673                                 goto def_policy_error;
14674                         }
14675                 }
14676                 tbl_data = container_of(mtrmng->drop_tbl[domain],
14677                                 struct mlx5_flow_tbl_data_entry, tbl);
14678                 def_policy->dr_jump_action[RTE_COLOR_RED] =
14679                                                 tbl_data->jump.action;
14680                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
14681                 acts[RTE_COLOR_RED].actions_n = 1;
14682                 /* Create default policy rules. */
14683                 ret = __flow_dv_create_domain_policy_rules(dev,
14684                                         &def_policy->sub_policy,
14685                                         egress, transfer, true, acts);
14686                 if (ret) {
14687                         DRV_LOG(ERR, "Failed to create "
14688                                 "default policy rules.");
14689                         goto def_policy_error;
14690                 }
14691         }
14692         return 0;
14693 def_policy_error:
14694         __flow_dv_destroy_domain_def_policy(dev,
14695                         (enum mlx5_meter_domain)domain);
14696         return -1;
14697 }
14698
14699 /**
14700  * Create the default policy table set.
14701  *
14702  * @param[in] dev
14703  *   Pointer to Ethernet device.
14704  * @return
14705  *   0 on success, -1 otherwise.
14706  */
14707 static int
14708 flow_dv_create_def_policy(struct rte_eth_dev *dev)
14709 {
14710         struct mlx5_priv *priv = dev->data->dev_private;
14711         int i;
14712
14713         /* Non-termination policy table. */
14714         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14715                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
14716                         continue;
14717                 if (__flow_dv_create_domain_def_policy(dev, i)) {
14718                         DRV_LOG(ERR,
14719                         "Failed to create default policy");
14720                         return -1;
14721                 }
14722         }
14723         return 0;
14724 }
14725
14726 /**
14727  * Create the needed meter tables.
14728  * Lock free, (mutex should be acquired by caller).
14729  *
14730  * @param[in] dev
14731  *   Pointer to Ethernet device.
14732  * @param[in] fm
14733  *   Meter information table.
14734  * @param[in] mtr_idx
14735  *   Meter index.
14736  * @param[in] domain_bitmap
14737  *   Domain bitmap.
14738  * @return
14739  *   0 on success, -1 otherwise.
14740  */
14741 static int
14742 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
14743                         struct mlx5_flow_meter_info *fm,
14744                         uint32_t mtr_idx,
14745                         uint8_t domain_bitmap)
14746 {
14747         struct mlx5_priv *priv = dev->data->dev_private;
14748         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14749         struct rte_flow_error error;
14750         struct mlx5_flow_tbl_data_entry *tbl_data;
14751         uint8_t egress, transfer;
14752         void *actions[METER_ACTIONS];
14753         int domain, ret, i;
14754         struct mlx5_flow_counter *cnt;
14755         struct mlx5_flow_dv_match_params value = {
14756                 .size = sizeof(value.buf) -
14757                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14758         };
14759         struct mlx5_flow_dv_match_params matcher_para = {
14760                 .size = sizeof(matcher_para.buf) -
14761                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14762         };
14763         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
14764                                                      0, &error);
14765         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
14766         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
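        /*
         * When the meter register is shared (mtr_reg_share), the low
         * MLX5_MTR_COLOR_BITS bits of the register hold the color and
         * the meter ID sits above them; hence mtr_id_offset is applied
         * to both the mask and the matched value below.
         */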
14767         struct mlx5_cache_entry *entry;
14768         struct mlx5_flow_dv_matcher matcher = {
14769                 .mask = {
14770                         .size = sizeof(matcher.mask.buf) -
14771                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14772                 },
14773         };
14774         struct mlx5_flow_dv_matcher *drop_matcher;
14775         struct mlx5_flow_cb_ctx ctx = {
14776                 .error = &error,
14777                 .data = &matcher,
14778         };
14779
14780         if (!priv->mtr_en || mtr_id_reg_c < 0) {
14781                 rte_errno = ENOTSUP;
14782                 return -1;
14783         }
14784         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
14785                 if (!(domain_bitmap & (1 << domain)) ||
14786                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
14787                         continue;
14788                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14789                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14790                 /* Create the drop table with METER DROP level. */
14791                 if (!mtrmng->drop_tbl[domain]) {
14792                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
14793                                         MLX5_FLOW_TABLE_LEVEL_METER,
14794                                         egress, transfer, false, NULL, 0,
14795                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
14796                         if (!mtrmng->drop_tbl[domain]) {
14797                                 DRV_LOG(ERR, "Failed to create meter drop table.");
14798                                 goto policy_error;
14799                         }
14800                 }
14801                 /* Create default matcher in drop table. */
14802                 matcher.tbl = mtrmng->drop_tbl[domain];
14803                 tbl_data = container_of(mtrmng->drop_tbl[domain],
14804                                 struct mlx5_flow_tbl_data_entry, tbl);
14805                 if (!mtrmng->def_matcher[domain]) {
14806                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14807                                        (enum modify_reg)mtr_id_reg_c,
14808                                        0, 0);
14809                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
14810                         matcher.crc = rte_raw_cksum
14811                                         ((const void *)matcher.mask.buf,
14812                                         matcher.mask.size);
14813                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14814                         if (!entry) {
14815                                 DRV_LOG(ERR, "Failed to register meter "
14816                                 "drop default matcher.");
14817                                 goto policy_error;
14818                         }
14819                         mtrmng->def_matcher[domain] = container_of(entry,
14820                         struct mlx5_flow_dv_matcher, entry);
14821                 }
14822                 /* Create default rule in drop table. */
14823                 if (!mtrmng->def_rule[domain]) {
14824                         i = 0;
14825                         actions[i++] = priv->sh->dr_drop_action;
14826                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
14827                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
14828                         ret = mlx5_flow_os_create_flow
14829                                 (mtrmng->def_matcher[domain]->matcher_object,
14830                                 (void *)&value, i, actions,
14831                                 &mtrmng->def_rule[domain]);
14832                         if (ret) {
14833                                 DRV_LOG(ERR, "Failed to create meter "
14834                                 "default drop rule for drop table.");
14835                                 goto policy_error;
14836                         }
14837                 }
14838                 if (!fm->drop_cnt)
14839                         continue;
14840                 MLX5_ASSERT(mtrmng->max_mtr_bits);
14841                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
14842                         /* Create matchers for Drop. */
14843                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14844                                         (enum modify_reg)mtr_id_reg_c, 0,
14845                                         (mtr_id_mask << mtr_id_offset));
14846                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
14847                         matcher.crc = rte_raw_cksum
14848                                         ((const void *)matcher.mask.buf,
14849                                         matcher.mask.size);
14850                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14851                         if (!entry) {
14852                                 DRV_LOG(ERR,
14853                                 "Failed to register meter drop matcher.");
14854                                 goto policy_error;
14855                         }
14856                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
14857                                 container_of(entry, struct mlx5_flow_dv_matcher,
14858                                              entry);
14859                 }
14860                 drop_matcher =
14861                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
14862                 /* Create drop rule, matching meter_id only. */
14863                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
14864                                 (enum modify_reg)mtr_id_reg_c,
14865                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
14866                 i = 0;
14867                 cnt = flow_dv_counter_get_by_idx(dev,
14868                                         fm->drop_cnt, NULL);
14869                 actions[i++] = cnt->action;
14870                 actions[i++] = priv->sh->dr_drop_action;
14871                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
14872                                                (void *)&value, i, actions,
14873                                                &fm->drop_rule[domain]);
14874                 if (ret) {
14875                         DRV_LOG(ERR, "Failed to create meter "
14876                                 "drop rule for drop table.");
14877                         goto policy_error;
14878                 }
14879         }
14880         return 0;
14881 policy_error:
14882         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14883                 if (fm->drop_rule[i]) {
14884                         claim_zero(mlx5_flow_os_destroy_flow
14885                                 (fm->drop_rule[i]));
14886                         fm->drop_rule[i] = NULL;
14887                 }
14888         }
14889         return -1;
14890 }
14891
14892 /**
14893  * Find or create the policy sub table for the prefix table with RSS.
14894  *
14895  * @param[in] dev
14896  *   Pointer to Ethernet device.
14897  * @param[in] mtr_policy
14898  *   Pointer to meter policy table.
14899  * @param[in] rss_desc
14900  *   Pointer to the rss_desc array, one entry per color.
14901  * @return
14902  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
14903  */
14904 static struct mlx5_flow_meter_sub_policy *
14905 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
14906                 struct mlx5_flow_meter_policy *mtr_policy,
14907                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
14908 {
14909         struct mlx5_priv *priv = dev->data->dev_private;
14910         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
14911         uint32_t sub_policy_idx = 0;
14912         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
14913         uint32_t i, j;
14914         struct mlx5_hrxq *hrxq;
14915         struct mlx5_flow_handle dh;
14916         struct mlx5_meter_policy_action_container *act_cnt;
14917         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
14918         uint16_t sub_policy_num;
14919
14920         rte_spinlock_lock(&mtr_policy->sl);
14921         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14922                 if (!rss_desc[i])
14923                         continue;
14924                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
14925                 if (!hrxq_idx[i]) {
14926                         rte_spinlock_unlock(&mtr_policy->sl);
14927                         return NULL;
14928                 }
14929         }
14930         sub_policy_num = (mtr_policy->sub_policy_num >>
14931                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
14932                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14933         for (i = 0; i < sub_policy_num; i++) {
14935                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
14936                         if (rss_desc[j] &&
14937                                 hrxq_idx[j] !=
14938                         mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
14939                                 break;
14940                 }
14941                 if (j >= MLX5_MTR_RTE_COLORS) {
14942                         /*
14943                          * Found the sub policy table with
14944                          * the same queue per color.
14945                          */
14946                         rte_spinlock_unlock(&mtr_policy->sl);
14947                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
14948                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
14949                         return mtr_policy->sub_policys[domain][i];
14950                 }
14951         }
14952         /* Create sub policy. */
14953         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
14954                 /* Reuse the first dummy sub_policy. */
14955                 sub_policy = mtr_policy->sub_policys[domain][0];
14956                 sub_policy_idx = sub_policy->idx;
14957         } else {
14958                 sub_policy = mlx5_ipool_zmalloc
14959                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
14960                                 &sub_policy_idx);
14961                 if (!sub_policy ||
14962                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
14963                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
14964                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
14965                         goto rss_sub_policy_error;
14966                 }
14967                 sub_policy->idx = sub_policy_idx;
14968                 sub_policy->main_policy = mtr_policy;
14969         }
14970         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14971                 if (!rss_desc[i])
14972                         continue;
14973                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
14974                 /*
14975                  * Overwrite the last action from
14976                  * RSS action to Queue action.
14977                  */
14978                 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
14979                               hrxq_idx[i]);
14980                 if (!hrxq) {
14981                         DRV_LOG(ERR, "Failed to find policy hrxq.");
14982                         goto rss_sub_policy_error;
14983                 }
14984                 act_cnt = &mtr_policy->act_cnt[i];
14985                 if (act_cnt->rix_mark || act_cnt->modify_hdr) {
14986                         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
14987                         if (act_cnt->rix_mark)
14988                                 dh.mark = 1;
14989                         dh.fate_action = MLX5_FLOW_FATE_QUEUE;
14990                         dh.rix_hrxq = hrxq_idx[i];
14991                         flow_drv_rxq_flags_set(dev, &dh);
14992                 }
14993         }
14994         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
14995                 sub_policy, domain)) {
14996                 DRV_LOG(ERR, "Failed to create policy "
14997                         "rules per domain.");
14998                 goto rss_sub_policy_error;
14999         }
15000         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
15001                 i = (mtr_policy->sub_policy_num >>
15002                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15003                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15004                 mtr_policy->sub_policys[domain][i] = sub_policy;
15005                 i++;
15006                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
15007                         goto rss_sub_policy_error;
15008                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
15009                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
15010                 mtr_policy->sub_policy_num |=
15011                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
15012                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
15013         }
15014         rte_spinlock_unlock(&mtr_policy->sl);
15015         return sub_policy;
15016 rss_sub_policy_error:
15017         if (sub_policy) {
15018                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
15019                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
15020                         i = (mtr_policy->sub_policy_num >>
15021                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15022                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15023                         mtr_policy->sub_policys[domain][i] = NULL;
15024                         mlx5_ipool_free
15025                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15026                                         sub_policy->idx);
15027                 }
15028         }
15029         if (sub_policy_idx)
15030                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15031                         sub_policy_idx);
15032         rte_spinlock_unlock(&mtr_policy->sl);
15033         return NULL;
15034 }
15035
15036 /**
15037  * Validate the batch counter support in root table.
15038  *
15039  * Create a simple flow with invalid counter and drop action on root table to
15040  * validate if batch counter with offset on root table is supported or not.
15041  *
15042  * @param[in] dev
15043  *   Pointer to rte_eth_dev structure.
15044  *
15045  * @return
15046  *   0 on success, a negative errno value otherwise and rte_errno is set.
15047  */
15048 int
15049 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
15050 {
15051         struct mlx5_priv *priv = dev->data->dev_private;
15052         struct mlx5_dev_ctx_shared *sh = priv->sh;
15053         struct mlx5_flow_dv_match_params mask = {
15054                 .size = sizeof(mask.buf),
15055         };
15056         struct mlx5_flow_dv_match_params value = {
15057                 .size = sizeof(value.buf),
15058         };
15059         struct mlx5dv_flow_matcher_attr dv_attr = {
15060                 .type = IBV_FLOW_ATTR_NORMAL,
15061                 .priority = 0,
15062                 .match_criteria_enable = 0,
15063                 .match_mask = (void *)&mask,
15064         };
15065         void *actions[2] = { 0 };
15066         struct mlx5_flow_tbl_resource *tbl = NULL;
15067         struct mlx5_devx_obj *dcs = NULL;
15068         void *matcher = NULL;
15069         void *flow = NULL;
15070         int ret = -1;
15071
15072         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
15073                                         0, 0, 0, NULL);
15074         if (!tbl)
15075                 goto err;
15076         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
15077         if (!dcs)
15078                 goto err;
15079         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
15080                                                     &actions[0]);
15081         if (ret)
15082                 goto err;
15083         actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
15084                                           priv->drop_queue.hrxq->action;
15085         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
15086         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
15087                                                &matcher);
15088         if (ret)
15089                 goto err;
15090         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
15091                                        actions, &flow);
15092 err:
15093         /*
15094          * If a batch counter with offset is not supported, the driver does
15095          * not validate the invalid offset value and flow creation should
15096          * succeed; in that case, batch counters are not supported in the
15097          * root table. Otherwise, if flow creation fails, the counter offset
15098          * is supported.
15099          */
15100         if (flow) {
15101                 DRV_LOG(INFO, "Batch counter is not supported in root "
15102                               "table. Switch to fallback mode.");
15103                 rte_errno = ENOTSUP;
15104                 ret = -rte_errno;
15105                 claim_zero(mlx5_flow_os_destroy_flow(flow));
15106         } else {
15107                 /* Check the matcher to make sure validation failed at flow create. */
15108                 if (!matcher || errno != EINVAL)
15109                         DRV_LOG(ERR, "Unexpected error in counter offset "
15110                                      "support detection");
15111                 ret = 0;
15112         }
15113         if (actions[0])
15114                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
15115         if (matcher)
15116                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
15117         if (tbl)
15118                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
15119         if (dcs)
15120                 claim_zero(mlx5_devx_cmd_destroy(dcs));
15121         return ret;
15122 }
15123
15124 /**
15125  * Query a devx counter.
15126  *
15127  * @param[in] dev
15128  *   Pointer to the Ethernet device structure.
15129  * @param[in] counter
15130  *   Index to the flow counter.
15131  * @param[in] clear
15132  *   Set to clear the counter statistics.
15133  * @param[out] pkts
15134  *   The statistics value of packets.
15135  * @param[out] bytes
15136  *   The statistics value of bytes.
15137  *
15138  * @return
15139  *   0 on success, otherwise return -1.
15140  */
15141 static int
15142 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
15143                       uint64_t *pkts, uint64_t *bytes)
15144 {
15145         struct mlx5_priv *priv = dev->data->dev_private;
15146         struct mlx5_flow_counter *cnt;
15147         uint64_t inn_pkts, inn_bytes;
15148         int ret;
15149
15150         if (!priv->config.devx)
15151                 return -1;
15152
15153         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
15154         if (ret)
15155                 return -1;
15156         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
15157         *pkts = inn_pkts - cnt->hits;
15158         *bytes = inn_bytes - cnt->bytes;
15159         if (clear) {
15160                 cnt->hits = inn_pkts;
15161                 cnt->bytes = inn_bytes;
15162         }
15163         return 0;
15164 }
15165
15166 /**
15167  * Get aged-out flows.
15168  *
15169  * @param[in] dev
15170  *   Pointer to the Ethernet device structure.
15171  * @param[in] context
15172  *   The address of an array of pointers to the aged-out flow contexts.
15173  * @param[in] nb_contexts
15174  *   The length of the context array.
15175  * @param[out] error
15176  *   Perform verbose error reporting if not NULL. Initialized in case of
15177  *   error only.
15178  *
15179  * @return
15180  *   The number of aged-out flows on success, otherwise a negative errno
15181  *   value. If nb_contexts is 0, return the total number of aged-out
15182  *   contexts. Otherwise, return the number of aged-out flows reported in
15183  *   the context array.
15184  * @note: only stub for now
15185  */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	struct mlx5_aso_age_action *act;
	int nb_flows = 0;

	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "empty context");
	age_info = GET_PORT_AGE_INFO(priv);
	rte_spinlock_lock(&age_info->aged_sl);
	LIST_FOREACH(act, &age_info->aged_aso, next) {
		nb_flows++;
		if (nb_contexts) {
			context[nb_flows - 1] = act->age_params.context;
			if (!(--nb_contexts))
				break;
		}
	}
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}

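/*
 * Illustrative sketch, not part of the driver: the two-step calling pattern
 * the return contract above implies, mirroring the public
 * rte_flow_get_aged_flows() usage. The "example_" helper name is an
 * assumption for this sketch.
 */
static __rte_unused int
example_drain_aged_flows(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	void **contexts;
	int nb_aged;

	/* First call with nb_contexts == 0 only returns the total count. */
	nb_aged = flow_get_aged_flows(dev, NULL, 0, error);
	if (nb_aged <= 0)
		return nb_aged;
	contexts = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * nb_aged,
			       0, SOCKET_ID_ANY);
	if (!contexts)
		return -ENOMEM;
	/* Second call fills the array with up to nb_aged contexts. */
	nb_aged = flow_get_aged_flows(dev, contexts, nb_aged, error);
	/* ... hand each aged context back to the application here ... */
	mlx5_free(contexts);
	return nb_aged;
}
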
/*
 * Thunk adapting the lock-free flow_dv_counter_alloc() to the
 * counter_alloc() driver ops signature.
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	return flow_dv_counter_alloc(dev, 0);
}

/**
 * Validate indirect action.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Indirect action configuration.
 * @param[in] action
 *   The indirect action object to validate.
 * @param[out] err
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise a negative errno value.
 */
static int
flow_dv_action_validate(struct rte_eth_dev *dev,
			const struct rte_flow_indir_action_conf *conf,
			const struct rte_flow_action *action,
			struct rte_flow_error *err)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	RTE_SET_USED(conf);
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_RSS:
		/*
		 * priv->obj_ops is set according to driver capabilities.
		 * When DevX capabilities are sufficient, it is set to
		 * devx_obj_ops; otherwise it is set to ibv_obj_ops.
		 * ibv_obj_ops doesn't support the ind_table_modify
		 * operation, so the shared RSS action can't be used.
		 */
		if (priv->obj_ops.ind_table_modify == NULL)
			return rte_flow_error_set
					(err, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "shared RSS action not supported");
		return mlx5_validate_action_rss(dev, action, err);
	case RTE_FLOW_ACTION_TYPE_AGE:
		if (!priv->sh->aso_age_mng)
			return rte_flow_error_set(err, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
					     "shared age action not supported");
		return flow_dv_validate_action_age(0, action, dev, err);
	default:
		return rte_flow_error_set(err, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "action type not supported");
	}
}

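/*
 * Illustrative sketch, not part of the driver: how an application reaches
 * the dispatcher above. Creating an indirect RSS handle through the public
 * API lands in flow_dv_action_validate() with RTE_FLOW_ACTION_TYPE_RSS.
 * The queue layout and the "example_" name are assumptions for this sketch.
 */
static __rte_unused struct rte_flow_action_handle *
example_create_indirect_rss(uint16_t port_id, struct rte_flow_error *error)
{
	static const uint16_t queues[] = { 0, 1 };
	const struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IP,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};
	const struct rte_flow_indir_action_conf conf = {
		.ingress = 1,
	};

	/* Fails with ENOTSUP when ind_table_modify is unavailable. */
	return rte_flow_action_handle_create(port_id, &conf, &action, error);
}
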
/**
 * Validate meter policy actions.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   Array of meter policy actions to validate, indexed by color.
 * @param[in] attr
 *   Attributes of the flow to determine the steering domain.
 * @param[out] is_rss
 *   Set to true when the policy uses an RSS action.
 * @param[out] domain_bitmap
 *   Bitmap of the steering domains the policy applies to.
 * @param[out] is_def_policy
 *   Set to true when the policy matches the default policy.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise a negative errno value.
 */
static int
flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
			const struct rte_flow_action *actions[RTE_COLORS],
			struct rte_flow_attr *attr,
			bool *is_rss,
			uint8_t *domain_bitmap,
			bool *is_def_policy,
			struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *dev_conf = &priv->config;
	const struct rte_flow_action *act;
	uint64_t action_flags = 0;
	int actions_n;
	int i, ret;
	struct rte_flow_error flow_err;
	uint8_t domain_color[RTE_COLORS] = {0};
	uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;

	if (!priv->config.dv_esw_en)
		def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
	*domain_bitmap = def_domain;
	if (actions[RTE_COLOR_YELLOW] &&
		actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
		return -rte_mtr_error_set(error, ENOTSUP,
				RTE_MTR_ERROR_TYPE_METER_POLICY,
				NULL,
				"Yellow color does not support any action.");
	if (actions[RTE_COLOR_RED] &&
		actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
		return -rte_mtr_error_set(error, ENOTSUP,
				RTE_MTR_ERROR_TYPE_METER_POLICY,
				NULL, "Red color only supports drop action.");
	/*
	 * Check default policy actions:
	 * Green / Yellow: no action, Red: drop action.
	 */
	if (!actions[RTE_COLOR_GREEN] ||
		actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END) {
		*is_def_policy = true;
		return 0;
	}
	flow_err.message = NULL;
	for (i = 0; i < RTE_COLORS; i++) {
		act = actions[i];
		for (action_flags = 0, actions_n = 0;
			act && act->type != RTE_FLOW_ACTION_TYPE_END;
			act++) {
			if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
				return -rte_mtr_error_set(error, ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "too many actions");
			switch (act->type) {
			case RTE_FLOW_ACTION_TYPE_PORT_ID:
				if (!priv->config.dv_esw_en)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "PORT action is not supported"
					" when E-Switch is disabled");
				ret = flow_dv_validate_action_port_id(dev,
						action_flags,
						act, attr, &flow_err);
				if (ret)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"PORT action validation failed");
				++actions_n;
				action_flags |= MLX5_FLOW_ACTION_PORT_ID;
				break;
			case RTE_FLOW_ACTION_TYPE_MARK:
				ret = flow_dv_validate_action_mark(dev, act,
							   action_flags,
							   attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"Mark action validation failed");
				if (dev_conf->dv_xmeta_en !=
					MLX5_XMETA_MODE_LEGACY)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "Extended MARK action is "
					"not supported. Please try the "
					"default policy for the meter.");
				action_flags |= MLX5_FLOW_ACTION_MARK;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_SET_TAG:
				ret = flow_dv_validate_action_set_tag(dev,
							act, action_flags,
							attr, &flow_err);
				if (ret)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"Set tag action validation failed");
				/*
				 * Count all modify-header actions
				 * as one action.
				 */
				if (!(action_flags &
					MLX5_FLOW_MODIFY_HDR_ACTIONS))
					++actions_n;
				action_flags |= MLX5_FLOW_ACTION_SET_TAG;
				break;
			case RTE_FLOW_ACTION_TYPE_DROP:
				ret = mlx5_flow_validate_action_drop
					(action_flags,
					attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"Drop action validation failed");
				action_flags |= MLX5_FLOW_ACTION_DROP;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_QUEUE:
				/*
				 * Check whether the extensive
				 * metadata feature is engaged.
				 */
				if (dev_conf->dv_flow_en &&
					(dev_conf->dv_xmeta_en !=
					MLX5_XMETA_MODE_LEGACY) &&
					mlx5_flow_ext_mreg_supported(dev))
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Queue action with meta "
					  "is not supported. Please try the "
					  "default policy for the meter.");
				ret = mlx5_flow_validate_action_queue(act,
							action_flags, dev,
							attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "Queue action validation failed");
				action_flags |= MLX5_FLOW_ACTION_QUEUE;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_RSS:
				if (dev_conf->dv_flow_en &&
					(dev_conf->dv_xmeta_en !=
					MLX5_XMETA_MODE_LEGACY) &&
					mlx5_flow_ext_mreg_supported(dev))
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "RSS action with meta "
					  "is not supported. Please try the "
					  "default policy for the meter.");
				ret = mlx5_validate_action_rss(dev, act,
						&flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "RSS action validation failed");
				action_flags |= MLX5_FLOW_ACTION_RSS;
				++actions_n;
				*is_rss = true;
				break;
			case RTE_FLOW_ACTION_TYPE_JUMP:
				ret = flow_dv_validate_action_jump(dev,
					NULL, act, action_flags,
					attr, true, &flow_err);
				if (ret)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "Jump action validation failed");
				++actions_n;
				action_flags |= MLX5_FLOW_ACTION_JUMP;
				break;
			default:
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"Unsupported action in meter policy");
			}
		}
		/* Yellow color is not supported, just skip it. */
		if (i == RTE_COLOR_YELLOW)
			continue;
		if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
			domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
		else if ((action_flags &
			(MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
			(action_flags & MLX5_FLOW_ACTION_MARK))
			/*
			 * Only MLX5_XMETA_MODE_LEGACY is supported, so the
			 * MARK action is only allowed in the ingress domain.
			 */
			domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
		else
			domain_color[i] = def_domain;
		/*
		 * Validate the drop action mutual exclusion
		 * with other actions. The drop action is mutually exclusive
		 * with any other action, except for the Count action.
		 */
		if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
			(action_flags & ~MLX5_FLOW_ACTION_DROP)) {
			return -rte_mtr_error_set(error, ENOTSUP,
				RTE_MTR_ERROR_TYPE_METER_POLICY,
				NULL, "Drop action is mutually exclusive "
				"with any other action");
		}
		/* E-Switch has a few restrictions on using items and actions. */
		if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
			if (!mlx5_flow_ext_mreg_supported(dev) &&
				action_flags & MLX5_FLOW_ACTION_MARK)
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "unsupported action MARK");
			if (action_flags & MLX5_FLOW_ACTION_QUEUE)
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "unsupported action QUEUE");
			if (action_flags & MLX5_FLOW_ACTION_RSS)
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "unsupported action RSS");
			if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "no fate action is found");
		} else {
			if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
				(domain_color[i] &
				MLX5_MTR_DOMAIN_INGRESS_BIT)) {
				if ((domain_color[i] &
					MLX5_MTR_DOMAIN_EGRESS_BIT))
					domain_color[i] =
					MLX5_MTR_DOMAIN_EGRESS_BIT;
				else
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "no fate action is found");
			}
		}
		if (domain_color[i] != def_domain)
			*domain_bitmap = domain_color[i];
	}
	return 0;
}

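/*
 * Illustrative sketch, not part of the driver: a per-color actions array of
 * the shape the validator above accepts, honoring the mandatory red-drop
 * rule. The queue index and the "example_" names are assumptions for this
 * sketch.
 */
static const struct rte_flow_action_queue example_policy_queue = { .index = 0 };
static const struct rte_flow_action example_green_acts[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_policy_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_action example_red_acts[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
/* Yellow must stay empty (NULL or END only) per the checks above. */
static __rte_unused const struct rte_flow_action *example_policy_acts[RTE_COLORS] = {
	[RTE_COLOR_GREEN] = example_green_acts,
	[RTE_COLOR_YELLOW] = NULL,
	[RTE_COLOR_RED] = example_red_acts,
};
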
static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;

	if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
		ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
						flags);
		if (ret != 0)
			return ret;
	}
	if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
		ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
		if (ret != 0)
			return ret;
	}
	if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
		ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
		if (ret != 0)
			return ret;
	}
	return 0;
}

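/*
 * Illustrative sketch, not part of the driver: the domain bits above are the
 * same ones applications pass through the public rte_pmd_mlx5_sync_flow()
 * entry point, which dispatches to flow_dv_sync_domain(). The "example_"
 * name is an assumption for this sketch.
 */
static __rte_unused int
example_sync_nic_domains(uint16_t port_id)
{
	/* Domains whose bit is unset, or which were never created, are skipped. */
	return rte_pmd_mlx5_sync_flow(port_id,
				      MLX5_DOMAIN_BIT_NIC_RX |
				      MLX5_DOMAIN_BIT_NIC_TX);
}
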
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	.sync_domain = flow_dv_sync_domain,
};

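/*
 * Illustrative sketch, not part of the driver: the ops table above is how
 * the generic layer in mlx5_flow.c dispatches requests to the DV backend;
 * a counter query, for instance, resolves as below. The "example_" name is
 * an assumption for this sketch.
 */
static __rte_unused int
example_query_via_ops(struct rte_eth_dev *dev, uint32_t counter,
		      uint64_t *pkts, uint64_t *bytes)
{
	const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;

	/* clear == false keeps the software baseline intact. */
	return fops->counter_query(dev, counter, false, pkts, bytes);
}
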
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
