/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};
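
/*
 * Illustrative note (not part of the upstream driver): because the
 * anonymous bit-field struct overlays the single 32-bit "attr" word,
 * writing "attr->attr = 0" clears valid/ipv4/ipv6/tcp/udp in one store,
 * e.g.:
 *
 *      union flow_dv_attr a = { .attr = 0 };
 *      a.ipv4 = 1;
 *      a.tcp = 1;
 *      a.attr = 0;     // resets both flags at once
 */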

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, it means this dev_flow is the
         * suffix flow and the layer flags were set by the prefix flow. The
         * layer flags from the prefix flow must be used, as the suffix flow
         * may not carry the user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};
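
/*
 * Illustrative note (not part of the upstream driver): each table of
 * this kind walks a protocol header in byte order and is terminated by
 * a zero-size entry. For modify_eth, the 6-byte destination MAC at
 * offset 0 is split into a 4-byte chunk (bits 47:16) and a 2-byte chunk
 * at offset 4 (bits 15:0), and the source MAC at offset 6 is split the
 * same way, matching the MLX5_MODI_OUT_*MAC_* fields.
 */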

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
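
/*
 * Worked example (illustrative only): the 16-bit VLAN TCI carries PCP
 * in bits 15:13 and VID in bits 11:0, which is what the masks above
 * encode. Setting PCP = 5 on a TCI holding VID 100 (0x064):
 *
 *      vlan_tci = 5 << MLX5DV_FLOW_VLAN_PCP_SHIFT;     // 0xa000
 *      tci = (tci & ~MLX5DV_FLOW_VLAN_PCP_MASK) | vlan_tci;
 *      // tci == 0xa064
 */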

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   converted field in host endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
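
/*
 * Worked example (illustrative only): for data = {0x12, 0x34, 0x56} and
 * size = 3, the two leading bytes are converted with rte_be_to_cpu_16()
 * giving 0x1234, then the third byte is appended:
 * (0x1234 << 8) | 0x56 == 0x123456, independent of host endianness.
 */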

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask; the data bit offset and width of each
 * action are deduced from the item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
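
/*
 * Worked example (illustrative only) for the width deduction above:
 * mask = 0x00ffff00 gives off_b = rte_bsf32(mask) = 8 and
 * size_b = 32 - 8 - __builtin_clz(mask) = 32 - 8 - 8 = 16, i.e. a
 * 16-bit field starting at bit 8. A full 32-bit mask yields
 * size_b = 32, which is then stored as 0, the encoding the command
 * format uses for the full 32-bit width.
 */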

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
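
/*
 * Illustrative note (not part of the upstream driver): decrement TTL is
 * built from MLX5_MODIFICATION_TYPE_ADD on the 8-bit TTL/hop-limit
 * field with the value 0xFF. Since 0xFF == -1 modulo 256, adding it
 * decrements the field by one, e.g. a TTL of 64 becomes
 * (64 + 0xFF) & 0xFF == 63.
 */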

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate a decrement of X from Y with the increment
                 * operation, we add UINT32_MAX X times to Y; each addition
                 * of UINT32_MAX decrements Y by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
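
/*
 * Worked example (illustrative only): to decrement the sequence number
 * by 3, value = 3 * UINT32_MAX = 3 * (2^32 - 1), and
 * (uint32_t)value == 2^32 - 3 == 0xfffffffd. Adding 0xfffffffd modulo
 * 2^32 is the same as subtracting 3.
 */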

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate a decrement of X from Y with the increment
                 * operation, we add UINT32_MAX X times to Y; each addition
                 * of UINT32_MAX decrements Y by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
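
/*
 * Worked example (illustrative only), assuming a little-endian host and
 * a hypothetical reg_c[0] mask of 0x00ff0000: rte_bsf32() = 16 and
 * rte_fls_u32() = 24, so copying to reg_c[0] uses reg_dst.offset = 16
 * and mask = 0x00ff0000 << (32 - 24) = 0xff000000, while copying from
 * reg_c[0] uses reg_dst.offset = 32 - (24 - 16) = 24.
 */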

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
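
/*
 * Illustrative note (not part of the upstream driver): when MARK lands
 * in reg_c[0] it shares the register with other metadata, so the value
 * is shifted into the reported sub-mask. E.g. for a hypothetical
 * dv_regc0_mask of 0xffff0000, shl_c0 = rte_bsf32(0xffff0000) = 16 and
 * both data and mask are moved into bits 31:16 before conversion.
 */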

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        /*
         * In datapath code there are no endianness
         * conversions for performance reasons; all
         * pattern conversions are done in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
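
/*
 * Worked example (illustrative only), assuming a little-endian host and
 * a hypothetical msk_c0 of 0x0000ffff: rte_fls_u32(msk_c0) = 16, so
 * shl_c0 = 32 - 16 = 16 and both data and mask are shifted left by 16,
 * aligning the value with the reg_c[0] bits reported in dv_regc0_mask
 * as they are laid out in memory.
 */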

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bit offset in IPv6 is not byte-aligned,
         * rdma-core only accepts the DSCP bits byte-aligned at bits 0-5,
         * to be compatible with IPv4. There is no need to shift the bits
         * in the IPv6 case, as rdma-core requires the byte-aligned value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
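
/*
 * Illustrative note (not part of the upstream driver): the DSCP mask
 * shifts in the two functions above normalize the 6 DSCP bits to bits
 * 5:0 of a byte. For IPv4, RTE_IPV4_HDR_DSCP_MASK (0xfc) >> 2 == 0x3f;
 * for IPv6, RTE_IPV6_HDR_DSCP_MASK (0x0fc00000) >> 22 == 0x3f.
 */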
1332
1333 static int
1334 mlx5_flow_item_field_width(enum rte_flow_field_id field)
1335 {
1336         switch (field) {
1337         case RTE_FLOW_FIELD_START:
1338                 return 32;
1339         case RTE_FLOW_FIELD_MAC_DST:
1340         case RTE_FLOW_FIELD_MAC_SRC:
1341                 return 48;
1342         case RTE_FLOW_FIELD_VLAN_TYPE:
1343                 return 16;
1344         case RTE_FLOW_FIELD_VLAN_ID:
1345                 return 12;
1346         case RTE_FLOW_FIELD_MAC_TYPE:
1347                 return 16;
1348         case RTE_FLOW_FIELD_IPV4_DSCP:
1349                 return 6;
1350         case RTE_FLOW_FIELD_IPV4_TTL:
1351                 return 8;
1352         case RTE_FLOW_FIELD_IPV4_SRC:
1353         case RTE_FLOW_FIELD_IPV4_DST:
1354                 return 32;
1355         case RTE_FLOW_FIELD_IPV6_DSCP:
1356                 return 6;
1357         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1358                 return 8;
1359         case RTE_FLOW_FIELD_IPV6_SRC:
1360         case RTE_FLOW_FIELD_IPV6_DST:
1361                 return 128;
1362         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1363         case RTE_FLOW_FIELD_TCP_PORT_DST:
1364                 return 16;
1365         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1366         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1367                 return 32;
1368         case RTE_FLOW_FIELD_TCP_FLAGS:
1369                 return 6;
1370         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1371         case RTE_FLOW_FIELD_UDP_PORT_DST:
1372                 return 16;
1373         case RTE_FLOW_FIELD_VXLAN_VNI:
1374         case RTE_FLOW_FIELD_GENEVE_VNI:
1375                 return 24;
1376         case RTE_FLOW_FIELD_GTP_TEID:
1377         case RTE_FLOW_FIELD_TAG:
1378                 return 32;
1379         case RTE_FLOW_FIELD_MARK:
1380                 return 24;
1381         case RTE_FLOW_FIELD_META:
1382                 return 32;
1383         case RTE_FLOW_FIELD_POINTER:
1384         case RTE_FLOW_FIELD_VALUE:
1385                 return 64;
1386         default:
1387                 MLX5_ASSERT(false);
1388         }
1389         return 0;
1390 }
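
/*
 * Illustrative sketch, not driver logic: the widths returned above bound
 * the per-field masks built by the translation below, where a width of w
 * bits in a 32-bit word becomes the big-endian mask 0xffffffff >> (32 - w).
 * The helper name is hypothetical.
 */
static __rte_unused uint32_t
example_width_to_be32_mask(uint32_t width)
{
	MLX5_ASSERT(width > 0 && width <= 32);
	return rte_cpu_to_be_32(0xffffffff >> (32 - width));
}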
1391
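/**
 * Translate a flow field id into hardware modification register entries.
 *
 * Fills the info array with width/offset/register triplets covering the
 * requested field and, when a mask array is given, the big-endian masks
 * matching the requested bit width. For POINTER/VALUE sources the immediate
 * value is spread into the value array according to the masks.
 *
 * @param[in] data
 *   Field description from the MODIFY_FIELD action.
 * @param[out] info
 *   Modification register entries to fill.
 * @param[out] mask
 *   Masks to fill, NULL when describing a copy destination.
 * @param[out] value
 *   Immediate value buffer.
 * @param[in] width
 *   Number of bits to modify.
 * @param[in] dst_width
 *   Bit width of the destination field.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to the error structure.
 */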
1392 static void
1393 mlx5_flow_field_id_to_modify_info
1394                 (const struct rte_flow_action_modify_data *data,
1395                  struct field_modify_info *info,
1396                  uint32_t *mask, uint32_t *value,
1397                  uint32_t width, uint32_t dst_width,
1398                  struct rte_eth_dev *dev,
1399                  const struct rte_flow_attr *attr,
1400                  struct rte_flow_error *error)
1401 {
1402         uint32_t idx = 0;
1403         uint64_t val = 0;
1404         switch (data->field) {
1405         case RTE_FLOW_FIELD_START:
1406                 /* not supported yet */
1407                 MLX5_ASSERT(false);
1408                 break;
1409         case RTE_FLOW_FIELD_MAC_DST:
1410                 if (mask) {
1411                         if (data->offset < 32) {
1412                                 info[idx] = (struct field_modify_info){4, 0,
1413                                                 MLX5_MODI_OUT_DMAC_47_16};
1414                                 if (width < 32) {
1415                                         mask[idx] =
1416                                                 rte_cpu_to_be_32(0xffffffff >>
1417                                                                  (32 - width));
1418                                         width = 0;
1419                                 } else {
1420                                         mask[idx] = RTE_BE32(0xffffffff);
1421                                         width -= 32;
1422                                 }
1423                                 if (!width)
1424                                         break;
1425                                 ++idx;
1426                         }
1427                         info[idx] = (struct field_modify_info){2, 4 * idx,
1428                                                 MLX5_MODI_OUT_DMAC_15_0};
1429                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1430                 } else {
1431                         if (data->offset < 32)
1432                                 info[idx++] = (struct field_modify_info){4, 0,
1433                                                 MLX5_MODI_OUT_DMAC_47_16};
1434                         info[idx] = (struct field_modify_info){2, 0,
1435                                                 MLX5_MODI_OUT_DMAC_15_0};
1436                 }
1437                 break;
1438         case RTE_FLOW_FIELD_MAC_SRC:
1439                 if (mask) {
1440                         if (data->offset < 32) {
1441                                 info[idx] = (struct field_modify_info){4, 0,
1442                                                 MLX5_MODI_OUT_SMAC_47_16};
1443                                 if (width < 32) {
1444                                         mask[idx] =
1445                                                 rte_cpu_to_be_32(0xffffffff >>
1446                                                                 (32 - width));
1447                                         width = 0;
1448                                 } else {
1449                                         mask[idx] = RTE_BE32(0xffffffff);
1450                                         width -= 32;
1451                                 }
1452                                 if (!width)
1453                                         break;
1454                                 ++idx;
1455                         }
1456                         info[idx] = (struct field_modify_info){2, 4 * idx,
1457                                                 MLX5_MODI_OUT_SMAC_15_0};
1458                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1459                 } else {
1460                         if (data->offset < 32)
1461                                 info[idx++] = (struct field_modify_info){4, 0,
1462                                                 MLX5_MODI_OUT_SMAC_47_16};
1463                         info[idx] = (struct field_modify_info){2, 0,
1464                                                 MLX5_MODI_OUT_SMAC_15_0};
1465                 }
1466                 break;
1467         case RTE_FLOW_FIELD_VLAN_TYPE:
1468                 /* not supported yet */
1469                 break;
1470         case RTE_FLOW_FIELD_VLAN_ID:
1471                 info[idx] = (struct field_modify_info){2, 0,
1472                                         MLX5_MODI_OUT_FIRST_VID};
1473                 if (mask)
1474                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1475                 break;
1476         case RTE_FLOW_FIELD_MAC_TYPE:
1477                 info[idx] = (struct field_modify_info){2, 0,
1478                                         MLX5_MODI_OUT_ETHERTYPE};
1479                 if (mask)
1480                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1481                 break;
1482         case RTE_FLOW_FIELD_IPV4_DSCP:
1483                 info[idx] = (struct field_modify_info){1, 0,
1484                                         MLX5_MODI_OUT_IP_DSCP};
1485                 if (mask)
1486                         mask[idx] = 0x3f >> (6 - width);
1487                 break;
1488         case RTE_FLOW_FIELD_IPV4_TTL:
1489                 info[idx] = (struct field_modify_info){1, 0,
1490                                         MLX5_MODI_OUT_IPV4_TTL};
1491                 if (mask)
1492                         mask[idx] = 0xff >> (8 - width);
1493                 break;
1494         case RTE_FLOW_FIELD_IPV4_SRC:
1495                 info[idx] = (struct field_modify_info){4, 0,
1496                                         MLX5_MODI_OUT_SIPV4};
1497                 if (mask)
1498                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1499                                                      (32 - width));
1500                 break;
1501         case RTE_FLOW_FIELD_IPV4_DST:
1502                 info[idx] = (struct field_modify_info){4, 0,
1503                                         MLX5_MODI_OUT_DIPV4};
1504                 if (mask)
1505                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1506                                                      (32 - width));
1507                 break;
1508         case RTE_FLOW_FIELD_IPV6_DSCP:
1509                 info[idx] = (struct field_modify_info){1, 0,
1510                                         MLX5_MODI_OUT_IP_DSCP};
1511                 if (mask)
1512                         mask[idx] = 0x3f >> (6 - width);
1513                 break;
1514         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1515                 info[idx] = (struct field_modify_info){1, 0,
1516                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1517                 if (mask)
1518                         mask[idx] = 0xff >> (8 - width);
1519                 break;
1520         case RTE_FLOW_FIELD_IPV6_SRC:
1521                 if (mask) {
1522                         if (data->offset < 32) {
1523                                 info[idx] = (struct field_modify_info){4,
1524                                                 4 * idx,
1525                                                 MLX5_MODI_OUT_SIPV6_31_0};
1526                                 if (width < 32) {
1527                                         mask[idx] =
1528                                                 rte_cpu_to_be_32(0xffffffff >>
1529                                                                  (32 - width));
1530                                         width = 0;
1531                                 } else {
1532                                         mask[idx] = RTE_BE32(0xffffffff);
1533                                         width -= 32;
1534                                 }
1535                                 if (!width)
1536                                         break;
1537                                 ++idx;
1538                         }
1539                         if (data->offset < 64) {
1540                                 info[idx] = (struct field_modify_info){4,
1541                                                 4 * idx,
1542                                                 MLX5_MODI_OUT_SIPV6_63_32};
1543                                 if (width < 32) {
1544                                         mask[idx] =
1545                                                 rte_cpu_to_be_32(0xffffffff >>
1546                                                                  (32 - width));
1547                                         width = 0;
1548                                 } else {
1549                                         mask[idx] = RTE_BE32(0xffffffff);
1550                                         width -= 32;
1551                                 }
1552                                 if (!width)
1553                                         break;
1554                                 ++idx;
1555                         }
1556                         if (data->offset < 96) {
1557                                 info[idx] = (struct field_modify_info){4,
1558                                                 4 * idx,
1559                                                 MLX5_MODI_OUT_SIPV6_95_64};
1560                                 if (width < 32) {
1561                                         mask[idx] =
1562                                                 rte_cpu_to_be_32(0xffffffff >>
1563                                                                  (32 - width));
1564                                         width = 0;
1565                                 } else {
1566                                         mask[idx] = RTE_BE32(0xffffffff);
1567                                         width -= 32;
1568                                 }
1569                                 if (!width)
1570                                         break;
1571                                 ++idx;
1572                         }
1573                         info[idx] = (struct field_modify_info){4, 4 * idx,
1574                                                 MLX5_MODI_OUT_SIPV6_127_96};
1575                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1576                                                      (32 - width));
1577                 } else {
1578                         if (data->offset < 32)
1579                                 info[idx++] = (struct field_modify_info){4, 0,
1580                                                 MLX5_MODI_OUT_SIPV6_31_0};
1581                         if (data->offset < 64)
1582                                 info[idx++] = (struct field_modify_info){4, 0,
1583                                                 MLX5_MODI_OUT_SIPV6_63_32};
1584                         if (data->offset < 96)
1585                                 info[idx++] = (struct field_modify_info){4, 0,
1586                                                 MLX5_MODI_OUT_SIPV6_95_64};
1587                         if (data->offset < 128)
1588                                 info[idx++] = (struct field_modify_info){4, 0,
1589                                                 MLX5_MODI_OUT_SIPV6_127_96};
1590                 }
1591                 break;
1592         case RTE_FLOW_FIELD_IPV6_DST:
1593                 if (mask) {
1594                         if (data->offset < 32) {
1595                                 info[idx] = (struct field_modify_info){4,
1596                                                 4 * idx,
1597                                                 MLX5_MODI_OUT_DIPV6_31_0};
1598                                 if (width < 32) {
1599                                         mask[idx] =
1600                                                 rte_cpu_to_be_32(0xffffffff >>
1601                                                                  (32 - width));
1602                                         width = 0;
1603                                 } else {
1604                                         mask[idx] = RTE_BE32(0xffffffff);
1605                                         width -= 32;
1606                                 }
1607                                 if (!width)
1608                                         break;
1609                                 ++idx;
1610                         }
1611                         if (data->offset < 64) {
1612                                 info[idx] = (struct field_modify_info){4,
1613                                                 4 * idx,
1614                                                 MLX5_MODI_OUT_DIPV6_63_32};
1615                                 if (width < 32) {
1616                                         mask[idx] =
1617                                                 rte_cpu_to_be_32(0xffffffff >>
1618                                                                  (32 - width));
1619                                         width = 0;
1620                                 } else {
1621                                         mask[idx] = RTE_BE32(0xffffffff);
1622                                         width -= 32;
1623                                 }
1624                                 if (!width)
1625                                         break;
1626                                 ++idx;
1627                         }
1628                         if (data->offset < 96) {
1629                                 info[idx] = (struct field_modify_info){4,
1630                                                 4 * idx,
1631                                                 MLX5_MODI_OUT_DIPV6_95_64};
1632                                 if (width < 32) {
1633                                         mask[idx] =
1634                                                 rte_cpu_to_be_32(0xffffffff >>
1635                                                                  (32 - width));
1636                                         width = 0;
1637                                 } else {
1638                                         mask[idx] = RTE_BE32(0xffffffff);
1639                                         width -= 32;
1640                                 }
1641                                 if (!width)
1642                                         break;
1643                                 ++idx;
1644                         }
1645                         info[idx] = (struct field_modify_info){4, 4 * idx,
1646                                                 MLX5_MODI_OUT_DIPV6_127_96};
1647                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1648                                                      (32 - width));
1649                 } else {
1650                         if (data->offset < 32)
1651                                 info[idx++] = (struct field_modify_info){4, 0,
1652                                                 MLX5_MODI_OUT_DIPV6_31_0};
1653                         if (data->offset < 64)
1654                                 info[idx++] = (struct field_modify_info){4, 0,
1655                                                 MLX5_MODI_OUT_DIPV6_63_32};
1656                         if (data->offset < 96)
1657                                 info[idx++] = (struct field_modify_info){4, 0,
1658                                                 MLX5_MODI_OUT_DIPV6_95_64};
1659                         if (data->offset < 128)
1660                                 info[idx++] = (struct field_modify_info){4, 0,
1661                                                 MLX5_MODI_OUT_DIPV6_127_96};
1662                 }
1663                 break;
1664         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1665                 info[idx] = (struct field_modify_info){2, 0,
1666                                         MLX5_MODI_OUT_TCP_SPORT};
1667                 if (mask)
1668                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1669                 break;
1670         case RTE_FLOW_FIELD_TCP_PORT_DST:
1671                 info[idx] = (struct field_modify_info){2, 0,
1672                                         MLX5_MODI_OUT_TCP_DPORT};
1673                 if (mask)
1674                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1675                 break;
1676         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1677                 info[idx] = (struct field_modify_info){4, 0,
1678                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1679                 if (mask)
1680                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1681                                                      (32 - width));
1682                 break;
1683         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1684                 info[idx] = (struct field_modify_info){4, 0,
1685                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1686                 if (mask)
1687                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1688                                                      (32 - width));
1689                 break;
1690         case RTE_FLOW_FIELD_TCP_FLAGS:
1691                 info[idx] = (struct field_modify_info){1, 0,
1692                                         MLX5_MODI_OUT_TCP_FLAGS};
1693                 if (mask)
1694                         mask[idx] = 0x3f >> (6 - width);
1695                 break;
1696         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1697                 info[idx] = (struct field_modify_info){2, 0,
1698                                         MLX5_MODI_OUT_UDP_SPORT};
1699                 if (mask)
1700                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1701                 break;
1702         case RTE_FLOW_FIELD_UDP_PORT_DST:
1703                 info[idx] = (struct field_modify_info){2, 0,
1704                                         MLX5_MODI_OUT_UDP_DPORT};
1705                 if (mask)
1706                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1707                 break;
1708         case RTE_FLOW_FIELD_VXLAN_VNI:
1709                 /* not supported yet */
1710                 break;
1711         case RTE_FLOW_FIELD_GENEVE_VNI:
1712                 /* not supported yet */
1713                 break;
1714         case RTE_FLOW_FIELD_GTP_TEID:
1715                 info[idx] = (struct field_modify_info){4, 0,
1716                                         MLX5_MODI_GTP_TEID};
1717                 if (mask)
1718                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1719                                                      (32 - width));
1720                 break;
1721         case RTE_FLOW_FIELD_TAG:
1722                 {
1723                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1724                                                    data->level, error);
1725                         if (reg < 0)
1726                                 return;
1727                         MLX5_ASSERT(reg != REG_NON);
1728                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1729                         info[idx] = (struct field_modify_info){4, 0,
1730                                                 reg_to_field[reg]};
1731                         if (mask)
1732                                 mask[idx] =
1733                                         rte_cpu_to_be_32(0xffffffff >>
1734                                                          (32 - width));
1735                 }
1736                 break;
1737         case RTE_FLOW_FIELD_MARK:
1738                 {
1739                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1740                                                        0, error);
1741                         if (reg < 0)
1742                                 return;
1743                         MLX5_ASSERT(reg != REG_NON);
1744                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1745                         info[idx] = (struct field_modify_info){4, 0,
1746                                                 reg_to_field[reg]};
1747                         if (mask)
1748                                 mask[idx] =
1749                                         rte_cpu_to_be_32(0xffffffff >>
1750                                                          (32 - width));
1751                 }
1752                 break;
1753         case RTE_FLOW_FIELD_META:
1754                 {
1755                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1756                         if (reg < 0)
1757                                 return;
1758                         MLX5_ASSERT(reg != REG_NON);
1759                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1760                         info[idx] = (struct field_modify_info){4, 0,
1761                                                 reg_to_field[reg]};
1762                         if (mask)
1763                                 mask[idx] =
1764                                         rte_cpu_to_be_32(0xffffffff >>
1765                                                          (32 - width));
1766                 }
1767                 break;
1768         case RTE_FLOW_FIELD_POINTER:
1769         case RTE_FLOW_FIELD_VALUE:
1770                 if (data->field == RTE_FLOW_FIELD_POINTER)
1771                         memcpy(&val, (void *)(uintptr_t)data->value,
1772                                sizeof(uint64_t));
1773                 else
1774                         val = data->value;
1775                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1776                         if (mask[idx]) {
1777                                 if (dst_width > 16) {
1778                                         value[idx] = rte_cpu_to_be_32(val);
1779                                         val >>= 32;
1780                                 } else if (dst_width > 8) {
1781                                         value[idx] = rte_cpu_to_be_16(val);
1782                                         val >>= 16;
1783                                 } else {
1784                                         value[idx] = (uint8_t)val;
1785                                         val >>= 8;
1786                                 }
1787                                 if (!val)
1788                                         break;
1789                         }
1790                 }
1791                 break;
1792         default:
1793                 MLX5_ASSERT(false);
1794                 break;
1795         }
1796 }
1797
1798 /**
1799  * Convert modify_field action to DV specification.
1800  *
1801  * @param[in] dev
1802  *   Pointer to the rte_eth_dev structure.
1803  * @param[in,out] resource
1804  *   Pointer to the modify-header resource.
1805  * @param[in] action
1806  *   Pointer to action specification.
1807  * @param[in] attr
1808  *   Attributes of flow that includes this item.
1809  * @param[out] error
1810  *   Pointer to the error structure.
1811  *
1812  * @return
1813  *   0 on success, a negative errno value otherwise and rte_errno is set.
1814  */
1815 static int
1816 flow_dv_convert_action_modify_field
1817                         (struct rte_eth_dev *dev,
1818                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1819                          const struct rte_flow_action *action,
1820                          const struct rte_flow_attr *attr,
1821                          struct rte_flow_error *error)
1822 {
1823         const struct rte_flow_action_modify_field *conf =
1824                 (const struct rte_flow_action_modify_field *)(action->conf);
1825         struct rte_flow_item item;
1826         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1827                                                                 {0, 0, 0} };
1828         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1829                                                                 {0, 0, 0} };
1830         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1831         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1832         uint32_t type;
1833         uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
1834
1835         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1836                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1837                 type = MLX5_MODIFICATION_TYPE_SET;
1838                 /* For SET fill the destination field (field) first. */
1839                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1840                         value, conf->width, dst_width, dev, attr, error);
1841                 /* Then copy immediate value from source as per mask. */
1842                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1843                         value, conf->width, dst_width, dev, attr, error);
1844                 item.spec = &value;
1845         } else {
1846                 type = MLX5_MODIFICATION_TYPE_COPY;
1847                 /* For COPY fill the destination field (dcopy) without mask. */
1848                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1849                         value, conf->width, dst_width, dev, attr, error);
1850                 /* Then construct the source field (field) with mask. */
1851                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1852                         value, conf->width, dst_width, dev, attr, error);
1853         }
1854         item.mask = &mask;
1855         return flow_dv_convert_modify_action(&item,
1856                         field, dcopy, resource, type, error);
1857 }
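
/*
 * Illustrative sketch, not driver logic: a MODIFY_FIELD action as an
 * application would express it, setting the 8-bit IPv4 TTL from an
 * immediate value. The helper name and the TTL value are hypothetical.
 */
static __rte_unused void
example_build_modify_field_action(struct rte_flow_action *action)
{
	static const struct rte_flow_action_modify_field conf = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = RTE_FLOW_FIELD_IPV4_TTL,
			.offset = 0,
		},
		.src = {
			.field = RTE_FLOW_FIELD_VALUE,
			.value = 64, /* new TTL */
		},
		.width = 8, /* full width of the TTL field */
	};

	action->type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
	action->conf = &conf;
}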
1858
1859 /**
1860  * Validate MARK item.
1861  *
1862  * @param[in] dev
1863  *   Pointer to the rte_eth_dev structure.
1864  * @param[in] item
1865  *   Item specification.
1866  * @param[in] attr
1867  *   Attributes of flow that includes this item.
1868  * @param[out] error
1869  *   Pointer to error structure.
1870  *
1871  * @return
1872  *   0 on success, a negative errno value otherwise and rte_errno is set.
1873  */
1874 static int
1875 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1876                            const struct rte_flow_item *item,
1877                            const struct rte_flow_attr *attr __rte_unused,
1878                            struct rte_flow_error *error)
1879 {
1880         struct mlx5_priv *priv = dev->data->dev_private;
1881         struct mlx5_dev_config *config = &priv->config;
1882         const struct rte_flow_item_mark *spec = item->spec;
1883         const struct rte_flow_item_mark *mask = item->mask;
1884         const struct rte_flow_item_mark nic_mask = {
1885                 .id = priv->sh->dv_mark_mask,
1886         };
1887         int ret;
1888
1889         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1890                 return rte_flow_error_set(error, ENOTSUP,
1891                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1892                                           "extended metadata feature"
1893                                           " isn't enabled");
1894         if (!mlx5_flow_ext_mreg_supported(dev))
1895                 return rte_flow_error_set(error, ENOTSUP,
1896                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1897                                           "extended metadata register"
1898                                           " isn't supported");
1899         if (!nic_mask.id)
1900                 return rte_flow_error_set(error, ENOTSUP,
1901                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1902                                           "extended metadata register"
1903                                           " isn't available");
1904         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1905         if (ret < 0)
1906                 return ret;
1907         if (!spec)
1908                 return rte_flow_error_set(error, EINVAL,
1909                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1910                                           item->spec,
1911                                           "data cannot be empty");
1912         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1913                 return rte_flow_error_set(error, EINVAL,
1914                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1915                                           &spec->id,
1916                                           "mark id exceeds the limit");
1917         if (!mask)
1918                 mask = &nic_mask;
1919         if (!mask->id)
1920                 return rte_flow_error_set(error, EINVAL,
1921                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1922                                         "mask cannot be zero");
1923
1924         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1925                                         (const uint8_t *)&nic_mask,
1926                                         sizeof(struct rte_flow_item_mark),
1927                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1928         if (ret < 0)
1929                 return ret;
1930         return 0;
1931 }
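
/*
 * Illustrative sketch, not driver logic: a MARK item that satisfies the
 * validation above. The helper name and the mark id are hypothetical; the
 * id must stay below MLX5_FLOW_MARK_MAX masked by the register width.
 */
static __rte_unused void
example_build_mark_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_mark spec = { .id = 42 };

	item->type = RTE_FLOW_ITEM_TYPE_MARK;
	item->spec = &spec;
	/* A NULL mask lets the PMD apply its own register mask. */
	item->mask = NULL;
	item->last = NULL;
}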
1932
1933 /**
1934  * Validate META item.
1935  *
1936  * @param[in] dev
1937  *   Pointer to the rte_eth_dev structure.
1938  * @param[in] item
1939  *   Item specification.
1940  * @param[in] attr
1941  *   Attributes of flow that includes this item.
1942  * @param[out] error
1943  *   Pointer to error structure.
1944  *
1945  * @return
1946  *   0 on success, a negative errno value otherwise and rte_errno is set.
1947  */
1948 static int
1949 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1950                            const struct rte_flow_item *item,
1951                            const struct rte_flow_attr *attr,
1952                            struct rte_flow_error *error)
1953 {
1954         struct mlx5_priv *priv = dev->data->dev_private;
1955         struct mlx5_dev_config *config = &priv->config;
1956         const struct rte_flow_item_meta *spec = item->spec;
1957         const struct rte_flow_item_meta *mask = item->mask;
1958         struct rte_flow_item_meta nic_mask = {
1959                 .data = UINT32_MAX
1960         };
1961         int reg;
1962         int ret;
1963
1964         if (!spec)
1965                 return rte_flow_error_set(error, EINVAL,
1966                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1967                                           item->spec,
1968                                           "data cannot be empty");
1969         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1970                 if (!mlx5_flow_ext_mreg_supported(dev))
1971                         return rte_flow_error_set(error, ENOTSUP,
1972                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1973                                           "extended metadata register"
1974                                           " isn't supported");
1975                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1976                 if (reg < 0)
1977                         return reg;
1978                 if (reg == REG_NON)
1979                         return rte_flow_error_set(error, ENOTSUP,
1980                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1981                                         "unavailable extended metadata register");
1982                 if (reg == REG_B)
1983                         return rte_flow_error_set(error, ENOTSUP,
1984                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1985                                           "match on reg_b "
1986                                           "isn't supported");
1987                 if (reg != REG_A)
1988                         nic_mask.data = priv->sh->dv_meta_mask;
1989         } else {
1990                 if (attr->transfer)
1991                         return rte_flow_error_set(error, ENOTSUP,
1992                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1993                                         "extended metadata feature "
1994                                         "must be enabled when the "
1995                                         "meta item is requested "
1996                                         "in E-Switch mode");
1997                 if (attr->ingress)
1998                         return rte_flow_error_set(error, ENOTSUP,
1999                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2000                                         "match on metadata for ingress "
2001                                         "is not supported in legacy "
2002                                         "metadata mode");
2003         }
2004         if (!mask)
2005                 mask = &rte_flow_item_meta_mask;
2006         if (!mask->data)
2007                 return rte_flow_error_set(error, EINVAL,
2008                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2009                                         "mask cannot be zero");
2010
2011         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2012                                         (const uint8_t *)&nic_mask,
2013                                         sizeof(struct rte_flow_item_meta),
2014                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2015         return ret;
2016 }
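
/*
 * Illustrative sketch, not driver logic: a META item matching metadata set
 * earlier with the SET_META action. The helper name and the value are
 * hypothetical.
 */
static __rte_unused void
example_build_meta_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_meta spec = { .data = 0x1234 };

	item->type = RTE_FLOW_ITEM_TYPE_META;
	item->spec = &spec;
	/* A NULL mask selects rte_flow_item_meta_mask (all data bits). */
	item->mask = NULL;
	item->last = NULL;
}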
2017
2018 /**
2019  * Validate TAG item.
2020  *
2021  * @param[in] dev
2022  *   Pointer to the rte_eth_dev structure.
2023  * @param[in] item
2024  *   Item specification.
2025  * @param[in] attr
2026  *   Attributes of flow that includes this item.
2027  * @param[out] error
2028  *   Pointer to error structure.
2029  *
2030  * @return
2031  *   0 on success, a negative errno value otherwise and rte_errno is set.
2032  */
2033 static int
2034 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2035                           const struct rte_flow_item *item,
2036                           const struct rte_flow_attr *attr __rte_unused,
2037                           struct rte_flow_error *error)
2038 {
2039         const struct rte_flow_item_tag *spec = item->spec;
2040         const struct rte_flow_item_tag *mask = item->mask;
2041         const struct rte_flow_item_tag nic_mask = {
2042                 .data = RTE_BE32(UINT32_MAX),
2043                 .index = 0xff,
2044         };
2045         int ret;
2046
2047         if (!mlx5_flow_ext_mreg_supported(dev))
2048                 return rte_flow_error_set(error, ENOTSUP,
2049                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2050                                           "extended metadata register"
2051                                           " isn't supported");
2052         if (!spec)
2053                 return rte_flow_error_set(error, EINVAL,
2054                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2055                                           item->spec,
2056                                           "data cannot be empty");
2057         if (!mask)
2058                 mask = &rte_flow_item_tag_mask;
2059         if (!mask->data)
2060                 return rte_flow_error_set(error, EINVAL,
2061                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2062                                         "mask cannot be zero");
2063
2064         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2065                                         (const uint8_t *)&nic_mask,
2066                                         sizeof(struct rte_flow_item_tag),
2067                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2068         if (ret < 0)
2069                 return ret;
2070         if (mask->index != 0xff)
2071                 return rte_flow_error_set(error, EINVAL,
2072                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2073                                           "partial mask for tag index"
2074                                           " is not supported");
2075         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2076         if (ret < 0)
2077                 return ret;
2078         MLX5_ASSERT(ret != REG_NON);
2079         return 0;
2080 }
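
/*
 * Illustrative sketch, not driver logic: a TAG item; the index mask must be
 * exactly 0xff as enforced above. The helper name, index and data are
 * hypothetical.
 */
static __rte_unused void
example_build_tag_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_tag spec = {
		.data = 0xcafe,
		.index = 1,
	};
	static const struct rte_flow_item_tag mask = {
		.data = UINT32_MAX,
		.index = 0xff, /* partial index masks are rejected */
	};

	item->type = RTE_FLOW_ITEM_TYPE_TAG;
	item->spec = &spec;
	item->mask = &mask;
	item->last = NULL;
}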
2081
2082 /**
2083  * Validate port ID (vport) item.
2084  *
2085  * @param[in] dev
2086  *   Pointer to the rte_eth_dev structure.
2087  * @param[in] item
2088  *   Item specification.
2089  * @param[in] attr
2090  *   Attributes of flow that includes this item.
2091  * @param[in] item_flags
2092  *   Bit-fields that holds the items detected until now.
2093  * @param[out] error
2094  *   Pointer to error structure.
2095  *
2096  * @return
2097  *   0 on success, a negative errno value otherwise and rte_errno is set.
2098  */
2099 static int
2100 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2101                               const struct rte_flow_item *item,
2102                               const struct rte_flow_attr *attr,
2103                               uint64_t item_flags,
2104                               struct rte_flow_error *error)
2105 {
2106         const struct rte_flow_item_port_id *spec = item->spec;
2107         const struct rte_flow_item_port_id *mask = item->mask;
2108         const struct rte_flow_item_port_id switch_mask = {
2109                         .id = 0xffffffff,
2110         };
2111         struct mlx5_priv *esw_priv;
2112         struct mlx5_priv *dev_priv;
2113         int ret;
2114
2115         if (!attr->transfer)
2116                 return rte_flow_error_set(error, EINVAL,
2117                                           RTE_FLOW_ERROR_TYPE_ITEM,
2118                                           NULL,
2119                                           "match on port id is valid only"
2120                                           " when transfer flag is enabled");
2121         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2122                 return rte_flow_error_set(error, ENOTSUP,
2123                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2124                                           "multiple source ports are not"
2125                                           " supported");
2126         if (!mask)
2127                 mask = &switch_mask;
2128         if (mask->id != 0xffffffff)
2129                 return rte_flow_error_set(error, ENOTSUP,
2130                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2131                                            mask,
2132                                            "no support for partial mask on"
2133                                            " \"id\" field");
2134         ret = mlx5_flow_item_acceptable
2135                                 (item, (const uint8_t *)mask,
2136                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2137                                  sizeof(struct rte_flow_item_port_id),
2138                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2139         if (ret)
2140                 return ret;
2141         if (!spec)
2142                 return 0;
2143         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2144         if (!esw_priv)
2145                 return rte_flow_error_set(error, rte_errno,
2146                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2147                                           "failed to obtain E-Switch info for"
2148                                           " port");
2149         dev_priv = mlx5_dev_to_eswitch_info(dev);
2150         if (!dev_priv)
2151                 return rte_flow_error_set(error, rte_errno,
2152                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2153                                           NULL,
2154                                           "failed to obtain E-Switch info");
2155         if (esw_priv->domain_id != dev_priv->domain_id)
2156                 return rte_flow_error_set(error, EINVAL,
2157                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2158                                           "cannot match on a port from a"
2159                                           " different E-Switch");
2160         return 0;
2161 }
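
/*
 * Illustrative sketch, not driver logic: a PORT_ID item, valid only in a
 * transfer (E-Switch) flow and only with a full "id" mask as enforced
 * above. The helper name and the DPDK port id are hypothetical.
 */
static __rte_unused void
example_build_port_id_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_port_id spec = { .id = 1 };

	item->type = RTE_FLOW_ITEM_TYPE_PORT_ID;
	item->spec = &spec;
	/* A NULL mask selects rte_flow_item_port_id_mask (full "id"). */
	item->mask = NULL;
	item->last = NULL;
}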
2162
2163 /**
2164  * Validate VLAN item.
2165  *
2166  * @param[in] item
2167  *   Item specification.
2168  * @param[in] item_flags
2169  *   Bit-fields that holds the items detected until now.
2170  * @param[in] dev
2171  *   Ethernet device flow is being created on.
2172  * @param[out] error
2173  *   Pointer to error structure.
2174  *
2175  * @return
2176  *   0 on success, a negative errno value otherwise and rte_errno is set.
2177  */
2178 static int
2179 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2180                            uint64_t item_flags,
2181                            struct rte_eth_dev *dev,
2182                            struct rte_flow_error *error)
2183 {
2184         const struct rte_flow_item_vlan *mask = item->mask;
2185         const struct rte_flow_item_vlan nic_mask = {
2186                 .tci = RTE_BE16(UINT16_MAX),
2187                 .inner_type = RTE_BE16(UINT16_MAX),
2188                 .has_more_vlan = 1,
2189         };
2190         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2191         int ret;
2192         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2193                                         MLX5_FLOW_LAYER_INNER_L4) :
2194                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2195                                         MLX5_FLOW_LAYER_OUTER_L4);
2196         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2197                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2198
2199         if (item_flags & vlanm)
2200                 return rte_flow_error_set(error, EINVAL,
2201                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2202                                           "multiple VLAN layers not supported");
2203         else if ((item_flags & l34m) != 0)
2204                 return rte_flow_error_set(error, EINVAL,
2205                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2206                                           "VLAN cannot follow L3/L4 layer");
2207         if (!mask)
2208                 mask = &rte_flow_item_vlan_mask;
2209         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2210                                         (const uint8_t *)&nic_mask,
2211                                         sizeof(struct rte_flow_item_vlan),
2212                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2213         if (ret)
2214                 return ret;
2215         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2216                 struct mlx5_priv *priv = dev->data->dev_private;
2217
2218                 if (priv->vmwa_context) {
2219                         /*
2220                          * A non-NULL context means we run in a virtual machine
2221                          * with SR-IOV enabled: a VLAN interface must be created
2222                          * so the hypervisor sets up the E-Switch vport context
2223                          * correctly. Since we avoid creating multiple VLAN
2224                          * interfaces, a VLAN tag mask is not supported.
2225                          */
2226                         return rte_flow_error_set(error, EINVAL,
2227                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2228                                                   item,
2229                                                   "VLAN tag mask is not"
2230                                                   " supported in virtual"
2231                                                   " environment");
2232                 }
2233         }
2234         return 0;
2235 }
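
/*
 * Illustrative sketch, not driver logic: a VLAN item matching a single VID.
 * With SR-IOV in a VM, only this exact 0x0fff TCI mask passes the check
 * above. The helper name and the VID are hypothetical.
 */
static __rte_unused void
example_build_vlan_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_vlan spec = {
		.tci = RTE_BE16(100), /* VID 100, PCP/DEI zero */
	};
	static const struct rte_flow_item_vlan mask = {
		.tci = RTE_BE16(0x0fff), /* match the VID bits only */
	};

	item->type = RTE_FLOW_ITEM_TYPE_VLAN;
	item->spec = &spec;
	item->mask = &mask;
	item->last = NULL;
}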
2236
2237 /*
2238  * GTP flags are contained in 1 byte of the format:
2239  * -------------------------------------------
2240  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2241  * |-----------------------------------------|
2242  * | value | Version | PT | Res | E | S | PN |
2243  * -------------------------------------------
2244  *
2245  * Matching is supported only for GTP flags E, S, PN.
2246  */
2247 #define MLX5_GTP_FLAGS_MASK     0x07
2248
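/*
 * Illustrative sketch, not driver logic: keeping only the matchable E, S
 * and PN bits, which occupy the three least significant bits of the flags
 * byte. The helper name is hypothetical.
 */
static __rte_unused uint8_t
example_gtp_matchable_flags(uint8_t v_pt_rsv_flags)
{
	/* E = 0x04, S = 0x02, PN = 0x01; Version/PT/Res are not matchable. */
	return v_pt_rsv_flags & MLX5_GTP_FLAGS_MASK;
}
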
2249 /**
2250  * Validate GTP item.
2251  *
2252  * @param[in] dev
2253  *   Pointer to the rte_eth_dev structure.
2254  * @param[in] item
2255  *   Item specification.
2256  * @param[in] item_flags
2257  *   Bit-fields that holds the items detected until now.
2258  * @param[out] error
2259  *   Pointer to error structure.
2260  *
2261  * @return
2262  *   0 on success, a negative errno value otherwise and rte_errno is set.
2263  */
2264 static int
2265 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2266                           const struct rte_flow_item *item,
2267                           uint64_t item_flags,
2268                           struct rte_flow_error *error)
2269 {
2270         struct mlx5_priv *priv = dev->data->dev_private;
2271         const struct rte_flow_item_gtp *spec = item->spec;
2272         const struct rte_flow_item_gtp *mask = item->mask;
2273         const struct rte_flow_item_gtp nic_mask = {
2274                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2275                 .msg_type = 0xff,
2276                 .teid = RTE_BE32(0xffffffff),
2277         };
2278
2279         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2280                 return rte_flow_error_set(error, ENOTSUP,
2281                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2282                                           "GTP support is not enabled");
2283         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2284                 return rte_flow_error_set(error, ENOTSUP,
2285                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2286                                           "multiple tunnel layers not"
2287                                           " supported");
2288         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2289                 return rte_flow_error_set(error, EINVAL,
2290                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2291                                           "no outer UDP layer found");
2292         if (!mask)
2293                 mask = &rte_flow_item_gtp_mask;
2294         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2295                 return rte_flow_error_set(error, ENOTSUP,
2296                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2297                                           "Match is supported for GTP"
2298                                           " flags only");
2299         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2300                                          (const uint8_t *)&nic_mask,
2301                                          sizeof(struct rte_flow_item_gtp),
2302                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2303 }
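
/*
 * Illustrative sketch, not driver logic: a GTP item matching a TEID, which
 * must follow an outer UDP item as required above. The helper name and the
 * TEID are hypothetical.
 */
static __rte_unused void
example_build_gtp_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_gtp spec = {
		.teid = RTE_BE32(0x12345678),
	};
	static const struct rte_flow_item_gtp mask = {
		.teid = RTE_BE32(UINT32_MAX),
	};

	item->type = RTE_FLOW_ITEM_TYPE_GTP;
	item->spec = &spec;
	item->mask = &mask;
	item->last = NULL;
}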
2304
2305 /**
2306  * Validate GTP PSC item.
2307  *
2308  * @param[in] item
2309  *   Item specification.
2310  * @param[in] last_item
2311  *   Previous validated item in the pattern items.
2312  * @param[in] gtp_item
2313  *   Previous GTP item specification.
2314  * @param[in] attr
2315  *   Pointer to flow attributes.
2316  * @param[out] error
2317  *   Pointer to error structure.
2318  *
2319  * @return
2320  *   0 on success, a negative errno value otherwise and rte_errno is set.
2321  */
2322 static int
2323 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2324                               uint64_t last_item,
2325                               const struct rte_flow_item *gtp_item,
2326                               const struct rte_flow_attr *attr,
2327                               struct rte_flow_error *error)
2328 {
2329         const struct rte_flow_item_gtp *gtp_spec;
2330         const struct rte_flow_item_gtp *gtp_mask;
2331         const struct rte_flow_item_gtp_psc *spec;
2332         const struct rte_flow_item_gtp_psc *mask;
2333         const struct rte_flow_item_gtp_psc nic_mask = {
2334                 .pdu_type = 0xFF,
2335                 .qfi = 0xFF,
2336         };
2337
2338         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2339                 return rte_flow_error_set
2340                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2341                          "GTP PSC item must be preceded by a GTP item");
2342         gtp_spec = gtp_item->spec;
2343         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2344         /* Reject a GTP spec that requests the E flag to match zero. */
2345         if (gtp_spec &&
2346                 (gtp_mask->v_pt_rsv_flags &
2347                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2348                 return rte_flow_error_set
2349                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2350                          "GTP E flag must be 1 to match GTP PSC");
2351         /* Check the flow is not created in group zero. */
2352         if (!attr->transfer && !attr->group)
2353                 return rte_flow_error_set
2354                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2355                          "GTP PSC is not supported for group 0");
2356         /* GTP PSC spec is optional; nothing more to validate without it. */
2357         if (!item->spec)
2358                 return 0;
2359         spec = item->spec;
2360         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2361         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2362                 return rte_flow_error_set
2363                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                          "PDU type should be smaller than 16");
2365         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2366                                          (const uint8_t *)&nic_mask,
2367                                          sizeof(struct rte_flow_item_gtp_psc),
2368                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2369 }
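
/*
 * Illustrative sketch, not driver logic: a GTP item with the E flag set to
 * one, followed by a GTP PSC item matching the QFI, which satisfies the
 * ordering and flag checks above. The helper name and values are
 * hypothetical.
 */
static __rte_unused void
example_build_gtp_psc_pattern(struct rte_flow_item items[2])
{
	static const struct rte_flow_item_gtp gtp_spec = {
		.v_pt_rsv_flags = MLX5_GTP_EXT_HEADER_FLAG, /* E flag = 1 */
	};
	static const struct rte_flow_item_gtp gtp_mask = {
		.v_pt_rsv_flags = MLX5_GTP_EXT_HEADER_FLAG,
	};
	static const struct rte_flow_item_gtp_psc psc_spec = {
		.pdu_type = 0, /* DL PDU session information */
		.qfi = 9,
	};

	items[0].type = RTE_FLOW_ITEM_TYPE_GTP;
	items[0].spec = &gtp_spec;
	items[0].mask = &gtp_mask;
	items[0].last = NULL;
	items[1].type = RTE_FLOW_ITEM_TYPE_GTP_PSC;
	items[1].spec = &psc_spec;
	/* A NULL mask selects the default GTP PSC mask. */
	items[1].mask = NULL;
	items[1].last = NULL;
}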
2370
2371 /**
2372  * Validate IPV4 item.
2373  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2374  * add specific validation of the fragment_offset field.
2375  *
2376  * @param[in] item
2377  *   Item specification.
2378  * @param[in] item_flags
2379  *   Bit-fields that holds the items detected until now.
2380  * @param[out] error
2381  *   Pointer to error structure.
2382  *
2383  * @return
2384  *   0 on success, a negative errno value otherwise and rte_errno is set.
2385  */
2386 static int
2387 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2388                            uint64_t item_flags,
2389                            uint64_t last_item,
2390                            uint16_t ether_type,
2391                            struct rte_flow_error *error)
2392 {
2393         int ret;
2394         const struct rte_flow_item_ipv4 *spec = item->spec;
2395         const struct rte_flow_item_ipv4 *last = item->last;
2396         const struct rte_flow_item_ipv4 *mask = item->mask;
2397         rte_be16_t fragment_offset_spec = 0;
2398         rte_be16_t fragment_offset_last = 0;
2399         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2400                 .hdr = {
2401                         .src_addr = RTE_BE32(0xffffffff),
2402                         .dst_addr = RTE_BE32(0xffffffff),
2403                         .type_of_service = 0xff,
2404                         .fragment_offset = RTE_BE16(0xffff),
2405                         .next_proto_id = 0xff,
2406                         .time_to_live = 0xff,
2407                 },
2408         };
2409
2410         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2411                                            ether_type, &nic_ipv4_mask,
2412                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2413         if (ret < 0)
2414                 return ret;
2415         if (spec && mask)
2416                 fragment_offset_spec = spec->hdr.fragment_offset &
2417                                        mask->hdr.fragment_offset;
2418         if (!fragment_offset_spec)
2419                 return 0;
2420         /*
2421          * spec and mask are valid, enforce using full mask to make sure the
2422          * complete value is used correctly.
2423          */
2424         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2425                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2426                 return rte_flow_error_set(error, EINVAL,
2427                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2428                                           item, "must use full mask for"
2429                                           " fragment_offset");
2430         /*
2431          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2432          * indicating this is the first fragment of a fragmented packet.
2433          * This is not yet supported in MLX5, so return an appropriate error.
2434          */
2435         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2436                 return rte_flow_error_set(error, ENOTSUP,
2437                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2438                                           "match on first fragment not "
2439                                           "supported");
2440         if (fragment_offset_spec && !last)
2441                 return rte_flow_error_set(error, ENOTSUP,
2442                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2443                                           "specified value not supported");
2444         /* spec and last are valid, validate the specified range. */
2445         fragment_offset_last = last->hdr.fragment_offset &
2446                                mask->hdr.fragment_offset;
2447         /*
2448          * Match on fragment_offset spec 0x2001 and last 0x3fff
2449          * means MF is 1 and frag-offset is > 0.
2450          * Such a packet is any fragment from the second onward,
2451          * excluding the last one. This is not yet supported in
2452          * MLX5, so return an appropriate error.
2453          */
2454         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2455             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2456                 return rte_flow_error_set(error, ENOTSUP,
2457                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2458                                           last, "match on following "
2459                                           "fragments not supported");
2460         /*
2461          * Match on fragment_offset spec 0x0001 and last 0x1fff
2462          * means MF is 0 and frag-offset is > 0.
2463          * Such a packet is the last fragment of a fragmented packet.
2464          * This is not yet supported in MLX5, so return an
2465          * appropriate error.
2466          */
2467         if (fragment_offset_spec == RTE_BE16(1) &&
2468             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2469                 return rte_flow_error_set(error, ENOTSUP,
2470                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2471                                           last, "match on last "
2472                                           "fragment not supported");
2473         /*
2474          * Match on fragment_offset spec 0x0001 and last 0x3fff
2475          * means MF and/or frag-offset is not 0.
2476          * This is a fragmented packet.
2477          * Other range values are invalid and rejected.
2478          */
2479         if (!(fragment_offset_spec == RTE_BE16(1) &&
2480               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2481                 return rte_flow_error_set(error, ENOTSUP,
2482                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2483                                           "specified range not supported");
2484         return 0;
2485 }
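/*
 * Illustrative sketch (not part of the driver): per the checks above, the
 * only accepted fragment_offset range is spec 0x0001 with last 0x3fff
 * under the full 0x3fff mask, i.e. "any fragment". A hypothetical
 * pattern item could look like:
 *
 * @code
 * struct rte_flow_item_ipv4 ipv4_spec = {
 *	.hdr = { .fragment_offset = RTE_BE16(1) },
 * };
 * struct rte_flow_item_ipv4 ipv4_last = {
 *	.hdr = { .fragment_offset = RTE_BE16(0x3fff) },
 * };
 * struct rte_flow_item_ipv4 ipv4_mask = {
 *	.hdr = { .fragment_offset = RTE_BE16(0x3fff) },
 * };
 * struct rte_flow_item item = {
 *	.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *	.spec = &ipv4_spec,
 *	.last = &ipv4_last,
 *	.mask = &ipv4_mask,
 * };
 * @endcode
 */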
2486
2487 /**
2488  * Validate IPV6 fragment extension item.
2489  *
2490  * @param[in] item
2491  *   Item specification.
2492  * @param[in] item_flags
2493  *   Bit-fields that holds the items detected until now.
2494  * @param[out] error
2495  *   Pointer to error structure.
2496  *
2497  * @return
2498  *   0 on success, a negative errno value otherwise and rte_errno is set.
2499  */
2500 static int
2501 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2502                                     uint64_t item_flags,
2503                                     struct rte_flow_error *error)
2504 {
2505         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2506         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2507         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2508         rte_be16_t frag_data_spec = 0;
2509         rte_be16_t frag_data_last = 0;
2510         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2511         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2512                                       MLX5_FLOW_LAYER_OUTER_L4;
2513         int ret = 0;
2514         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2515                 .hdr = {
2516                         .next_header = 0xff,
2517                         .frag_data = RTE_BE16(0xffff),
2518                 },
2519         };
2520
2521         if (item_flags & l4m)
2522                 return rte_flow_error_set(error, EINVAL,
2523                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2524                                           "ipv6 fragment extension item cannot "
2525                                           "follow L4 item.");
2526         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2527             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2528                 return rte_flow_error_set(error, EINVAL,
2529                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2530                                           "ipv6 fragment extension item must "
2531                                           "follow ipv6 item");
2532         if (spec && mask)
2533                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2534         if (!frag_data_spec)
2535                 return 0;
2536         /*
2537          * spec and mask are valid, enforce using full mask to make sure the
2538          * complete value is used correctly.
2539          */
2540         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2541                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2542                 return rte_flow_error_set(error, EINVAL,
2543                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2544                                           item, "must use full mask for"
2545                                           " frag_data");
2546         /*
2547          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0,
2548          * i.e. the first fragment of a fragmented packet.
2549          */
2550         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2551                 return rte_flow_error_set(error, ENOTSUP,
2552                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2553                                           "match on first fragment not "
2554                                           "supported");
2555         if (frag_data_spec && !last)
2556                 return rte_flow_error_set(error, EINVAL,
2557                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2558                                           "specified value not supported");
2559         ret = mlx5_flow_item_acceptable
2560                                 (item, (const uint8_t *)mask,
2561                                  (const uint8_t *)&nic_mask,
2562                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2563                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2564         if (ret)
2565                 return ret;
2566         /* spec and last are valid, validate the specified range. */
2567         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2568         /*
2569          * Match on frag_data spec 0x0009 and last 0xfff9
2570          * means M is 1 and frag-offset is > 0.
2571          * Such a packet is the second fragment or later, excluding the
2572          * last. This is not yet supported in MLX5; return an appropriate
2573          * error message.
2574          */
2575         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2576                                        RTE_IPV6_EHDR_MF_MASK) &&
2577             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2578                 return rte_flow_error_set(error, ENOTSUP,
2579                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2580                                           last, "match on following "
2581                                           "fragments not supported");
2582         /*
2583          * Match on frag_data spec 0x0008 and last 0xfff8
2584          * means M is 0 and frag-offset is > 0.
2585          * Such a packet is the last fragment of a fragmented packet.
2586          * This is not yet supported in MLX5; return an appropriate
2587          * error message.
2588          */
2589         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2590             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2591                 return rte_flow_error_set(error, ENOTSUP,
2592                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2593                                           last, "match on last "
2594                                           "fragment not supported");
2595         /* Other range values are invalid and rejected. */
2596         return rte_flow_error_set(error, EINVAL,
2597                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2598                                   "specified range not supported");
2599 }
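/*
 * Illustrative sketch (not part of the driver): given the rejections
 * above, a plain presence match on the IPv6 fragment extension header
 * (no frag_data spec, or a zero one) is what passes validation, e.g.:
 *
 * @code
 * struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 */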
2600
2601 /**
2602  * Validate the pop VLAN action.
2603  *
2604  * @param[in] dev
2605  *   Pointer to the rte_eth_dev structure.
2606  * @param[in] action_flags
2607  *   Holds the actions detected until now.
2608  * @param[in] action
2609  *   Pointer to the pop vlan action.
2610  * @param[in] item_flags
2611  *   The items found in this flow rule.
2612  * @param[in] attr
2613  *   Pointer to flow attributes.
2614  * @param[out] error
2615  *   Pointer to error structure.
2616  *
2617  * @return
2618  *   0 on success, a negative errno value otherwise and rte_errno is set.
2619  */
2620 static int
2621 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2622                                  uint64_t action_flags,
2623                                  const struct rte_flow_action *action,
2624                                  uint64_t item_flags,
2625                                  const struct rte_flow_attr *attr,
2626                                  struct rte_flow_error *error)
2627 {
2628         const struct mlx5_priv *priv = dev->data->dev_private;
2629
2630         (void)action;
2631         (void)attr;
2632         if (!priv->sh->pop_vlan_action)
2633                 return rte_flow_error_set(error, ENOTSUP,
2634                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2635                                           NULL,
2636                                           "pop vlan action is not supported");
2637         if (attr->egress)
2638                 return rte_flow_error_set(error, ENOTSUP,
2639                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2640                                           NULL,
2641                                           "pop vlan action not supported for "
2642                                           "egress");
2643         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2644                 return rte_flow_error_set(error, ENOTSUP,
2645                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2646                                           "no support for multiple VLAN "
2647                                           "actions");
2648         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2649         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2650             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2651                 return rte_flow_error_set(error, ENOTSUP,
2652                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2653                                           NULL,
2654                                           "cannot pop vlan after decap without "
2655                                           "match on inner vlan in the flow");
2656         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2657         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2658             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2659                 return rte_flow_error_set(error, ENOTSUP,
2660                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2661                                           NULL,
2662                                           "cannot pop vlan without a "
2663                                           "match on (outer) vlan in the flow");
2664         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2665                 return rte_flow_error_set(error, EINVAL,
2666                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2667                                           "wrong action order, port_id should "
2668                                           "be after pop VLAN action");
2669         if (!attr->transfer && priv->representor)
2670                 return rte_flow_error_set(error, ENOTSUP,
2671                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2672                                           "pop vlan action for VF representor "
2673                                           "not supported on NIC table");
2674         return 0;
2675 }
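/*
 * Illustrative sketch (not part of the driver): a pop VLAN action that
 * satisfies the checks above needs a VLAN match in the pattern and, in
 * transfer rules, must precede port_id. Hypothetical action list:
 *
 * @code
 * struct rte_flow_action_port_id pid = { .id = 1 }; // hypothetical port
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *	{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */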
2676
2677 /**
2678  * Get the default VLAN info from the VLAN match info in the items.
2679  *
2680  * @param[in] items
2681  *   The list of item specifications.
2682  * @param[out] vlan
2683  *   Pointer to the VLAN info to fill.
2687  */
2688 static void
2689 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2690                                   struct rte_vlan_hdr *vlan)
2691 {
2692         const struct rte_flow_item_vlan nic_mask = {
2693                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2694                                 MLX5DV_FLOW_VLAN_VID_MASK),
2695                 .inner_type = RTE_BE16(0xffff),
2696         };
2697
2698         if (items == NULL)
2699                 return;
2700         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2701                 int type = items->type;
2702
2703                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2704                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2705                         break;
2706         }
2707         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2708                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2709                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2710
2711                 /* If VLAN item in pattern doesn't contain data, return here. */
2712                 if (!vlan_v)
2713                         return;
2714                 if (!vlan_m)
2715                         vlan_m = &nic_mask;
2716                 /* Only full match values are accepted */
2717                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2718                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2719                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2720                         vlan->vlan_tci |=
2721                                 rte_be_to_cpu_16(vlan_v->tci &
2722                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2723                 }
2724                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2725                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2726                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2727                         vlan->vlan_tci |=
2728                                 rte_be_to_cpu_16(vlan_v->tci &
2729                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2730                 }
2731                 if (vlan_m->inner_type == nic_mask.inner_type)
2732                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2733                                                            vlan_m->inner_type);
2734         }
2735 }
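/*
 * Illustrative note (not part of the driver): the TCI handling above
 * follows the 802.1Q layout, PCP in bits 15-13 and VID in bits 11-0.
 * With hypothetical host-order pcp/vid values, a TCI is composed as:
 *
 * @code
 * uint16_t tci = (pcp << MLX5DV_FLOW_VLAN_PCP_SHIFT) |
 *		  (vid & MLX5DV_FLOW_VLAN_VID_MASK);
 * @endcode
 */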
2736
2737 /**
2738  * Validate the push VLAN action.
2739  *
2740  * @param[in] dev
2741  *   Pointer to the rte_eth_dev structure.
2742  * @param[in] action_flags
2743  *   Holds the actions detected until now.
2744  * @param[in] vlan_m
2745  *   Pointer to the VLAN item mask from the flow pattern, NULL if none.
2746  * @param[in] action
2747  *   Pointer to the action structure.
2748  * @param[in] attr
2749  *   Pointer to flow attributes
2750  * @param[out] error
2751  *   Pointer to error structure.
2752  *
2753  * @return
2754  *   0 on success, a negative errno value otherwise and rte_errno is set.
2755  */
2756 static int
2757 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2758                                   uint64_t action_flags,
2759                                   const struct rte_flow_item_vlan *vlan_m,
2760                                   const struct rte_flow_action *action,
2761                                   const struct rte_flow_attr *attr,
2762                                   struct rte_flow_error *error)
2763 {
2764         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2765         const struct mlx5_priv *priv = dev->data->dev_private;
2766
2767         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2768             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2769                 return rte_flow_error_set(error, EINVAL,
2770                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2771                                           "invalid vlan ethertype");
2772         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2773                 return rte_flow_error_set(error, EINVAL,
2774                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2775                                           "wrong action order, port_id should "
2776                                           "be after push VLAN");
2777         if (!attr->transfer && priv->representor)
2778                 return rte_flow_error_set(error, ENOTSUP,
2779                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2780                                           "push vlan action for VF representor "
2781                                           "not supported on NIC table");
2782         if (vlan_m &&
2783             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2784             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2785                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2786             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2787             !(mlx5_flow_find_action
2788                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2789                 return rte_flow_error_set(error, EINVAL,
2790                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2791                                           "not full match mask on VLAN PCP and "
2792                                           "there is no of_set_vlan_pcp action, "
2793                                           "push VLAN action cannot figure out "
2794                                           "PCP value");
2795         if (vlan_m &&
2796             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2797             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2798                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2799             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2800             !(mlx5_flow_find_action
2801                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2802                 return rte_flow_error_set(error, EINVAL,
2803                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2804                                           "not full match mask on VLAN VID and "
2805                                           "there is no of_set_vlan_vid action, "
2806                                           "push VLAN action cannot figure out "
2807                                           "VID value");
2808         (void)attr;
2809         return 0;
2810 }
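/*
 * Illustrative sketch (not part of the driver): when the VLAN item mask
 * does not fully cover PCP/VID, the checks above demand explicit
 * of_set_vlan_pcp/of_set_vlan_vid actions after the push. Values are
 * hypothetical:
 *
 * @code
 * struct rte_flow_action_of_push_vlan push = {
 *	.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 * };
 * struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 * struct rte_flow_action_of_set_vlan_vid vid = {
 *	.vlan_vid = RTE_BE16(100),
 * };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */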
2811
2812 /**
2813  * Validate the set VLAN PCP.
2814  *
2815  * @param[in] action_flags
2816  *   Holds the actions detected until now.
2817  * @param[in] actions
2818  *   Pointer to the list of actions remaining in the flow rule.
2819  * @param[out] error
2820  *   Pointer to error structure.
2821  *
2822  * @return
2823  *   0 on success, a negative errno value otherwise and rte_errno is set.
2824  */
2825 static int
2826 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2827                                      const struct rte_flow_action actions[],
2828                                      struct rte_flow_error *error)
2829 {
2830         const struct rte_flow_action *action = actions;
2831         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2832
2833         if (conf->vlan_pcp > 7)
2834                 return rte_flow_error_set(error, EINVAL,
2835                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2836                                           "VLAN PCP value is too big");
2837         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2838                 return rte_flow_error_set(error, ENOTSUP,
2839                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2840                                           "set VLAN PCP action must follow "
2841                                           "the push VLAN action");
2842         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2843                 return rte_flow_error_set(error, ENOTSUP,
2844                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2845                                           "Multiple VLAN PCP modifications are "
2846                                           "not supported");
2847         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2848                 return rte_flow_error_set(error, EINVAL,
2849                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2850                                           "wrong action order, port_id should "
2851                                           "be after set VLAN PCP");
2852         return 0;
2853 }
2854
2855 /**
2856  * Validate the set VLAN VID.
2857  *
2858  * @param[in] item_flags
2859  *   Holds the items detected in this rule.
2860  * @param[in] action_flags
2861  *   Holds the actions detected until now.
2862  * @param[in] actions
2863  *   Pointer to the list of actions remaining in the flow rule.
2864  * @param[out] error
2865  *   Pointer to error structure.
2866  *
2867  * @return
2868  *   0 on success, a negative errno value otherwise and rte_errno is set.
2869  */
2870 static int
2871 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2872                                      uint64_t action_flags,
2873                                      const struct rte_flow_action actions[],
2874                                      struct rte_flow_error *error)
2875 {
2876         const struct rte_flow_action *action = actions;
2877         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2878
2879         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2880                 return rte_flow_error_set(error, EINVAL,
2881                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2882                                           "VLAN VID value is too big");
2883         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2884             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2885                 return rte_flow_error_set(error, ENOTSUP,
2886                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2887                                           "set VLAN VID action must follow push"
2888                                           " VLAN action or match on VLAN item");
2889         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2890                 return rte_flow_error_set(error, ENOTSUP,
2891                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2892                                           "Multiple VLAN VID modifications are "
2893                                           "not supported");
2894         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2895                 return rte_flow_error_set(error, EINVAL,
2896                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2897                                           "wrong action order, port_id should "
2898                                           "be after set VLAN VID");
2899         return 0;
2900 }
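/*
 * Illustrative note (not part of the driver): vlan_vid is in network
 * order and only VIDs 0..4094 pass the check above, 0xFFF being
 * reserved by 802.1Q. E.g. the highest accepted value:
 *
 * @code
 * struct rte_flow_action_of_set_vlan_vid vid = {
 *	.vlan_vid = RTE_BE16(0xFFE),
 * };
 * @endcode
 */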
2901
2902 /**
2903  * Validate the FLAG action.
2904  *
2905  * @param[in] dev
2906  *   Pointer to the rte_eth_dev structure.
2907  * @param[in] action_flags
2908  *   Holds the actions detected until now.
2909  * @param[in] attr
2910  *   Pointer to flow attributes
2911  * @param[out] error
2912  *   Pointer to error structure.
2913  *
2914  * @return
2915  *   0 on success, a negative errno value otherwise and rte_errno is set.
2916  */
2917 static int
2918 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2919                              uint64_t action_flags,
2920                              const struct rte_flow_attr *attr,
2921                              struct rte_flow_error *error)
2922 {
2923         struct mlx5_priv *priv = dev->data->dev_private;
2924         struct mlx5_dev_config *config = &priv->config;
2925         int ret;
2926
2927         /* Fall back if no extended metadata register support. */
2928         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2929                 return mlx5_flow_validate_action_flag(action_flags, attr,
2930                                                       error);
2931         /* Extensive metadata mode requires registers. */
2932         if (!mlx5_flow_ext_mreg_supported(dev))
2933                 return rte_flow_error_set(error, ENOTSUP,
2934                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2935                                           "no metadata registers "
2936                                           "to support flag action");
2937         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2938                 return rte_flow_error_set(error, ENOTSUP,
2939                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2940                                           "extended metadata register"
2941                                           " isn't available");
2942         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2943         if (ret < 0)
2944                 return ret;
2945         MLX5_ASSERT(ret > 0);
2946         if (action_flags & MLX5_FLOW_ACTION_MARK)
2947                 return rte_flow_error_set(error, EINVAL,
2948                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2949                                           "can't mark and flag in same flow");
2950         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2951                 return rte_flow_error_set(error, EINVAL,
2952                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2953                                           "can't have 2 flag"
2954                                           " actions in same flow");
2955         return 0;
2956 }
2957
2958 /**
2959  * Validate MARK action.
2960  *
2961  * @param[in] dev
2962  *   Pointer to the rte_eth_dev structure.
2963  * @param[in] action
2964  *   Pointer to action.
2965  * @param[in] action_flags
2966  *   Holds the actions detected until now.
2967  * @param[in] attr
2968  *   Pointer to flow attributes
2969  * @param[out] error
2970  *   Pointer to error structure.
2971  *
2972  * @return
2973  *   0 on success, a negative errno value otherwise and rte_errno is set.
2974  */
2975 static int
2976 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2977                              const struct rte_flow_action *action,
2978                              uint64_t action_flags,
2979                              const struct rte_flow_attr *attr,
2980                              struct rte_flow_error *error)
2981 {
2982         struct mlx5_priv *priv = dev->data->dev_private;
2983         struct mlx5_dev_config *config = &priv->config;
2984         const struct rte_flow_action_mark *mark = action->conf;
2985         int ret;
2986
2987         if (is_tunnel_offload_active(dev))
2988                 return rte_flow_error_set(error, ENOTSUP,
2989                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2990                                           "no mark action "
2991                                           "if tunnel offload active");
2992         /* Fall back if no extended metadata register support. */
2993         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2994                 return mlx5_flow_validate_action_mark(action, action_flags,
2995                                                       attr, error);
2996         /* Extensive metadata mode requires registers. */
2997         if (!mlx5_flow_ext_mreg_supported(dev))
2998                 return rte_flow_error_set(error, ENOTSUP,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3000                                           "no metadata registers "
3001                                           "to support mark action");
3002         if (!priv->sh->dv_mark_mask)
3003                 return rte_flow_error_set(error, ENOTSUP,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3005                                           "extended metadata register"
3006                                           " isn't available");
3007         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3008         if (ret < 0)
3009                 return ret;
3010         MLX5_ASSERT(ret > 0);
3011         if (!mark)
3012                 return rte_flow_error_set(error, EINVAL,
3013                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3014                                           "configuration cannot be null");
3015         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3016                 return rte_flow_error_set(error, EINVAL,
3017                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3018                                           &mark->id,
3019                                           "mark id exceeds the limit");
3020         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3021                 return rte_flow_error_set(error, EINVAL,
3022                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3023                                           "can't flag and mark in same flow");
3024         if (action_flags & MLX5_FLOW_ACTION_MARK)
3025                 return rte_flow_error_set(error, EINVAL,
3026                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3027                                           "can't have 2 mark actions in same"
3028                                           " flow");
3029         return 0;
3030 }
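/*
 * Illustrative sketch (not part of the driver): a mark id must stay
 * below MLX5_FLOW_MARK_MAX masked by the available register width, and
 * mark cannot be combined with flag in the same flow. Hypothetical:
 *
 * @code
 * struct rte_flow_action_mark mark = { .id = 0xbeef }; // hypothetical id
 * struct rte_flow_action_queue queue = { .index = 0 };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */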
3031
3032 /**
3033  * Validate SET_META action.
3034  *
3035  * @param[in] dev
3036  *   Pointer to the rte_eth_dev structure.
3037  * @param[in] action
3038  *   Pointer to the action structure.
3039  * @param[in] action_flags
3040  *   Holds the actions detected until now.
3041  * @param[in] attr
3042  *   Pointer to flow attributes
3043  * @param[out] error
3044  *   Pointer to error structure.
3045  *
3046  * @return
3047  *   0 on success, a negative errno value otherwise and rte_errno is set.
3048  */
3049 static int
3050 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3051                                  const struct rte_flow_action *action,
3052                                  uint64_t action_flags __rte_unused,
3053                                  const struct rte_flow_attr *attr,
3054                                  struct rte_flow_error *error)
3055 {
3056         const struct rte_flow_action_set_meta *conf;
3057         uint32_t nic_mask = UINT32_MAX;
3058         int reg;
3059
3060         if (!mlx5_flow_ext_mreg_supported(dev))
3061                 return rte_flow_error_set(error, ENOTSUP,
3062                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3063                                           "extended metadata register"
3064                                           " isn't supported");
3065         reg = flow_dv_get_metadata_reg(dev, attr, error);
3066         if (reg < 0)
3067                 return reg;
3068         if (reg == REG_NON)
3069                 return rte_flow_error_set(error, ENOTSUP,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3071                                           "unavailable extended metadata register");
3072         if (reg != REG_A && reg != REG_B) {
3073                 struct mlx5_priv *priv = dev->data->dev_private;
3074
3075                 nic_mask = priv->sh->dv_meta_mask;
3076         }
3077         if (!(action->conf))
3078                 return rte_flow_error_set(error, EINVAL,
3079                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3080                                           "configuration cannot be null");
3081         conf = (const struct rte_flow_action_set_meta *)action->conf;
3082         if (!conf->mask)
3083                 return rte_flow_error_set(error, EINVAL,
3084                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3085                                           "zero mask doesn't have any effect");
3086         if (conf->mask & ~nic_mask)
3087                 return rte_flow_error_set(error, EINVAL,
3088                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3089                                           "metadata must be within reg C0");
3090         return 0;
3091 }
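/*
 * Illustrative sketch (not part of the driver): set_meta needs a
 * non-zero mask that fits the metadata register resolved above (the
 * reg C0 mask when the register is not REG_A/REG_B). Values are
 * hypothetical:
 *
 * @code
 * struct rte_flow_action_set_meta meta = {
 *	.data = 0x1234,
 *	.mask = 0xffff,
 * };
 * @endcode
 */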
3092
3093 /**
3094  * Validate SET_TAG action.
3095  *
3096  * @param[in] dev
3097  *   Pointer to the rte_eth_dev structure.
3098  * @param[in] action
3099  *   Pointer to the action structure.
3100  * @param[in] action_flags
3101  *   Holds the actions detected until now.
3102  * @param[in] attr
3103  *   Pointer to flow attributes
3104  * @param[out] error
3105  *   Pointer to error structure.
3106  *
3107  * @return
3108  *   0 on success, a negative errno value otherwise and rte_errno is set.
3109  */
3110 static int
3111 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3112                                 const struct rte_flow_action *action,
3113                                 uint64_t action_flags,
3114                                 const struct rte_flow_attr *attr,
3115                                 struct rte_flow_error *error)
3116 {
3117         const struct rte_flow_action_set_tag *conf;
3118         const uint64_t terminal_action_flags =
3119                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3120                 MLX5_FLOW_ACTION_RSS;
3121         int ret;
3122
3123         if (!mlx5_flow_ext_mreg_supported(dev))
3124                 return rte_flow_error_set(error, ENOTSUP,
3125                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3126                                           "extensive metadata register"
3127                                           " isn't supported");
3128         if (!(action->conf))
3129                 return rte_flow_error_set(error, EINVAL,
3130                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3131                                           "configuration cannot be null");
3132         conf = (const struct rte_flow_action_set_tag *)action->conf;
3133         if (!conf->mask)
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3136                                           "zero mask doesn't have any effect");
3137         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3138         if (ret < 0)
3139                 return ret;
3140         if (!attr->transfer && attr->ingress &&
3141             (action_flags & terminal_action_flags))
3142                 return rte_flow_error_set(error, EINVAL,
3143                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3144                                           "set_tag has no effect"
3145                                           " with terminal actions");
3146         return 0;
3147 }
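/*
 * Illustrative sketch (not part of the driver): set_tag writes an
 * application tag register selected by index; a zero mask is rejected
 * above. Values are hypothetical:
 *
 * @code
 * struct rte_flow_action_set_tag tag = {
 *	.data = 1,
 *	.mask = UINT32_MAX,
 *	.index = 0, // hypothetical tag register index
 * };
 * @endcode
 */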
3148
3149 /**
3150  * Check if the count action is shared by the old or the new mechanism.
3151  *
3152  * @param[in] action
3153  *   Pointer to the action structure.
3154  *
3155  * @return
3156  *   True when counter is shared, false otherwise.
3157  */
3158 static inline bool
3159 is_shared_action_count(const struct rte_flow_action *action)
3160 {
3161         const struct rte_flow_action_count *count =
3162                         (const struct rte_flow_action_count *)action->conf;
3163
3164         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3165                 return true;
3166         return !!(count && count->shared);
3167 }
3168
3169 /**
3170  * Validate count action.
3171  *
3172  * @param[in] dev
3173  *   Pointer to rte_eth_dev structure.
3174  * @param[in] shared
3175  *   Indicator if action is shared.
3176  * @param[in] action_flags
3177  *   Holds the actions detected until now.
3178  * @param[out] error
3179  *   Pointer to error structure.
3180  *
3181  * @return
3182  *   0 on success, a negative errno value otherwise and rte_errno is set.
3183  */
3184 static int
3185 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3186                               uint64_t action_flags,
3187                               struct rte_flow_error *error)
3188 {
3189         struct mlx5_priv *priv = dev->data->dev_private;
3190
3191         if (!priv->config.devx)
3192                 goto notsup_err;
3193         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3194                 return rte_flow_error_set(error, EINVAL,
3195                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3196                                           "duplicate count actions set");
3197         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3198             !priv->sh->flow_hit_aso_en)
3199                 return rte_flow_error_set(error, EINVAL,
3200                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3201                                           "old age and shared count combination is not supported");
3202 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3203         return 0;
3204 #endif
3205 notsup_err:
3206         return rte_flow_error_set
3207                       (error, ENOTSUP,
3208                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3209                        NULL,
3210                        "count action not supported");
3211 }
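/*
 * Illustrative sketch (not part of the driver): a shared count action,
 * as detected by is_shared_action_count() above. The id is
 * hypothetical:
 *
 * @code
 * struct rte_flow_action_count count = {
 *	.shared = 1,
 *	.id = 42,
 * };
 * @endcode
 */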
3212
3213 /**
3214  * Validate the L2 encap action.
3215  *
3216  * @param[in] dev
3217  *   Pointer to the rte_eth_dev structure.
3218  * @param[in] action_flags
3219  *   Holds the actions detected until now.
3220  * @param[in] action
3221  *   Pointer to the action structure.
3222  * @param[in] attr
3223  *   Pointer to flow attributes.
3224  * @param[out] error
3225  *   Pointer to error structure.
3226  *
3227  * @return
3228  *   0 on success, a negative errno value otherwise and rte_errno is set.
3229  */
3230 static int
3231 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3232                                  uint64_t action_flags,
3233                                  const struct rte_flow_action *action,
3234                                  const struct rte_flow_attr *attr,
3235                                  struct rte_flow_error *error)
3236 {
3237         const struct mlx5_priv *priv = dev->data->dev_private;
3238
3239         if (!(action->conf))
3240                 return rte_flow_error_set(error, EINVAL,
3241                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3242                                           "configuration cannot be null");
3243         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3244                 return rte_flow_error_set(error, EINVAL,
3245                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3246                                           "can only have a single encap action "
3247                                           "in a flow");
3248         if (!attr->transfer && priv->representor)
3249                 return rte_flow_error_set(error, ENOTSUP,
3250                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3251                                           "encap action for VF representor "
3252                                           "not supported on NIC table");
3253         return 0;
3254 }
3255
3256 /**
3257  * Validate a decap action.
3258  *
3259  * @param[in] dev
3260  *   Pointer to the rte_eth_dev structure.
3261  * @param[in] action_flags
3262  *   Holds the actions detected until now.
3263  * @param[in] action
3264  *   Pointer to the action structure.
3265  * @param[in] item_flags
3266  *   Holds the items detected.
3267  * @param[in] attr
3268  *   Pointer to flow attributes
3269  * @param[out] error
3270  *   Pointer to error structure.
3271  *
3272  * @return
3273  *   0 on success, a negative errno value otherwise and rte_errno is set.
3274  */
3275 static int
3276 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3277                               uint64_t action_flags,
3278                               const struct rte_flow_action *action,
3279                               const uint64_t item_flags,
3280                               const struct rte_flow_attr *attr,
3281                               struct rte_flow_error *error)
3282 {
3283         const struct mlx5_priv *priv = dev->data->dev_private;
3284
3285         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3286             !priv->config.decap_en)
3287                 return rte_flow_error_set(error, ENOTSUP,
3288                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3289                                           "decap is not enabled");
3290         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3291                 return rte_flow_error_set(error, ENOTSUP,
3292                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3293                                           action_flags &
3294                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3295                                           "have a single decap action" : "decap "
3296                                           "after encap is not supported");
3297         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3298                 return rte_flow_error_set(error, EINVAL,
3299                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3300                                           "can't have decap action after"
3301                                           " modify action");
3302         if (attr->egress)
3303                 return rte_flow_error_set(error, ENOTSUP,
3304                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3305                                           NULL,
3306                                           "decap action not supported for "
3307                                           "egress");
3308         if (!attr->transfer && priv->representor)
3309                 return rte_flow_error_set(error, ENOTSUP,
3310                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3311                                           "decap action for VF representor "
3312                                           "not supported on NIC table");
3313         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3314             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3315                 return rte_flow_error_set(error, ENOTSUP,
3316                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3317                                 "VXLAN item should be present for VXLAN decap");
3318         return 0;
3319 }
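/*
 * Illustrative sketch (not part of the driver): per the last check
 * above, vxlan_decap requires a VXLAN item in the pattern.
 * Hypothetical:
 *
 * @code
 * struct rte_flow_action_queue queue = { .index = 0 };
 * struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */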
3320
3321 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3322
3323 /**
3324  * Validate the raw encap and decap actions.
3325  *
3326  * @param[in] dev
3327  *   Pointer to the rte_eth_dev structure.
3328  * @param[in] decap
3329  *   Pointer to the decap action.
3330  * @param[in] encap
3331  *   Pointer to the encap action.
3332  * @param[in] attr
3333  *   Pointer to flow attributes
3334  * @param[in, out] action_flags
3335  *   Holds the actions detected until now.
3336  * @param[out] actions_n
3337  *   Pointer to the number of actions counter.
3338  * @param[in] action
3339  *   Pointer to the action structure.
3340  * @param[in] item_flags
3341  *   Holds the items detected.
3342  * @param[out] error
3343  *   Pointer to error structure.
3344  *
3345  * @return
3346  *   0 on success, a negative errno value otherwise and rte_errno is set.
3347  */
3348 static int
3349 flow_dv_validate_action_raw_encap_decap
3350         (struct rte_eth_dev *dev,
3351          const struct rte_flow_action_raw_decap *decap,
3352          const struct rte_flow_action_raw_encap *encap,
3353          const struct rte_flow_attr *attr, uint64_t *action_flags,
3354          int *actions_n, const struct rte_flow_action *action,
3355          uint64_t item_flags, struct rte_flow_error *error)
3356 {
3357         const struct mlx5_priv *priv = dev->data->dev_private;
3358         int ret;
3359
3360         if (encap && (!encap->size || !encap->data))
3361                 return rte_flow_error_set(error, EINVAL,
3362                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3363                                           "raw encap data cannot be empty");
3364         if (decap && encap) {
3365                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3366                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3367                         /* L3 encap. */
3368                         decap = NULL;
3369                 else if (encap->size <=
3370                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3371                            decap->size >
3372                            MLX5_ENCAPSULATION_DECISION_SIZE)
3373                         /* L3 decap. */
3374                         encap = NULL;
3375                 else if (encap->size >
3376                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3377                            decap->size >
3378                            MLX5_ENCAPSULATION_DECISION_SIZE)
3379                         /* 2 L2 actions: encap and decap. */
3380                         ;
3381                 else
3382                         return rte_flow_error_set(error,
3383                                 ENOTSUP,
3384                                 RTE_FLOW_ERROR_TYPE_ACTION,
3385                                 NULL, "unsupported combination of "
3386                                 "too small raw decap and too small "
3387                                 "raw encap");
3388         }
3389         if (decap) {
3390                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3391                                                     item_flags, attr, error);
3392                 if (ret < 0)
3393                         return ret;
3394                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3395                 ++(*actions_n);
3396         }
3397         if (encap) {
3398                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3399                         return rte_flow_error_set(error, ENOTSUP,
3400                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3401                                                   NULL,
3402                                                   "small raw encap size");
3403                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3404                         return rte_flow_error_set(error, EINVAL,
3405                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3406                                                   NULL,
3407                                                   "more than one encap action");
3408                 if (!attr->transfer && priv->representor)
3409                         return rte_flow_error_set
3410                                         (error, ENOTSUP,
3411                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3412                                          "encap action for VF representor "
3413                                          "not supported on NIC table");
3414                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3415                 ++(*actions_n);
3416         }
3417         return 0;
3418 }
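/*
 * Illustrative worked example (not part of the driver, sizes are
 * hypothetical): MLX5_ENCAPSULATION_DECISION_SIZE splits L2 from L3
 * reformats above. A raw decap larger than the threshold combined with
 * a raw encap at or below it is classified as an L3 decap, so only the
 * decap branch is validated:
 *
 * @code
 * uint8_t outer[OUTER_LEN];   // assumed > MLX5_ENCAPSULATION_DECISION_SIZE
 * uint8_t new_l2[NEW_L2_LEN]; // assumed <= MLX5_ENCAPSULATION_DECISION_SIZE
 * struct rte_flow_action_raw_decap decap = {
 *	.data = outer, .size = sizeof(outer),
 * };
 * struct rte_flow_action_raw_encap encap = {
 *	.data = new_l2, .size = sizeof(new_l2),
 * };
 * @endcode
 */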
3419
3420 /**
3421  * Match encap_decap resource.
3422  *
3423  * @param list
3424  *   Pointer to the hash list.
3425  * @param entry
3426  *   Pointer to the existing resource entry object.
3427  * @param key
3428  *   Key of the new entry.
3429  * @param cb_ctx
3430  *   Pointer to the context with the new encap_decap resource.
3431  *
3432  * @return
3433  *   0 on matching, non-zero otherwise.
3434  */
3435 int
3436 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3437                              struct mlx5_hlist_entry *entry,
3438                              uint64_t key __rte_unused, void *cb_ctx)
3439 {
3440         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3441         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3442         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3443
3444         cache_resource = container_of(entry,
3445                                       struct mlx5_flow_dv_encap_decap_resource,
3446                                       entry);
3447         if (resource->reformat_type == cache_resource->reformat_type &&
3448             resource->ft_type == cache_resource->ft_type &&
3449             resource->flags == cache_resource->flags &&
3450             resource->size == cache_resource->size &&
3451             !memcmp((const void *)resource->buf,
3452                     (const void *)cache_resource->buf,
3453                     resource->size))
3454                 return 0;
3455         return -1;
3456 }
3457
3458 /**
3459  * Allocate and create an encap_decap resource.
3460  *
3461  * @param list
3462  *   Pointer to the hash list.
3463  * @param key
3464  *   Key of the new entry.
3465  * @param cb_ctx
3466  *   Pointer to the context with the new encap_decap resource.
3467  *
3468  * @return
3469  *   Pointer to the created entry on success, NULL otherwise.
3470  */
3471 struct mlx5_hlist_entry *
3472 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3473                               uint64_t key __rte_unused,
3474                               void *cb_ctx)
3475 {
3476         struct mlx5_dev_ctx_shared *sh = list->ctx;
3477         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3478         struct mlx5dv_dr_domain *domain;
3479         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3480         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3481         uint32_t idx;
3482         int ret;
3483
3484         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3485                 domain = sh->fdb_domain;
3486         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3487                 domain = sh->rx_domain;
3488         else
3489                 domain = sh->tx_domain;
3490         /* Register new encap/decap resource. */
3491         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3492                                        &idx);
3493         if (!cache_resource) {
3494                 rte_flow_error_set(ctx->error, ENOMEM,
3495                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3496                                    "cannot allocate resource memory");
3497                 return NULL;
3498         }
3499         *cache_resource = *resource;
3500         cache_resource->idx = idx;
3501         ret = mlx5_flow_os_create_flow_action_packet_reformat
3502                                         (sh->ctx, domain, cache_resource,
3503                                          &cache_resource->action);
3504         if (ret) {
3505                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3506                 rte_flow_error_set(ctx->error, ENOMEM,
3507                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3508                                    NULL, "cannot create action");
3509                 return NULL;
3510         }
3511
3512         return &cache_resource->entry;
3513 }
3514
3515 /**
3516  * Find existing encap/decap resource or create and register a new one.
3517  *
3518  * @param[in, out] dev
3519  *   Pointer to rte_eth_dev structure.
3520  * @param[in, out] resource
3521  *   Pointer to encap/decap resource.
3522  * @param[in, out] dev_flow
3523  *   Pointer to the dev_flow.
3524  * @param[out] error
3525  *   Pointer to error structure.
3526  *
3527  * @return
3528  *   0 on success, a negative errno value otherwise and rte_errno is set.
3529  */
3530 static int
3531 flow_dv_encap_decap_resource_register
3532                         (struct rte_eth_dev *dev,
3533                          struct mlx5_flow_dv_encap_decap_resource *resource,
3534                          struct mlx5_flow *dev_flow,
3535                          struct rte_flow_error *error)
3536 {
3537         struct mlx5_priv *priv = dev->data->dev_private;
3538         struct mlx5_dev_ctx_shared *sh = priv->sh;
3539         struct mlx5_hlist_entry *entry;
3540         union {
3541                 struct {
3542                         uint32_t ft_type:8;
3543                         uint32_t refmt_type:8;
3544                         /*
3545                          * Header reformat actions can be shared between
3546                          * non-root tables. One bit to indicate non-root
3547                          * table or not.
3548                          */
3549                         uint32_t is_root:1;
3550                         uint32_t reserve:15;
3551                 };
3552                 uint32_t v32;
3553         } encap_decap_key = {
3554                 {
3555                         .ft_type = resource->ft_type,
3556                         .refmt_type = resource->reformat_type,
3557                         .is_root = !!dev_flow->dv.group,
3558                         .reserve = 0,
3559                 }
3560         };
3561         struct mlx5_flow_cb_ctx ctx = {
3562                 .error = error,
3563                 .data = resource,
3564         };
3565         uint64_t key64;
3566
3567         resource->flags = dev_flow->dv.group ? 0 : 1;
3568         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3569                                  sizeof(encap_decap_key.v32), 0);
3570         if (resource->reformat_type !=
3571             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3572             resource->size)
3573                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3574         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3575         if (!entry)
3576                 return -rte_errno;
3577         resource = container_of(entry, typeof(*resource), entry);
3578         dev_flow->dv.encap_decap = resource;
3579         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3580         return 0;
3581 }
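/*
 * Illustrative note (not part of the driver): the hash key above folds
 * the ft_type/refmt_type/is_root bit-field into a checksum and, for
 * reformat types carrying data, mixes in the buffer as well. With
 * hypothetical key_bits/buf/size, conceptually:
 *
 * @code
 * uint64_t key = __rte_raw_cksum(&key_bits, sizeof(key_bits), 0);
 * if (size)
 *	key = __rte_raw_cksum(buf, size, key);
 * @endcode
 */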
3582
3583 /**
3584  * Find existing table jump resource or create and register a new one.
3585  *
3586  * @param[in, out] dev
3587  *   Pointer to rte_eth_dev structure.
3588  * @param[in, out] tbl
3589  *   Pointer to flow table resource.
3590  * @param[in, out] dev_flow
3591  *   Pointer to the dev_flow.
3592  * @param[out] error
3593  *   Pointer to error structure.
3594  *
3595  * @return
3596  *   0 on success, a negative errno value otherwise and rte_errno is set.
3597  */
3598 static int
3599 flow_dv_jump_tbl_resource_register
3600                         (struct rte_eth_dev *dev __rte_unused,
3601                          struct mlx5_flow_tbl_resource *tbl,
3602                          struct mlx5_flow *dev_flow,
3603                          struct rte_flow_error *error __rte_unused)
3604 {
3605         struct mlx5_flow_tbl_data_entry *tbl_data =
3606                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3607
3608         MLX5_ASSERT(tbl);
3609         MLX5_ASSERT(tbl_data->jump.action);
3610         dev_flow->handle->rix_jump = tbl_data->idx;
3611         dev_flow->dv.jump = &tbl_data->jump;
3612         return 0;
3613 }
3614
3615 int
3616 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3617                          struct mlx5_cache_entry *entry, void *cb_ctx)
3618 {
3619         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3620         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3621         struct mlx5_flow_dv_port_id_action_resource *res =
3622                         container_of(entry, typeof(*res), entry);
3623
3624         return ref->port_id != res->port_id;
3625 }
3626
3627 struct mlx5_cache_entry *
3628 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3629                           struct mlx5_cache_entry *entry __rte_unused,
3630                           void *cb_ctx)
3631 {
3632         struct mlx5_dev_ctx_shared *sh = list->ctx;
3633         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3634         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3635         struct mlx5_flow_dv_port_id_action_resource *cache;
3636         uint32_t idx;
3637         int ret;
3638
3639         /* Register new port id action resource. */
3640         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3641         if (!cache) {
3642                 rte_flow_error_set(ctx->error, ENOMEM,
3643                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3644                                    "cannot allocate port_id action cache memory");
3645                 return NULL;
3646         }
3647         *cache = *ref;
3648         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3649                                                         ref->port_id,
3650                                                         &cache->action);
3651         if (ret) {
3652                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3653                 rte_flow_error_set(ctx->error, ENOMEM,
3654                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3655                                    "cannot create action");
3656                 return NULL;
3657         }
3658         cache->idx = idx;
3659         return &cache->entry;
3660 }
3661
3662 /**
3663  * Find existing table port ID resource or create and register a new one.
3664  *
3665  * @param[in, out] dev
3666  *   Pointer to rte_eth_dev structure.
3667  * @param[in, out] resource
3668  *   Pointer to port ID action resource.
3669  * @param[in, out] dev_flow
3670  *   Pointer to the dev_flow.
3671  * @param[out] error
3672  *   Pointer to error structure.
3673  *
3674  * @return
3675  *   0 on success, a negative errno value otherwise and rte_errno is set.
3676  */
3677 static int
3678 flow_dv_port_id_action_resource_register
3679                         (struct rte_eth_dev *dev,
3680                          struct mlx5_flow_dv_port_id_action_resource *resource,
3681                          struct mlx5_flow *dev_flow,
3682                          struct rte_flow_error *error)
3683 {
3684         struct mlx5_priv *priv = dev->data->dev_private;
3685         struct mlx5_cache_entry *entry;
3686         struct mlx5_flow_dv_port_id_action_resource *cache;
3687         struct mlx5_flow_cb_ctx ctx = {
3688                 .error = error,
3689                 .data = resource,
3690         };
3691
3692         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3693         if (!entry)
3694                 return -rte_errno;
3695         cache = container_of(entry, typeof(*cache), entry);
3696         dev_flow->dv.port_id_action = cache;
3697         dev_flow->handle->rix_port_id_action = cache->idx;
3698         return 0;
3699 }
3700
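/*
 * Illustrative sketch, not compiled into the driver: typical use of the
 * helper above while translating a PORT_ID fate action. The destination
 * port number is a hypothetical placeholder:
 *
 *	struct mlx5_flow_dv_port_id_action_resource res = {
 *		.port_id = 1,	// hypothetical E-Switch destination
 *	};
 *
 *	if (flow_dv_port_id_action_resource_register(dev, &res,
 *						     dev_flow, error))
 *		return -rte_errno;
 *	// dev_flow->dv.port_id_action->action is now usable in the rule.
 */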
3701 int
3702 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3703                          struct mlx5_cache_entry *entry, void *cb_ctx)
3704 {
3705         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3706         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3707         struct mlx5_flow_dv_push_vlan_action_resource *res =
3708                         container_of(entry, typeof(*res), entry);
3709
3710         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3711 }
3712
3713 struct mlx5_cache_entry *
3714 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3715                           struct mlx5_cache_entry *entry __rte_unused,
3716                           void *cb_ctx)
3717 {
3718         struct mlx5_dev_ctx_shared *sh = list->ctx;
3719         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3720         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3721         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3722         struct mlx5dv_dr_domain *domain;
3723         uint32_t idx;
3724         int ret;
3725
3726         /* Register new push VLAN action resource. */
3727         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3728         if (!cache) {
3729                 rte_flow_error_set(ctx->error, ENOMEM,
3730                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3731                                    "cannot allocate push_vlan action cache memory");
3732                 return NULL;
3733         }
3734         *cache = *ref;
3735         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3736                 domain = sh->fdb_domain;
3737         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3738                 domain = sh->rx_domain;
3739         else
3740                 domain = sh->tx_domain;
3741         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3742                                                         &cache->action);
3743         if (ret) {
3744                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3745                 rte_flow_error_set(ctx->error, ENOMEM,
3746                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3747                                    "cannot create push vlan action");
3748                 return NULL;
3749         }
3750         cache->idx = idx;
3751         return &cache->entry;
3752 }
3753
3754 /**
3755  * Find existing push vlan resource or create and register a new one.
3756  *
3757  * @param[in, out] dev
3758  *   Pointer to rte_eth_dev structure.
3759  * @param[in, out] resource
3760  *   Pointer to push VLAN action resource.
3761  * @param[in, out] dev_flow
3762  *   Pointer to the dev_flow.
3763  * @param[out] error
3764  *   Pointer to error structure.
3765  *
3766  * @return
3767  *   0 on success, a negative errno value otherwise and rte_errno is set.
3768  */
3769 static int
3770 flow_dv_push_vlan_action_resource_register
3771                        (struct rte_eth_dev *dev,
3772                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3773                         struct mlx5_flow *dev_flow,
3774                         struct rte_flow_error *error)
3775 {
3776         struct mlx5_priv *priv = dev->data->dev_private;
3777         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3778         struct mlx5_cache_entry *entry;
3779         struct mlx5_flow_cb_ctx ctx = {
3780                 .error = error,
3781                 .data = resource,
3782         };
3783
3784         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3785         if (!entry)
3786                 return -rte_errno;
3787         cache = container_of(entry, typeof(*cache), entry);
3788
3789         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3790         dev_flow->dv.push_vlan_res = cache;
3791         return 0;
3792 }
3793
3794 /**
3795  * Get the header size of a specific rte_flow_item_type.
3796  *
3797  * @param[in] item_type
3798  *   Tested rte_flow_item_type.
3799  *
3800  * @return
3801  *   Size of the item type's header structure, 0 if void or irrelevant.
3802  */
3803 static size_t
3804 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3805 {
3806         size_t retval;
3807
3808         switch (item_type) {
3809         case RTE_FLOW_ITEM_TYPE_ETH:
3810                 retval = sizeof(struct rte_ether_hdr);
3811                 break;
3812         case RTE_FLOW_ITEM_TYPE_VLAN:
3813                 retval = sizeof(struct rte_vlan_hdr);
3814                 break;
3815         case RTE_FLOW_ITEM_TYPE_IPV4:
3816                 retval = sizeof(struct rte_ipv4_hdr);
3817                 break;
3818         case RTE_FLOW_ITEM_TYPE_IPV6:
3819                 retval = sizeof(struct rte_ipv6_hdr);
3820                 break;
3821         case RTE_FLOW_ITEM_TYPE_UDP:
3822                 retval = sizeof(struct rte_udp_hdr);
3823                 break;
3824         case RTE_FLOW_ITEM_TYPE_TCP:
3825                 retval = sizeof(struct rte_tcp_hdr);
3826                 break;
3827         case RTE_FLOW_ITEM_TYPE_VXLAN:
3828         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3829                 retval = sizeof(struct rte_vxlan_hdr);
3830                 break;
3831         case RTE_FLOW_ITEM_TYPE_GRE:
3832         case RTE_FLOW_ITEM_TYPE_NVGRE:
3833                 retval = sizeof(struct rte_gre_hdr);
3834                 break;
3835         case RTE_FLOW_ITEM_TYPE_MPLS:
3836                 retval = sizeof(struct rte_mpls_hdr);
3837                 break;
3838         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3839         default:
3840                 retval = 0;
3841                 break;
3842         }
3843         return retval;
3844 }
3845
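/*
 * Illustrative note: for the classic VXLAN encapsulation chain the
 * helper above yields, item by item,
 *
 *	ETH(14) + IPV4(20) + UDP(8) + VXLAN(8) = 50 bytes,
 *
 * which is the raw reformat buffer length that
 * flow_dv_convert_encap_data() below accumulates in *size.
 */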
3846 #define MLX5_ENCAP_IPV4_VERSION         0x40
3847 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3848 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3849 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3850 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3851 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3852 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3853
3854 /**
3855  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
3856  *
3857  * @param[in] items
3858  *   Pointer to rte_flow_item objects list.
3859  * @param[out] buf
3860  *   Pointer to the output buffer.
3861  * @param[out] size
3862  *   Pointer to the output buffer size.
3863  * @param[out] error
3864  *   Pointer to the error structure.
3865  *
3866  * @return
3867  *   0 on success, a negative errno value otherwise and rte_errno is set.
3868  */
3869 static int
3870 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3871                            size_t *size, struct rte_flow_error *error)
3872 {
3873         struct rte_ether_hdr *eth = NULL;
3874         struct rte_vlan_hdr *vlan = NULL;
3875         struct rte_ipv4_hdr *ipv4 = NULL;
3876         struct rte_ipv6_hdr *ipv6 = NULL;
3877         struct rte_udp_hdr *udp = NULL;
3878         struct rte_vxlan_hdr *vxlan = NULL;
3879         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3880         struct rte_gre_hdr *gre = NULL;
3881         size_t len;
3882         size_t temp_size = 0;
3883
3884         if (!items)
3885                 return rte_flow_error_set(error, EINVAL,
3886                                           RTE_FLOW_ERROR_TYPE_ACTION,
3887                                           NULL, "invalid empty data");
3888         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3889                 len = flow_dv_get_item_hdr_len(items->type);
3890                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3891                         return rte_flow_error_set(error, EINVAL,
3892                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3893                                                   (void *)items->type,
3894                                                   "items total size is too big"
3895                                                   " for encap action");
3896                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3897                 switch (items->type) {
3898                 case RTE_FLOW_ITEM_TYPE_ETH:
3899                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3900                         break;
3901                 case RTE_FLOW_ITEM_TYPE_VLAN:
3902                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3903                         if (!eth)
3904                                 return rte_flow_error_set(error, EINVAL,
3905                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3906                                                 (void *)items->type,
3907                                                 "eth header not found");
3908                         if (!eth->ether_type)
3909                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3910                         break;
3911                 case RTE_FLOW_ITEM_TYPE_IPV4:
3912                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3913                         if (!vlan && !eth)
3914                                 return rte_flow_error_set(error, EINVAL,
3915                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3916                                                 (void *)items->type,
3917                                                 "neither eth nor vlan"
3918                                                 " header found");
3919                         if (vlan && !vlan->eth_proto)
3920                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3921                         else if (eth && !eth->ether_type)
3922                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3923                         if (!ipv4->version_ihl)
3924                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3925                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3926                         if (!ipv4->time_to_live)
3927                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3928                         break;
3929                 case RTE_FLOW_ITEM_TYPE_IPV6:
3930                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3931                         if (!vlan && !eth)
3932                                 return rte_flow_error_set(error, EINVAL,
3933                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3934                                                 (void *)items->type,
3935                                                 "neither eth nor vlan"
3936                                                 " header found");
3937                         if (vlan && !vlan->eth_proto)
3938                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3939                         else if (eth && !eth->ether_type)
3940                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3941                         if (!ipv6->vtc_flow)
3942                                 ipv6->vtc_flow =
3943                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3944                         if (!ipv6->hop_limits)
3945                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3946                         break;
3947                 case RTE_FLOW_ITEM_TYPE_UDP:
3948                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3949                         if (!ipv4 && !ipv6)
3950                                 return rte_flow_error_set(error, EINVAL,
3951                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3952                                                 (void *)items->type,
3953                                                 "ip header not found");
3954                         if (ipv4 && !ipv4->next_proto_id)
3955                                 ipv4->next_proto_id = IPPROTO_UDP;
3956                         else if (ipv6 && !ipv6->proto)
3957                                 ipv6->proto = IPPROTO_UDP;
3958                         break;
3959                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3960                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3961                         if (!udp)
3962                                 return rte_flow_error_set(error, EINVAL,
3963                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3964                                                 (void *)items->type,
3965                                                 "udp header not found");
3966                         if (!udp->dst_port)
3967                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3968                         if (!vxlan->vx_flags)
3969                                 vxlan->vx_flags =
3970                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3971                         break;
3972                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3973                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3974                         if (!udp)
3975                                 return rte_flow_error_set(error, EINVAL,
3976                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3977                                                 (void *)items->type,
3978                                                 "udp header not found");
3979                         if (!vxlan_gpe->proto)
3980                                 return rte_flow_error_set(error, EINVAL,
3981                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3982                                                 (void *)items->type,
3983                                                 "next protocol not found");
3984                         if (!udp->dst_port)
3985                                 udp->dst_port =
3986                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3987                         if (!vxlan_gpe->vx_flags)
3988                                 vxlan_gpe->vx_flags =
3989                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3990                         break;
3991                 case RTE_FLOW_ITEM_TYPE_GRE:
3992                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3993                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3994                         if (!gre->proto)
3995                                 return rte_flow_error_set(error, EINVAL,
3996                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3997                                                 (void *)items->type,
3998                                                 "next protocol not found");
3999                         if (!ipv4 && !ipv6)
4000                                 return rte_flow_error_set(error, EINVAL,
4001                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4002                                                 (void *)items->type,
4003                                                 "ip header not found");
4004                         if (ipv4 && !ipv4->next_proto_id)
4005                                 ipv4->next_proto_id = IPPROTO_GRE;
4006                         else if (ipv6 && !ipv6->proto)
4007                                 ipv6->proto = IPPROTO_GRE;
4008                         break;
4009                 case RTE_FLOW_ITEM_TYPE_VOID:
4010                         break;
4011                 default:
4012                         return rte_flow_error_set(error, EINVAL,
4013                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4014                                                   (void *)items->type,
4015                                                   "unsupported item type");
4016                         break;
4017                 }
4018                 temp_size += len;
4019         }
4020         *size = temp_size;
4021         return 0;
4022 }
4023
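/*
 * Illustrative sketch, not compiled into the driver: a minimal VXLAN
 * item chain as flow_dv_convert_encap_data() consumes it. Every field
 * left zero in a spec is filled in with the MLX5_ENCAP_* defaults above
 * (EtherType, IHL/TTL, UDP destination port, VXLAN flags). Addresses
 * and VNI are hypothetical placeholders:
 *
 *	struct rte_flow_item_eth eth = { 0 };	// MACs zeroed for brevity
 *	struct rte_flow_item_ipv4 ip = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 1)),
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 2)),
 *	};
 *	struct rte_flow_item_udp udp = { 0 };	// dst_port defaulted
 *	struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
 *	struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *	size_t size;
 *
 *	if (flow_dv_convert_encap_data(items, buf, &size, error))
 *		return -rte_errno;	// on success, size == 50 here
 */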
4024 static int
4025 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4026 {
4027         struct rte_ether_hdr *eth = NULL;
4028         struct rte_vlan_hdr *vlan = NULL;
4029         struct rte_ipv6_hdr *ipv6 = NULL;
4030         struct rte_udp_hdr *udp = NULL;
4031         char *next_hdr;
4032         uint16_t proto;
4033
4034         eth = (struct rte_ether_hdr *)data;
4035         next_hdr = (char *)(eth + 1);
4036         proto = RTE_BE16(eth->ether_type);
4037
4038         /* VLAN skipping */
4039         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4040                 vlan = (struct rte_vlan_hdr *)next_hdr;
4041                 proto = RTE_BE16(vlan->eth_proto);
4042                 next_hdr += sizeof(struct rte_vlan_hdr);
4043         }
4044
4045         /* HW calculates the IPv4 checksum; no need to proceed. */
4046         if (proto == RTE_ETHER_TYPE_IPV4)
4047                 return 0;
4048
4049         /* Non-IPv4/IPv6 headers are not supported. */
4050         if (proto != RTE_ETHER_TYPE_IPV6) {
4051                 return rte_flow_error_set(error, ENOTSUP,
4052                                           RTE_FLOW_ERROR_TYPE_ACTION,
4053                                           NULL, "Cannot offload non IPv4/IPv6");
4054         }
4055
4056         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4057
4058         /* Ignore non-UDP traffic. */
4059         if (ipv6->proto != IPPROTO_UDP)
4060                 return 0;
4061
4062         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4063         udp->dgram_cksum = 0;
4064
4065         return 0;
4066 }
4067
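/*
 * Note on the helper above: HW recomputes the IPv4 checksum after the
 * reformat, so only the IPv6 + UDP combination needs attention.
 * Zeroing dgram_cksum marks the outer UDP checksum as "not present",
 * which is permitted for tunnel traffic over IPv6 (see RFC 6935). The
 * walk it performs is simply:
 *
 *	ETH -> any number of VLAN/QinQ tags -> IPv6 -> UDP (cksum = 0)
 */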
4068 /**
4069  * Convert L2 encap action to DV specification.
4070  *
4071  * @param[in] dev
4072  *   Pointer to rte_eth_dev structure.
4073  * @param[in] action
4074  *   Pointer to action structure.
4075  * @param[in, out] dev_flow
4076  *   Pointer to the mlx5_flow.
4077  * @param[in] transfer
4078  *   Mark if the flow is E-Switch flow.
4079  * @param[out] error
4080  *   Pointer to the error structure.
4081  *
4082  * @return
4083  *   0 on success, a negative errno value otherwise and rte_errno is set.
4084  */
4085 static int
4086 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4087                                const struct rte_flow_action *action,
4088                                struct mlx5_flow *dev_flow,
4089                                uint8_t transfer,
4090                                struct rte_flow_error *error)
4091 {
4092         const struct rte_flow_item *encap_data;
4093         const struct rte_flow_action_raw_encap *raw_encap_data;
4094         struct mlx5_flow_dv_encap_decap_resource res = {
4095                 .reformat_type =
4096                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4097                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4098                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4099         };
4100
4101         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4102                 raw_encap_data =
4103                         (const struct rte_flow_action_raw_encap *)action->conf;
4104                 res.size = raw_encap_data->size;
4105                 memcpy(res.buf, raw_encap_data->data, res.size);
4106         } else {
4107                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4108                         encap_data =
4109                                 ((const struct rte_flow_action_vxlan_encap *)
4110                                                 action->conf)->definition;
4111                 else
4112                         encap_data =
4113                                 ((const struct rte_flow_action_nvgre_encap *)
4114                                                 action->conf)->definition;
4115                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4116                                                &res.size, error))
4117                         return -rte_errno;
4118         }
4119         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4120                 return -rte_errno;
4121         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4122                 return rte_flow_error_set(error, EINVAL,
4123                                           RTE_FLOW_ERROR_TYPE_ACTION,
4124                                           NULL, "can't create L2 encap action");
4125         return 0;
4126 }
4127
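/*
 * Illustrative sketch, not compiled into the driver: the action shapes
 * flow_dv_create_action_l2_encap() accepts. "items" stands for a chain
 * like the one sketched after flow_dv_convert_encap_data(); the raw
 * buffer contents are hypothetical:
 *
 *	struct rte_flow_action_vxlan_encap ve = { .definition = items };
 *	struct rte_flow_action act_def = {
 *		.type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *		.conf = &ve,
 *	};
 *
 *	uint8_t hdrs[50];	// prebuilt outer headers (hypothetical)
 *	struct rte_flow_action_raw_encap re = {
 *		.data = hdrs,
 *		.size = sizeof(hdrs),
 *	};
 *	struct rte_flow_action act_raw = {
 *		.type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
 *		.conf = &re,
 *	};
 */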
4128 /**
4129  * Convert L2 decap action to DV specification.
4130  *
4131  * @param[in] dev
4132  *   Pointer to rte_eth_dev structure.
4133  * @param[in, out] dev_flow
4134  *   Pointer to the mlx5_flow.
4135  * @param[in] transfer
4136  *   Mark if the flow is E-Switch flow.
4137  * @param[out] error
4138  *   Pointer to the error structure.
4139  *
4140  * @return
4141  *   0 on success, a negative errno value otherwise and rte_errno is set.
4142  */
4143 static int
4144 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4145                                struct mlx5_flow *dev_flow,
4146                                uint8_t transfer,
4147                                struct rte_flow_error *error)
4148 {
4149         struct mlx5_flow_dv_encap_decap_resource res = {
4150                 .size = 0,
4151                 .reformat_type =
4152                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4153                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4154                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4155         };
4156
4157         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4158                 return rte_flow_error_set(error, EINVAL,
4159                                           RTE_FLOW_ERROR_TYPE_ACTION,
4160                                           NULL, "can't create L2 decap action");
4161         return 0;
4162 }
4163
4164 /**
4165  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4166  *
4167  * @param[in] dev
4168  *   Pointer to rte_eth_dev structure.
4169  * @param[in] action
4170  *   Pointer to action structure.
4171  * @param[in, out] dev_flow
4172  *   Pointer to the mlx5_flow.
4173  * @param[in] attr
4174  *   Pointer to the flow attributes.
4175  * @param[out] error
4176  *   Pointer to the error structure.
4177  *
4178  * @return
4179  *   0 on success, a negative errno value otherwise and rte_errno is set.
4180  */
4181 static int
4182 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4183                                 const struct rte_flow_action *action,
4184                                 struct mlx5_flow *dev_flow,
4185                                 const struct rte_flow_attr *attr,
4186                                 struct rte_flow_error *error)
4187 {
4188         const struct rte_flow_action_raw_encap *encap_data;
4189         struct mlx5_flow_dv_encap_decap_resource res;
4190
4191         memset(&res, 0, sizeof(res));
4192         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4193         res.size = encap_data->size;
4194         memcpy(res.buf, encap_data->data, res.size);
4195         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4196                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4197                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4198         if (attr->transfer)
4199                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4200         else
4201                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4202                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4203         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4204                 return rte_flow_error_set(error, EINVAL,
4205                                           RTE_FLOW_ERROR_TYPE_ACTION,
4206                                           NULL, "can't create encap action");
4207         return 0;
4208 }
4209
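/*
 * Note on the reformat type selection above: a raw buffer shorter than
 * MLX5_ENCAPSULATION_DECISION_SIZE cannot hold a complete outer
 * L2 + L3 + L4 stack, so it is taken to be the bare L2 header to
 * restore after an L3 tunnel decapsulation (L3_TUNNEL_TO_L2); a buffer
 * of at least that size is programmed as the full set of outer headers
 * for an L2-to-L3 tunnel encapsulation (L2_TO_L3_TUNNEL).
 */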
4210 /**
4211  * Create action push VLAN.
4212  *
4213  * @param[in] dev
4214  *   Pointer to rte_eth_dev structure.
4215  * @param[in] attr
4216  *   Pointer to the flow attributes.
4217  * @param[in] vlan
4218  *   Pointer to the vlan to push to the Ethernet header.
4219  * @param[in, out] dev_flow
4220  *   Pointer to the mlx5_flow.
4221  * @param[out] error
4222  *   Pointer to the error structure.
4223  *
4224  * @return
4225  *   0 on success, a negative errno value otherwise and rte_errno is set.
4226  */
4227 static int
4228 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4229                                 const struct rte_flow_attr *attr,
4230                                 const struct rte_vlan_hdr *vlan,
4231                                 struct mlx5_flow *dev_flow,
4232                                 struct rte_flow_error *error)
4233 {
4234         struct mlx5_flow_dv_push_vlan_action_resource res;
4235
4236         memset(&res, 0, sizeof(res));
4237         res.vlan_tag =
4238                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4239                                  vlan->vlan_tci);
4240         if (attr->transfer)
4241                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4242         else
4243                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4244                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4245         return flow_dv_push_vlan_action_resource_register
4246                                             (dev, &res, dev_flow, error);
4247 }
4248
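/*
 * Worked example for the vlan_tag packing above (hypothetical values):
 * with eth_proto = 0x8100 and a TCI carrying PCP 3 / VID 100,
 *
 *	tci      = (3 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100 = 0x6064
 *	vlan_tag = rte_cpu_to_be_32(0x8100 << 16 | 0x6064)
 *		 = 0x81006064 in network byte order
 */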
4249 /**
4250  * Validate the modify-header actions.
4251  *
4252  * @param[in] action_flags
4253  *   Holds the actions detected until now.
4254  * @param[in] action
4255  *   Pointer to the modify action.
4256  * @param[out] error
4257  *   Pointer to error structure.
4258  *
4259  * @return
4260  *   0 on success, a negative errno value otherwise and rte_errno is set.
4261  */
4262 static int
4263 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4264                                    const struct rte_flow_action *action,
4265                                    struct rte_flow_error *error)
4266 {
4267         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4268                 return rte_flow_error_set(error, EINVAL,
4269                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4270                                           NULL, "action configuration not set");
4271         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4272                 return rte_flow_error_set(error, EINVAL,
4273                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4274                                           "can't have encap action before"
4275                                           " modify action");
4276         return 0;
4277 }
4278
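/*
 * Note on the ordering rule above: rejecting a rewrite that follows an
 * encap keeps all modify-header actions operating on the original
 * packet headers rather than on the freshly added outer ones. E.g.:
 *
 *	SET_IPV4_SRC, VXLAN_ENCAP, QUEUE	-> accepted
 *	VXLAN_ENCAP, SET_IPV4_SRC, QUEUE	-> rejected (EINVAL)
 */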
4279 /**
4280  * Validate the modify-header MAC address actions.
4281  *
4282  * @param[in] action_flags
4283  *   Holds the actions detected until now.
4284  * @param[in] action
4285  *   Pointer to the modify action.
4286  * @param[in] item_flags
4287  *   Holds the items detected.
4288  * @param[out] error
4289  *   Pointer to error structure.
4290  *
4291  * @return
4292  *   0 on success, a negative errno value otherwise and rte_errno is set.
4293  */
4294 static int
4295 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4296                                    const struct rte_flow_action *action,
4297                                    const uint64_t item_flags,
4298                                    struct rte_flow_error *error)
4299 {
4300         int ret = 0;
4301
4302         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4303         if (!ret) {
4304                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4305                         return rte_flow_error_set(error, EINVAL,
4306                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4307                                                   NULL,
4308                                                   "no L2 item in pattern");
4309         }
4310         return ret;
4311 }
4312
4313 /**
4314  * Validate the modify-header IPv4 address actions.
4315  *
4316  * @param[in] action_flags
4317  *   Holds the actions detected until now.
4318  * @param[in] action
4319  *   Pointer to the modify action.
4320  * @param[in] item_flags
4321  *   Holds the items detected.
4322  * @param[out] error
4323  *   Pointer to error structure.
4324  *
4325  * @return
4326  *   0 on success, a negative errno value otherwise and rte_errno is set.
4327  */
4328 static int
4329 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4330                                     const struct rte_flow_action *action,
4331                                     const uint64_t item_flags,
4332                                     struct rte_flow_error *error)
4333 {
4334         int ret = 0;
4335         uint64_t layer;
4336
4337         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4338         if (!ret) {
4339                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4340                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4341                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4342                 if (!(item_flags & layer))
4343                         return rte_flow_error_set(error, EINVAL,
4344                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4345                                                   NULL,
4346                                                   "no ipv4 item in pattern");
4347         }
4348         return ret;
4349 }
4350
4351 /**
4352  * Validate the modify-header IPv6 address actions.
4353  *
4354  * @param[in] action_flags
4355  *   Holds the actions detected until now.
4356  * @param[in] action
4357  *   Pointer to the modify action.
4358  * @param[in] item_flags
4359  *   Holds the items detected.
4360  * @param[out] error
4361  *   Pointer to error structure.
4362  *
4363  * @return
4364  *   0 on success, a negative errno value otherwise and rte_errno is set.
4365  */
4366 static int
4367 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4368                                     const struct rte_flow_action *action,
4369                                     const uint64_t item_flags,
4370                                     struct rte_flow_error *error)
4371 {
4372         int ret = 0;
4373         uint64_t layer;
4374
4375         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4376         if (!ret) {
4377                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4378                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4379                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4380                 if (!(item_flags & layer))
4381                         return rte_flow_error_set(error, EINVAL,
4382                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4383                                                   NULL,
4384                                                   "no ipv6 item in pattern");
4385         }
4386         return ret;
4387 }
4388
4389 /**
4390  * Validate the modify-header TP actions.
4391  *
4392  * @param[in] action_flags
4393  *   Holds the actions detected until now.
4394  * @param[in] action
4395  *   Pointer to the modify action.
4396  * @param[in] item_flags
4397  *   Holds the items detected.
4398  * @param[out] error
4399  *   Pointer to error structure.
4400  *
4401  * @return
4402  *   0 on success, a negative errno value otherwise and rte_errno is set.
4403  */
4404 static int
4405 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4406                                   const struct rte_flow_action *action,
4407                                   const uint64_t item_flags,
4408                                   struct rte_flow_error *error)
4409 {
4410         int ret = 0;
4411         uint64_t layer;
4412
4413         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4414         if (!ret) {
4415                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4416                                  MLX5_FLOW_LAYER_INNER_L4 :
4417                                  MLX5_FLOW_LAYER_OUTER_L4;
4418                 if (!(item_flags & layer))
4419                         return rte_flow_error_set(error, EINVAL,
4420                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4421                                                   NULL, "no transport layer "
4422                                                   "in pattern");
4423         }
4424         return ret;
4425 }
4426
4427 /**
4428  * Validate the modify-header actions of increment/decrement
4429  * TCP Sequence-number.
4430  *
4431  * @param[in] action_flags
4432  *   Holds the actions detected until now.
4433  * @param[in] action
4434  *   Pointer to the modify action.
4435  * @param[in] item_flags
4436  *   Holds the items detected.
4437  * @param[out] error
4438  *   Pointer to error structure.
4439  *
4440  * @return
4441  *   0 on success, a negative errno value otherwise and rte_errno is set.
4442  */
4443 static int
4444 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4445                                        const struct rte_flow_action *action,
4446                                        const uint64_t item_flags,
4447                                        struct rte_flow_error *error)
4448 {
4449         int ret = 0;
4450         uint64_t layer;
4451
4452         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4453         if (!ret) {
4454                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4455                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4456                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4457                 if (!(item_flags & layer))
4458                         return rte_flow_error_set(error, EINVAL,
4459                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4460                                                   NULL, "no TCP item in"
4461                                                   " pattern");
4462                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4463                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4464                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4465                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4466                         return rte_flow_error_set(error, EINVAL,
4467                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4468                                                   NULL,
4469                                                   "cannot decrease and increase"
4470                                                   " TCP sequence number"
4471                                                   " at the same time");
4472         }
4473         return ret;
4474 }
4475
4476 /**
4477  * Validate the modify-header actions of increment/decrement
4478  * TCP Acknowledgment number.
4479  *
4480  * @param[in] action_flags
4481  *   Holds the actions detected until now.
4482  * @param[in] action
4483  *   Pointer to the modify action.
4484  * @param[in] item_flags
4485  *   Holds the items detected.
4486  * @param[out] error
4487  *   Pointer to error structure.
4488  *
4489  * @return
4490  *   0 on success, a negative errno value otherwise and rte_errno is set.
4491  */
4492 static int
4493 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4494                                        const struct rte_flow_action *action,
4495                                        const uint64_t item_flags,
4496                                        struct rte_flow_error *error)
4497 {
4498         int ret = 0;
4499         uint64_t layer;
4500
4501         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4502         if (!ret) {
4503                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4504                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4505                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4506                 if (!(item_flags & layer))
4507                         return rte_flow_error_set(error, EINVAL,
4508                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4509                                                   NULL, "no TCP item in"
4510                                                   " pattern");
4511                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4512                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4513                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4514                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4515                         return rte_flow_error_set(error, EINVAL,
4516                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4517                                                   NULL,
4518                                                   "cannot decrease and increase"
4519                                                   " TCP acknowledgment number"
4520                                                   " at the same time");
4521         }
4522         return ret;
4523 }
4524
4525 /**
4526  * Validate the modify-header TTL actions.
4527  *
4528  * @param[in] action_flags
4529  *   Holds the actions detected until now.
4530  * @param[in] action
4531  *   Pointer to the modify action.
4532  * @param[in] item_flags
4533  *   Holds the items detected.
4534  * @param[out] error
4535  *   Pointer to error structure.
4536  *
4537  * @return
4538  *   0 on success, a negative errno value otherwise and rte_errno is set.
4539  */
4540 static int
4541 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4542                                    const struct rte_flow_action *action,
4543                                    const uint64_t item_flags,
4544                                    struct rte_flow_error *error)
4545 {
4546         int ret = 0;
4547         uint64_t layer;
4548
4549         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4550         if (!ret) {
4551                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4552                                  MLX5_FLOW_LAYER_INNER_L3 :
4553                                  MLX5_FLOW_LAYER_OUTER_L3;
4554                 if (!(item_flags & layer))
4555                         return rte_flow_error_set(error, EINVAL,
4556                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4557                                                   NULL,
4558                                                   "no IP protocol in pattern");
4559         }
4560         return ret;
4561 }
4562
4563 /**
4564  * Validate the generic modify field actions.
4565  * @param[in] dev
4566  *   Pointer to the rte_eth_dev structure.
4567  * @param[in] action_flags
4568  *   Holds the actions detected until now.
4569  * @param[in] action
4570  *   Pointer to the modify action.
4571  * @param[in] attr
4572  *   Pointer to the flow attributes.
4573  * @param[out] error
4574  *   Pointer to error structure.
4575  *
4576  * @return
4577  *   Number of header fields to modify (0 or more) on success,
4578  *   a negative errno value otherwise and rte_errno is set.
4579  */
4580 static int
4581 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4582                                    const uint64_t action_flags,
4583                                    const struct rte_flow_action *action,
4584                                    const struct rte_flow_attr *attr,
4585                                    struct rte_flow_error *error)
4586 {
4587         int ret = 0;
4588         struct mlx5_priv *priv = dev->data->dev_private;
4589         struct mlx5_dev_config *config = &priv->config;
4590         const struct rte_flow_action_modify_field *action_modify_field =
4591                 action->conf;
4592         uint32_t dst_width =
4593                 mlx5_flow_item_field_width(action_modify_field->dst.field);
4594         uint32_t src_width =
4595                 mlx5_flow_item_field_width(action_modify_field->src.field);
4596
4597         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4598         if (ret)
4599                 return ret;
4600
4601         if (action_modify_field->width == 0)
4602                 return rte_flow_error_set(error, EINVAL,
4603                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4604                                 "no bits are requested to be modified");
4605         else if (action_modify_field->width > dst_width ||
4606                  action_modify_field->width > src_width)
4607                 return rte_flow_error_set(error, EINVAL,
4608                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4609                                 "cannot modify more bits than"
4610                                 " the width of a field");
4611         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4612             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4613                 if ((action_modify_field->dst.offset +
4614                      action_modify_field->width > dst_width) ||
4615                     (action_modify_field->dst.offset % 32))
4616                         return rte_flow_error_set(error, EINVAL,
4617                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4618                                         "destination offset is too big"
4619                                         " or not aligned to 4 bytes");
4620                 if (action_modify_field->dst.level &&
4621                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4622                         return rte_flow_error_set(error, ENOTSUP,
4623                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4624                                         "inner header fields modification"
4625                                         " is not supported");
4626         }
4627         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4628             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4629                 if (!attr->transfer && !attr->group)
4630                         return rte_flow_error_set(error, ENOTSUP,
4631                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4632                                         "modify field action is not"
4633                                         " supported for group 0");
4634                 if ((action_modify_field->src.offset +
4635                      action_modify_field->width > src_width) ||
4636                     (action_modify_field->src.offset % 32))
4637                         return rte_flow_error_set(error, EINVAL,
4638                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4639                                         "source offset is too big"
4640                                         " or not aligned to 4 bytes");
4641                 if (action_modify_field->src.level &&
4642                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4643                         return rte_flow_error_set(error, ENOTSUP,
4644                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4645                                         "inner header fields modification"
4646                                         " is not supported");
4647         }
4648         if (action_modify_field->dst.field ==
4649             action_modify_field->src.field)
4650                 return rte_flow_error_set(error, EINVAL,
4651                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4652                                 "source and destination fields"
4653                                 " cannot be the same");
4654         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4655             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4656                 return rte_flow_error_set(error, EINVAL,
4657                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4658                                 "immediate value or a pointer to it"
4659                                 " cannot be used as a destination");
4660         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4661             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4662                 return rte_flow_error_set(error, ENOTSUP,
4663                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4664                                 "modifications of an arbitrary"
4665                                 " place in a packet is not supported");
4666         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4667             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4668                 return rte_flow_error_set(error, ENOTSUP,
4669                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4670                                 "modifications of the 802.1Q Tag"
4671                                 " Identifier is not supported");
4672         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4673             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4674                 return rte_flow_error_set(error, ENOTSUP,
4675                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4676                                 "modifications of the VXLAN Network"
4677                                 " Identifier is not supported");
4678         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4679             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4680                 return rte_flow_error_set(error, ENOTSUP,
4681                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4682                                 "modifications of the GENEVE Network"
4683                                 " Identifier is not supported");
4684         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4685             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4686             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4687             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4688                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4689                     !mlx5_flow_ext_mreg_supported(dev))
4690                         return rte_flow_error_set(error, ENOTSUP,
4691                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4692                                         "cannot modify mark or metadata without"
4693                                         " extended metadata register support");
4694         }
4695         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4696                 return rte_flow_error_set(error, ENOTSUP,
4697                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4698                                 "add and sub operations"
4699                                 " are not supported");
4700         return (action_modify_field->width / 32) +
4701                !!(action_modify_field->width % 32);
4702 }
4703
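/*
 * Worked example for the return value above: a MODIFY_FIELD action with
 * width = 48 bits consumes (48 / 32) + !!(48 % 32) = 1 + 1 = 2 modify
 * header fields, while width = 32 consumes exactly 1.
 */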
4704 /**
4705  * Validate jump action.
4706  *
4707  * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, or NULL.
 * @param[in] action
4708  *   Pointer to the jump action.
4709  * @param[in] action_flags
4710  *   Holds the actions detected until now.
4711  * @param[in] attributes
4712  *   Pointer to flow attributes
4713  * @param[in] external
4714  *   Action belongs to flow rule created by request external to PMD.
4715  * @param[out] error
4716  *   Pointer to error structure.
4717  *
4718  * @return
4719  *   0 on success, a negative errno value otherwise and rte_errno is set.
4720  */
4721 static int
4722 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4723                              const struct mlx5_flow_tunnel *tunnel,
4724                              const struct rte_flow_action *action,
4725                              uint64_t action_flags,
4726                              const struct rte_flow_attr *attributes,
4727                              bool external, struct rte_flow_error *error)
4728 {
4729         uint32_t target_group, table;
4730         int ret = 0;
4731         struct flow_grp_info grp_info = {
4732                 .external = !!external,
4733                 .transfer = !!attributes->transfer,
4734                 .fdb_def_rule = 1,
4735                 .std_tbl_fix = 0
4736         };
4737         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4738                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4739                 return rte_flow_error_set(error, EINVAL,
4740                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4741                                           "can't have 2 fate actions in"
4742                                           " the same flow");
4743         if (!action->conf)
4744                 return rte_flow_error_set(error, EINVAL,
4745                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4746                                           NULL, "action configuration not set");
4747         target_group =
4748                 ((const struct rte_flow_action_jump *)action->conf)->group;
4749         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4750                                        &grp_info, error);
4751         if (ret)
4752                 return ret;
4753         if (attributes->group == target_group &&
4754             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4755                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4756                 return rte_flow_error_set(error, EINVAL,
4757                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4758                                           "target group must be other than"
4759                                           " the current flow group");
4760         return 0;
4761 }
4762
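/*
 * Illustrative sketch, not compiled into the driver: a jump action the
 * validation above accepts for a rule created in group 0:
 *
 *	struct rte_flow_action_jump jump = { .group = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * Jumping to the rule's own group would be rejected unless a tunnel
 * set/match action is present.
 */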
4763 /**
4764  * Validate the port_id action.
4765  *
4766  * @param[in] dev
4767  *   Pointer to rte_eth_dev structure.
4768  * @param[in] action_flags
4769  *   Bit-fields that holds the actions detected until now.
4770  * @param[in] action
4771  *   Port_id RTE action structure.
4772  * @param[in] attr
4773  *   Attributes of flow that includes this action.
4774  * @param[out] error
4775  *   Pointer to error structure.
4776  *
4777  * @return
4778  *   0 on success, a negative errno value otherwise and rte_errno is set.
4779  */
4780 static int
4781 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4782                                 uint64_t action_flags,
4783                                 const struct rte_flow_action *action,
4784                                 const struct rte_flow_attr *attr,
4785                                 struct rte_flow_error *error)
4786 {
4787         const struct rte_flow_action_port_id *port_id;
4788         struct mlx5_priv *act_priv;
4789         struct mlx5_priv *dev_priv;
4790         uint16_t port;
4791
4792         if (!attr->transfer)
4793                 return rte_flow_error_set(error, ENOTSUP,
4794                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4795                                           NULL,
4796                                           "port id action is valid in transfer"
4797                                           " mode only");
4798         if (!action || !action->conf)
4799                 return rte_flow_error_set(error, ENOTSUP,
4800                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4801                                           NULL,
4802                                           "port id action parameters must be"
4803                                           " specified");
4804         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4805                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4806                 return rte_flow_error_set(error, EINVAL,
4807                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4808                                           "can have only one fate action in"
4809                                           " a flow");
4810         dev_priv = mlx5_dev_to_eswitch_info(dev);
4811         if (!dev_priv)
4812                 return rte_flow_error_set(error, rte_errno,
4813                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4814                                           NULL,
4815                                           "failed to obtain E-Switch info");
4816         port_id = action->conf;
4817         port = port_id->original ? dev->data->port_id : port_id->id;
4818         act_priv = mlx5_port_to_eswitch_info(port, false);
4819         if (!act_priv)
4820                 return rte_flow_error_set
4821                                 (error, rte_errno,
4822                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4823                                  "failed to obtain E-Switch port id for port");
4824         if (act_priv->domain_id != dev_priv->domain_id)
4825                 return rte_flow_error_set
4826                                 (error, EINVAL,
4827                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4828                                  "port does not belong to"
4829                                  " E-Switch being configured");
4830         return 0;
4831 }
4832
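/*
 * Illustrative sketch, not compiled into the driver: a PORT_ID fate
 * action the validation above accepts on a transfer rule. The port
 * number is a hypothetical placeholder and must belong to the same
 * E-Switch domain as the device the rule is created on:
 *
 *	struct rte_flow_action_port_id pid = { .id = 1, .original = 0 };
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *		.conf = &pid,
 *	};
 */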
4833 /**
4834  * Get the maximum number of modify header actions.
4835  *
4836  * @param dev
4837  *   Pointer to rte_eth_dev structure.
4838  * @param flags
4839  *   Flags bits to check if root level.
4840  *
4841  * @return
4842  *   Max number of modify header actions device can support.
4843  */
4844 static inline unsigned int
4845 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4846                               uint64_t flags)
4847 {
4848         /*
4849          * There's no way to directly query the max capacity from FW.
4850          * The maximal value on root table should be assumed to be supported.
4851          */
4852         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4853                 return MLX5_MAX_MODIFY_NUM;
4854         else
4855                 return MLX5_ROOT_TBL_MODIFY_NUM;
4856 }
4857
4858 /**
4859  * Validate the meter action.
4860  *
4861  * @param[in] dev
4862  *   Pointer to rte_eth_dev structure.
4863  * @param[in] action_flags
4864  *   Bit-fields that holds the actions detected until now.
4865  * @param[in] action
4866  *   Pointer to the meter action.
4867  * @param[in] attr
4868  *   Attributes of flow that includes this action.
4869  * @param[out] error
4870  *   Pointer to error structure.
4871  *
4872  * @return
4873  *   0 on success, a negative errno value otherwise and rte_errno is set.
4874  */
4875 static int
4876 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4877                                 uint64_t action_flags,
4878                                 const struct rte_flow_action *action,
4879                                 const struct rte_flow_attr *attr,
4880                                 bool *def_policy,
4881                                 struct rte_flow_error *error)
4882 {
4883         struct mlx5_priv *priv = dev->data->dev_private;
4884         const struct rte_flow_action_meter *am = action->conf;
4885         struct mlx5_flow_meter_info *fm;
4886         struct mlx5_flow_meter_policy *mtr_policy;
4887         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
4888
4889         if (!am)
4890                 return rte_flow_error_set(error, EINVAL,
4891                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4892                                           "meter action conf is NULL");
4893
4894         if (action_flags & MLX5_FLOW_ACTION_METER)
4895                 return rte_flow_error_set(error, ENOTSUP,
4896                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4897                                           "meter chaining not supported");
4898         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4899                 return rte_flow_error_set(error, ENOTSUP,
4900                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4901                                           "meter with jump not supported");
4902         if (!priv->mtr_en)
4903                 return rte_flow_error_set(error, ENOTSUP,
4904                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4905                                           NULL,
4906                                           "meter action not supported");
4907         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
4908         if (!fm)
4909                 return rte_flow_error_set(error, EINVAL,
4910                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4911                                           "Meter not found");
4912         /* ASO meter can always be shared by different domains. */
4913         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
4914             !(fm->transfer == attr->transfer ||
4915               (!fm->ingress && !attr->ingress && attr->egress) ||
4916               (!fm->egress && !attr->egress && attr->ingress)))
4917                 return rte_flow_error_set(error, EINVAL,
4918                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4919                         "Flow attributes domain is either invalid "
4920                         "or has a domain conflict with the current "
4921                         "meter attributes");
4922         if (fm->def_policy) {
4923                 if (!((attr->transfer &&
4924                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
4925                         (attr->egress &&
4926                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
4927                         (attr->ingress &&
4928                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
4929                         return rte_flow_error_set(error, EINVAL,
4930                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4931                                           "Flow attributes domain "
4932                                           "has a conflict with the current "
4933                                           "meter domain attributes");
4934                 *def_policy = true;
4935         } else {
4936                 mtr_policy = mlx5_flow_meter_policy_find(dev,
4937                                                 fm->policy_id, NULL);
4938                 if (!mtr_policy)
4939                         return rte_flow_error_set(error, EINVAL,
4940                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4941                                           "Invalid policy id for meter");
4942                 if (!((attr->transfer && mtr_policy->transfer) ||
4943                         (attr->egress && mtr_policy->egress) ||
4944                         (attr->ingress && mtr_policy->ingress)))
4945                         return rte_flow_error_set(error, EINVAL,
4946                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4947                                           "Flow attributes domain "
4948                                           "has a conflict with the current "
4949                                           "meter domain attributes");
4950                 *def_policy = false;
4951         }
4952         return 0;
4953 }
4954
4955 /**
4956  * Validate the age action.
4957  *
4958  * @param[in] action_flags
4959  *   Holds the actions detected until now.
4960  * @param[in] action
4961  *   Pointer to the age action.
4962  * @param[in] dev
4963  *   Pointer to the Ethernet device structure.
4964  * @param[out] error
4965  *   Pointer to error structure.
4966  *
4967  * @return
4968  *   0 on success, a negative errno value otherwise and rte_errno is set.
4969  */
4970 static int
4971 flow_dv_validate_action_age(uint64_t action_flags,
4972                             const struct rte_flow_action *action,
4973                             struct rte_eth_dev *dev,
4974                             struct rte_flow_error *error)
4975 {
4976         struct mlx5_priv *priv = dev->data->dev_private;
4977         const struct rte_flow_action_age *age = action->conf;
4978
4979         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4980             !priv->sh->aso_age_mng))
4981                 return rte_flow_error_set(error, ENOTSUP,
4982                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4983                                           NULL,
4984                                           "age action not supported");
4985         if (!(action->conf))
4986                 return rte_flow_error_set(error, EINVAL,
4987                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4988                                           "configuration cannot be null");
4989         if (!(age->timeout))
4990                 return rte_flow_error_set(error, EINVAL,
4991                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4992                                           "invalid timeout value 0");
4993         if (action_flags & MLX5_FLOW_ACTION_AGE)
4994                 return rte_flow_error_set(error, EINVAL,
4995                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4996                                           "duplicate age actions set");
4997         return 0;
4998 }
4999
5000 /**
5001  * Validate the modify-header IPv4 DSCP actions.
5002  *
5003  * @param[in] action_flags
5004  *   Holds the actions detected until now.
5005  * @param[in] action
5006  *   Pointer to the modify action.
5007  * @param[in] item_flags
5008  *   Holds the items detected.
5009  * @param[out] error
5010  *   Pointer to error structure.
5011  *
5012  * @return
5013  *   0 on success, a negative errno value otherwise and rte_errno is set.
5014  */
5015 static int
5016 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5017                                          const struct rte_flow_action *action,
5018                                          const uint64_t item_flags,
5019                                          struct rte_flow_error *error)
5020 {
5021         int ret = 0;
5022
5023         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5024         if (!ret) {
5025                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5026                         return rte_flow_error_set(error, EINVAL,
5027                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5028                                                   NULL,
5029                                                   "no ipv4 item in pattern");
5030         }
5031         return ret;
5032 }
5033
5034 /**
5035  * Validate the modify-header IPv6 DSCP actions.
5036  *
5037  * @param[in] action_flags
5038  *   Holds the actions detected until now.
5039  * @param[in] action
5040  *   Pointer to the modify action.
5041  * @param[in] item_flags
5042  *   Holds the items detected.
5043  * @param[out] error
5044  *   Pointer to error structure.
5045  *
5046  * @return
5047  *   0 on success, a negative errno value otherwise and rte_errno is set.
5048  */
5049 static int
5050 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5051                                          const struct rte_flow_action *action,
5052                                          const uint64_t item_flags,
5053                                          struct rte_flow_error *error)
5054 {
5055         int ret = 0;
5056
5057         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5058         if (!ret) {
5059                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5060                         return rte_flow_error_set(error, EINVAL,
5061                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5062                                                   NULL,
5063                                                   "no ipv6 item in pattern");
5064         }
5065         return ret;
5066 }
5067
5068 /**
5069  * Match modify-header resource.
5070  *
5071  * @param list
5072  *   Pointer to the hash list.
5073  * @param entry
5074  *   Pointer to the existing resource entry object.
5075  * @param key
5076  *   Key of the new entry.
5077  * @param cb_ctx
5078  *   Pointer to the context holding the new modify-header resource.
5079  *
5080  * @return
5081  *   0 on matching, non-zero otherwise.
5082  */
5083 int
5084 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5085                         struct mlx5_hlist_entry *entry,
5086                         uint64_t key __rte_unused, void *cb_ctx)
5087 {
5088         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5089         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5090         struct mlx5_flow_dv_modify_hdr_resource *resource =
5091                         container_of(entry, typeof(*resource), entry);
5092         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5093
5094         key_len += ref->actions_num * sizeof(ref->actions[0]);
5095         return ref->actions_num != resource->actions_num ||
5096                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5097 }
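
/*
 * Layout note (illustrative; field names other than ft_type and actions are
 * hypothetical): the match key starts at the ft_type member and runs to the
 * end of the structure, then continues over the variable-length actions
 * array, so two resources are equal only when every field from ft_type
 * onward and all modification commands are byte-identical:
 *
 *     struct mlx5_flow_dv_modify_hdr_resource {
 *             ...                     // entry, action handle, etc. (not keyed)
 *             ft_type;                // <-- key starts here
 *             ...                     // flags, actions_num, ... (keyed)
 *             actions[];              // keyed: actions_num commands
 *     };
 */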
5098
5099 struct mlx5_hlist_entry *
5100 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5101                          void *cb_ctx)
5102 {
5103         struct mlx5_dev_ctx_shared *sh = list->ctx;
5104         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5105         struct mlx5dv_dr_domain *ns;
5106         struct mlx5_flow_dv_modify_hdr_resource *entry;
5107         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5108         int ret;
5109         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5110         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5111
5112         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5113                             SOCKET_ID_ANY);
5114         if (!entry) {
5115                 rte_flow_error_set(ctx->error, ENOMEM,
5116                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5117                                    "cannot allocate resource memory");
5118                 return NULL;
5119         }
5120         rte_memcpy(&entry->ft_type,
5121                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5122                    key_len + data_len);
5123         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5124                 ns = sh->fdb_domain;
5125         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5126                 ns = sh->tx_domain;
5127         else
5128                 ns = sh->rx_domain;
5129         ret = mlx5_flow_os_create_flow_action_modify_header
5130                                         (sh->ctx, ns, entry,
5131                                          data_len, &entry->action);
5132         if (ret) {
5133                 mlx5_free(entry);
5134                 rte_flow_error_set(ctx->error, ENOMEM,
5135                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5136                                    NULL, "cannot create modification action");
5137                 return NULL;
5138         }
5139         return &entry->entry;
5140 }
5141
5142 /**
5143  * Validate the sample action.
5144  *
5145  * @param[in, out] action_flags
5146  *   Holds the actions detected until now.
5147  * @param[in] action
5148  *   Pointer to the sample action.
5149  * @param[in] dev
5150  *   Pointer to the Ethernet device structure.
5151  * @param[in] attr
5152  *   Attributes of flow that includes this action.
5153  * @param[in] item_flags
5154  *   Holds the items detected.
5155  * @param[in] rss
5156  *   Pointer to the RSS action.
5157  * @param[out] sample_rss
5158  *   Pointer to the RSS action in sample action list.
5159  * @param[out] count
5160  *   Pointer to the COUNT action in sample action list.
5161  * @param[out] fdb_mirror_limit
5162  *   Pointer to the FDB mirror limitation flag.
5163  * @param[out] error
5164  *   Pointer to error structure.
5165  *
5166  * @return
5167  *   0 on success, a negative errno value otherwise and rte_errno is set.
5168  */
5169 static int
5170 flow_dv_validate_action_sample(uint64_t *action_flags,
5171                                const struct rte_flow_action *action,
5172                                struct rte_eth_dev *dev,
5173                                const struct rte_flow_attr *attr,
5174                                uint64_t item_flags,
5175                                const struct rte_flow_action_rss *rss,
5176                                const struct rte_flow_action_rss **sample_rss,
5177                                const struct rte_flow_action_count **count,
5178                                int *fdb_mirror_limit,
5179                                struct rte_flow_error *error)
5180 {
5181         struct mlx5_priv *priv = dev->data->dev_private;
5182         struct mlx5_dev_config *dev_conf = &priv->config;
5183         const struct rte_flow_action_sample *sample = action->conf;
5184         const struct rte_flow_action *act;
5185         uint64_t sub_action_flags = 0;
5186         uint16_t queue_index = 0xFFFF;
5187         int actions_n = 0;
5188         int ret;
5189
5190         if (!sample)
5191                 return rte_flow_error_set(error, EINVAL,
5192                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5193                                           "configuration cannot be NULL");
5194         if (sample->ratio == 0)
5195                 return rte_flow_error_set(error, EINVAL,
5196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5197                                           "ratio value starts from 1");
5198         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5199                 return rte_flow_error_set(error, ENOTSUP,
5200                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5201                                           NULL,
5202                                           "sample action not supported");
5203         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5204                 return rte_flow_error_set(error, EINVAL,
5205                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5206                                           "Multiple sample actions not "
5207                                           "supported");
5208         if (*action_flags & MLX5_FLOW_ACTION_METER)
5209                 return rte_flow_error_set(error, EINVAL,
5210                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5211                                           "wrong action order, meter should "
5212                                           "be after sample action");
5213         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5214                 return rte_flow_error_set(error, EINVAL,
5215                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5216                                           "wrong action order, jump should "
5217                                           "be after sample action");
5218         act = sample->actions;
5219         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5220                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5221                         return rte_flow_error_set(error, ENOTSUP,
5222                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5223                                                   act, "too many actions");
5224                 switch (act->type) {
5225                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5226                         ret = mlx5_flow_validate_action_queue(act,
5227                                                               sub_action_flags,
5228                                                               dev,
5229                                                               attr, error);
5230                         if (ret < 0)
5231                                 return ret;
5232                         queue_index = ((const struct rte_flow_action_queue *)
5233                                                         (act->conf))->index;
5234                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5235                         ++actions_n;
5236                         break;
5237                 case RTE_FLOW_ACTION_TYPE_RSS:
5238                         *sample_rss = act->conf;
5239                         ret = mlx5_flow_validate_action_rss(act,
5240                                                             sub_action_flags,
5241                                                             dev, attr,
5242                                                             item_flags,
5243                                                             error);
5244                         if (ret < 0)
5245                                 return ret;
5246                         if (rss && *sample_rss &&
5247                             ((*sample_rss)->level != rss->level ||
5248                             (*sample_rss)->types != rss->types))
5249                                 return rte_flow_error_set(error, ENOTSUP,
5250                                         RTE_FLOW_ERROR_TYPE_ACTION,
5251                                         NULL,
5252                                         "Can't use different RSS types "
5253                                         "or levels in the same flow");
5254                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5255                                 queue_index = (*sample_rss)->queue[0];
5256                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5257                         ++actions_n;
5258                         break;
5259                 case RTE_FLOW_ACTION_TYPE_MARK:
5260                         ret = flow_dv_validate_action_mark(dev, act,
5261                                                            sub_action_flags,
5262                                                            attr, error);
5263                         if (ret < 0)
5264                                 return ret;
5265                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5266                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5267                                                 MLX5_FLOW_ACTION_MARK_EXT;
5268                         else
5269                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5270                         ++actions_n;
5271                         break;
5272                 case RTE_FLOW_ACTION_TYPE_COUNT:
5273                         ret = flow_dv_validate_action_count
5274                                 (dev, is_shared_action_count(act),
5275                                  *action_flags | sub_action_flags,
5276                                  error);
5277                         if (ret < 0)
5278                                 return ret;
5279                         *count = act->conf;
5280                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5281                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5282                         ++actions_n;
5283                         break;
5284                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5285                         ret = flow_dv_validate_action_port_id(dev,
5286                                                               sub_action_flags,
5287                                                               act,
5288                                                               attr,
5289                                                               error);
5290                         if (ret)
5291                                 return ret;
5292                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5293                         ++actions_n;
5294                         break;
5295                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5296                         ret = flow_dv_validate_action_raw_encap_decap
5297                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5298                                  &actions_n, action, item_flags, error);
5299                         if (ret < 0)
5300                                 return ret;
5301                         ++actions_n;
5302                         break;
5303                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5304                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5305                         ret = flow_dv_validate_action_l2_encap(dev,
5306                                                                sub_action_flags,
5307                                                                act, attr,
5308                                                                error);
5309                         if (ret < 0)
5310                                 return ret;
5311                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5312                         ++actions_n;
5313                         break;
5314                 default:
5315                         return rte_flow_error_set(error, ENOTSUP,
5316                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5317                                                   NULL,
5318                                                   "Unsupported action in the "
5319                                                   "sample action list");
5320                 }
5321         }
5322         if (attr->ingress && !attr->transfer) {
5323                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5324                                           MLX5_FLOW_ACTION_RSS)))
5325                         return rte_flow_error_set(error, EINVAL,
5326                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5327                                                   NULL,
5328                                                   "Ingress must have a dest "
5329                                                   "QUEUE for Sample");
5330         } else if (attr->egress && !attr->transfer) {
5331                 return rte_flow_error_set(error, ENOTSUP,
5332                                           RTE_FLOW_ERROR_TYPE_ACTION,
5333                                           NULL,
5334                                           "Sample only supports ingress "
5335                                           "or E-Switch");
5336         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5337                 MLX5_ASSERT(attr->transfer);
5338                 if (sample->ratio > 1)
5339                         return rte_flow_error_set(error, ENOTSUP,
5340                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5341                                                   NULL,
5342                                                   "E-Switch doesn't support "
5343                                                   "any optional action "
5344                                                   "for sampling");
5345                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5346                         return rte_flow_error_set(error, ENOTSUP,
5347                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5348                                                   NULL,
5349                                                   "unsupported action QUEUE");
5350                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5351                         return rte_flow_error_set(error, ENOTSUP,
5352                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5353                                                   NULL,
5354                                                   "unsupported action RSS");
5355                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5356                         return rte_flow_error_set(error, EINVAL,
5357                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5358                                                   NULL,
5359                                                   "E-Switch must have a dest "
5360                                                   "port for mirroring");
5361                 if (!priv->config.hca_attr.reg_c_preserve &&
5362                      priv->representor_id != -1)
5363                         *fdb_mirror_limit = 1;
5364         }
5365         /* Continue validation for Xcap actions. */
5366         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5367             (queue_index == 0xFFFF ||
5368              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5369                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5370                      MLX5_FLOW_XCAP_ACTIONS)
5371                         return rte_flow_error_set(error, ENOTSUP,
5372                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5373                                                   NULL, "encap and decap "
5374                                                   "combination isn't "
5375                                                   "supported");
5376                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5377                                                         MLX5_FLOW_ACTION_ENCAP))
5378                         return rte_flow_error_set(error, ENOTSUP,
5379                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5380                                                   NULL, "encap is not supported"
5381                                                   " for ingress traffic");
5382         }
5383         return 0;
5384 }
5385
5386 /**
5387  * Find existing modify-header resource or create and register a new one.
5388  *
5389  * @param[in, out] dev
5390  *   Pointer to rte_eth_dev structure.
5391  * @param[in, out] resource
5392  *   Pointer to modify-header resource.
5393  * @param[in, out] dev_flow
5394  *   Pointer to the dev_flow.
5395  * @param[out] error
5396  *   Pointer to error structure.
5397  *
5398  * @return
5399  *   0 on success, otherwise a negative errno value and rte_errno is set.
5400  */
5401 static int
5402 flow_dv_modify_hdr_resource_register
5403                         (struct rte_eth_dev *dev,
5404                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5405                          struct mlx5_flow *dev_flow,
5406                          struct rte_flow_error *error)
5407 {
5408         struct mlx5_priv *priv = dev->data->dev_private;
5409         struct mlx5_dev_ctx_shared *sh = priv->sh;
5410         uint32_t key_len = sizeof(*resource) -
5411                            offsetof(typeof(*resource), ft_type) +
5412                            resource->actions_num * sizeof(resource->actions[0]);
5413         struct mlx5_hlist_entry *entry;
5414         struct mlx5_flow_cb_ctx ctx = {
5415                 .error = error,
5416                 .data = resource,
5417         };
5418         uint64_t key64;
5419
5420         resource->flags = dev_flow->dv.group ? 0 :
5421                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5422         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5423                                     resource->flags))
5424                 return rte_flow_error_set(error, EOVERFLOW,
5425                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5426                                           "too many modify header items");
5427         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5428         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5429         if (!entry)
5430                 return -rte_errno;
5431         resource = container_of(entry, typeof(*resource), entry);
5432         dev_flow->handle->dvh.modify_hdr = resource;
5433         return 0;
5434 }
5435
5436 /**
5437  * Get DV flow counter by index.
5438  *
5439  * @param[in] dev
5440  *   Pointer to the Ethernet device structure.
5441  * @param[in] idx
5442  *   mlx5 flow counter index in the container.
5443  * @param[out] ppool
5444  *   mlx5 flow counter pool in the container.
5445  *
5446  * @return
5447  *   Pointer to the counter, NULL otherwise.
5448  */
5449 static struct mlx5_flow_counter *
5450 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5451                            uint32_t idx,
5452                            struct mlx5_flow_counter_pool **ppool)
5453 {
5454         struct mlx5_priv *priv = dev->data->dev_private;
5455         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5456         struct mlx5_flow_counter_pool *pool;
5457
5458         /* Decrease to original index and clear shared bit. */
5459         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5460         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5461         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5462         MLX5_ASSERT(pool);
5463         if (ppool)
5464                 *ppool = pool;
5465         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5466 }
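
/*
 * Worked example of the index scheme (conceptual, derived from the decoding
 * above): an index is built as
 *
 *     idx = pool_index * MLX5_COUNTERS_PER_POOL + offset_in_pool + 1
 *
 * (see MLX5_MAKE_CNT_IDX), with MLX5_CNT_SHARED_OFFSET OR-ed in for shared
 * counters; index 0 is reserved to mean "no counter". Decoding reverses it:
 *
 *     uint32_t raw = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
 *     uint32_t pool_index = raw / MLX5_COUNTERS_PER_POOL;
 *     uint32_t offset = raw % MLX5_COUNTERS_PER_POOL;
 */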
5467
5468 /**
5469  * Check the devx counter belongs to the pool.
5470  *
5471  * @param[in] pool
5472  *   Pointer to the counter pool.
5473  * @param[in] id
5474  *   The counter devx ID.
5475  *
5476  * @return
5477  *   True if counter belongs to the pool, false otherwise.
5478  */
5479 static bool
5480 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5481 {
5482         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5483                    MLX5_COUNTERS_PER_POOL;
5484
5485         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5486                 return true;
5487         return false;
5488 }
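
/*
 * Worked example, assuming MLX5_COUNTERS_PER_POOL is 512 (the real value
 * comes from the mlx5 headers): a pool whose min_dcs->id is 1030 covers the
 * devx ID range [1024, 1536), since base = (1030 / 512) * 512 = 1024.
 */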
5489
5490 /**
5491  * Get a pool by devx counter ID.
5492  *
5493  * @param[in] cmng
5494  *   Pointer to the counter management.
5495  * @param[in] id
5496  *   The counter devx ID.
5497  *
5498  * @return
5499  *   The counter pool pointer if it exists, NULL otherwise.
5500  */
5501 static struct mlx5_flow_counter_pool *
5502 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5503 {
5504         uint32_t i;
5505         struct mlx5_flow_counter_pool *pool = NULL;
5506
5507         rte_spinlock_lock(&cmng->pool_update_sl);
5508         /* Check last used pool. */
5509         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5510             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5511                 pool = cmng->pools[cmng->last_pool_idx];
5512                 goto out;
5513         }
5514         /* ID out of range means no suitable pool in the container. */
5515         if (id > cmng->max_id || id < cmng->min_id)
5516                 goto out;
5517         /*
5518          * Find the pool from the end of the container, since counter IDs
5519          * are mostly sequentially increasing, so the last pool is usually
5520          * the needed one.
5521          */
5522         i = cmng->n_valid;
5523         while (i--) {
5524                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5525
5526                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5527                         pool = pool_tmp;
5528                         break;
5529                 }
5530         }
5531 out:
5532         rte_spinlock_unlock(&cmng->pool_update_sl);
5533         return pool;
5534 }
5535
5536 /**
5537  * Resize a counter container.
5538  *
5539  * @param[in] dev
5540  *   Pointer to the Ethernet device structure.
5541  *
5542  * @return
5543  *   0 on success, otherwise negative errno value and rte_errno is set.
5544  */
5545 static int
5546 flow_dv_container_resize(struct rte_eth_dev *dev)
5547 {
5548         struct mlx5_priv *priv = dev->data->dev_private;
5549         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5550         void *old_pools = cmng->pools;
5551         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5552         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5553         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5554
5555         if (!pools) {
5556                 rte_errno = ENOMEM;
5557                 return -ENOMEM;
5558         }
5559         if (old_pools)
5560                 memcpy(pools, old_pools, cmng->n *
5561                                        sizeof(struct mlx5_flow_counter_pool *));
5562         cmng->n = resize;
5563         cmng->pools = pools;
5564         if (old_pools)
5565                 mlx5_free(old_pools);
5566         return 0;
5567 }
5568
5569 /**
5570  * Query a devx flow counter.
5571  *
5572  * @param[in] dev
5573  *   Pointer to the Ethernet device structure.
5574  * @param[in] counter
5575  *   Index to the flow counter.
5576  * @param[out] pkts
5577  *   The statistics value of packets.
5578  * @param[out] bytes
5579  *   The statistics value of bytes.
5580  *
5581  * @return
5582  *   0 on success, otherwise a negative errno value and rte_errno is set.
5583  */
5584 static inline int
5585 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5586                      uint64_t *bytes)
5587 {
5588         struct mlx5_priv *priv = dev->data->dev_private;
5589         struct mlx5_flow_counter_pool *pool = NULL;
5590         struct mlx5_flow_counter *cnt;
5591         int offset;
5592
5593         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5594         MLX5_ASSERT(pool);
5595         if (priv->sh->cmng.counter_fallback)
5596                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5597                                         0, pkts, bytes, 0, NULL, NULL, 0);
5598         rte_spinlock_lock(&pool->sl);
5599         if (!pool->raw) {
5600                 *pkts = 0;
5601                 *bytes = 0;
5602         } else {
5603                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5604                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5605                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5606         }
5607         rte_spinlock_unlock(&pool->sl);
5608         return 0;
5609 }
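
/*
 * Usage sketch (hypothetical, error handling elided): the function returns
 * absolute hardware values; per-flow statistics are derived by subtracting
 * the reset values saved at allocation time by flow_dv_counter_alloc():
 *
 *     uint64_t pkts, bytes;
 *
 *     if (!_flow_dv_query_count(dev, counter_idx, &pkts, &bytes)) {
 *             uint64_t flow_pkts = pkts - cnt->hits;    // delta since reset
 *             uint64_t flow_bytes = bytes - cnt->bytes;
 *     }
 *
 * "counter_idx" and "cnt" are assumed to come from a prior allocation.
 */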
5610
5611 /**
5612  * Create and initialize a new counter pool.
5613  *
5614  * @param[in] dev
5615  *   Pointer to the Ethernet device structure.
5616  * @param[in] dcs
5617  *   The devX counter handle.
5618  * @param[in] age
5619  *   Whether the pool is for counters that were allocated for aging.
5622  *
5623  * @return
5624  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5625  */
5626 static struct mlx5_flow_counter_pool *
5627 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5628                     uint32_t age)
5629 {
5630         struct mlx5_priv *priv = dev->data->dev_private;
5631         struct mlx5_flow_counter_pool *pool;
5632         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5633         bool fallback = priv->sh->cmng.counter_fallback;
5634         uint32_t size = sizeof(*pool);
5635
5636         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5637         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5638         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5639         if (!pool) {
5640                 rte_errno = ENOMEM;
5641                 return NULL;
5642         }
5643         pool->raw = NULL;
5644         pool->is_aged = !!age;
5645         pool->query_gen = 0;
5646         pool->min_dcs = dcs;
5647         rte_spinlock_init(&pool->sl);
5648         rte_spinlock_init(&pool->csl);
5649         TAILQ_INIT(&pool->counters[0]);
5650         TAILQ_INIT(&pool->counters[1]);
5651         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5652         rte_spinlock_lock(&cmng->pool_update_sl);
5653         pool->index = cmng->n_valid;
5654         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5655                 mlx5_free(pool);
5656                 rte_spinlock_unlock(&cmng->pool_update_sl);
5657                 return NULL;
5658         }
5659         cmng->pools[pool->index] = pool;
5660         cmng->n_valid++;
5661         if (unlikely(fallback)) {
5662                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5663
5664                 if (base < cmng->min_id)
5665                         cmng->min_id = base;
5666                 if (base > cmng->max_id)
5667                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5668                 cmng->last_pool_idx = pool->index;
5669         }
5670         rte_spinlock_unlock(&cmng->pool_update_sl);
5671         return pool;
5672 }
5673
5674 /**
5675  * Prepare a new counter and/or a new counter pool.
5676  *
5677  * @param[in] dev
5678  *   Pointer to the Ethernet device structure.
5679  * @param[out] cnt_free
5680  *   Where to put the pointer of a new counter.
5681  * @param[in] age
5682  *   Whether the pool is for counter that was allocated for aging.
5683  *
5684  * @return
5685  *   The counter pool pointer and @p cnt_free is set on success,
5686  *   NULL otherwise and rte_errno is set.
5687  */
5688 static struct mlx5_flow_counter_pool *
5689 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5690                              struct mlx5_flow_counter **cnt_free,
5691                              uint32_t age)
5692 {
5693         struct mlx5_priv *priv = dev->data->dev_private;
5694         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5695         struct mlx5_flow_counter_pool *pool;
5696         struct mlx5_counters tmp_tq;
5697         struct mlx5_devx_obj *dcs = NULL;
5698         struct mlx5_flow_counter *cnt;
5699         enum mlx5_counter_type cnt_type =
5700                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5701         bool fallback = priv->sh->cmng.counter_fallback;
5702         uint32_t i;
5703
5704         if (fallback) {
5705                 /* bulk_bitmap must be 0 for single counter allocation. */
5706                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5707                 if (!dcs)
5708                         return NULL;
5709                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5710                 if (!pool) {
5711                         pool = flow_dv_pool_create(dev, dcs, age);
5712                         if (!pool) {
5713                                 mlx5_devx_cmd_destroy(dcs);
5714                                 return NULL;
5715                         }
5716                 }
5717                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5718                 cnt = MLX5_POOL_GET_CNT(pool, i);
5719                 cnt->pool = pool;
5720                 cnt->dcs_when_free = dcs;
5721                 *cnt_free = cnt;
5722                 return pool;
5723         }
5724         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5725         if (!dcs) {
5726                 rte_errno = ENODATA;
5727                 return NULL;
5728         }
5729         pool = flow_dv_pool_create(dev, dcs, age);
5730         if (!pool) {
5731                 mlx5_devx_cmd_destroy(dcs);
5732                 return NULL;
5733         }
5734         TAILQ_INIT(&tmp_tq);
5735         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5736                 cnt = MLX5_POOL_GET_CNT(pool, i);
5737                 cnt->pool = pool;
5738                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5739         }
5740         rte_spinlock_lock(&cmng->csl[cnt_type]);
5741         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5742         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5743         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5744         (*cnt_free)->pool = pool;
5745         return pool;
5746 }
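
/*
 * Note on the 0x4 bulk bitmap above (assumed encoding, following the device
 * interface convention that bit i requests a bulk of 128 << i counters):
 *
 *     uint32_t bulk_bitmap = 0x4;                          // bit 2 set
 *     uint32_t bulk_size = 128 << rte_bsf32(bulk_bitmap);  // 4 * 128 = 512
 *
 * which is expected to match MLX5_COUNTERS_PER_POOL, so one devx object
 * backs a whole pool, while fallback mode allocates single counters
 * (bulk_bitmap == 0) and files them into the pool covering their devx ID.
 */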
5747
5748 /**
5749  * Allocate a flow counter.
5750  *
5751  * @param[in] dev
5752  *   Pointer to the Ethernet device structure.
5753  * @param[in] age
5754  *   Whether the counter was allocated for aging.
5755  *
5756  * @return
5757  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5758  */
5759 static uint32_t
5760 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5761 {
5762         struct mlx5_priv *priv = dev->data->dev_private;
5763         struct mlx5_flow_counter_pool *pool = NULL;
5764         struct mlx5_flow_counter *cnt_free = NULL;
5765         bool fallback = priv->sh->cmng.counter_fallback;
5766         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5767         enum mlx5_counter_type cnt_type =
5768                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5769         uint32_t cnt_idx;
5770
5771         if (!priv->config.devx) {
5772                 rte_errno = ENOTSUP;
5773                 return 0;
5774         }
5775         /* Get free counters from container. */
5776         rte_spinlock_lock(&cmng->csl[cnt_type]);
5777         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5778         if (cnt_free)
5779                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5780         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5781         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5782                 goto err;
5783         pool = cnt_free->pool;
5784         if (fallback)
5785                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5786         /* Create a DV counter action only on the first use. */
5787         if (!cnt_free->action) {
5788                 uint16_t offset;
5789                 struct mlx5_devx_obj *dcs;
5790                 int ret;
5791
5792                 if (!fallback) {
5793                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5794                         dcs = pool->min_dcs;
5795                 } else {
5796                         offset = 0;
5797                         dcs = cnt_free->dcs_when_free;
5798                 }
5799                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5800                                                             &cnt_free->action);
5801                 if (ret) {
5802                         rte_errno = errno;
5803                         goto err;
5804                 }
5805         }
5806         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5807                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5808         /* Update the counter reset values. */
5809         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5810                                  &cnt_free->bytes))
5811                 goto err;
5812         if (!fallback && !priv->sh->cmng.query_thread_on)
5813                 /* Start the asynchronous batch query by the host thread. */
5814                 mlx5_set_query_alarm(priv->sh);
5815         /*
5816          * When the count action isn't shared (by ID), shared_info field is
5817          * used for indirect action API's refcnt.
5818          * When the counter action is shared neither by ID nor by the
5819          * indirect action API, the shared info refcnt must be 1.
5820          */
5821         cnt_free->shared_info.refcnt = 1;
5822         return cnt_idx;
5823 err:
5824         if (cnt_free) {
5825                 cnt_free->pool = pool;
5826                 if (fallback)
5827                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5828                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5829                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5830                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5831         }
5832         return 0;
5833 }
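
/*
 * Allocation/release sketch (hypothetical, for illustration): the returned
 * index travels with the flow and is eventually recycled by
 * flow_dv_counter_free():
 *
 *     uint32_t idx = flow_dv_counter_alloc(dev, 0);     // non-aging counter
 *
 *     if (idx) {
 *             ...                     // attach the counter action to a flow
 *             flow_dv_counter_free(dev, idx);   // put it back when done
 *     }
 */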
5834
5835 /**
5836  * Allocate a shared flow counter.
5837  *
5838  * @param[in] ctx
5839  *   Pointer to the shared counter configuration.
5840  * @param[in] data
5841  *   Pointer to save the allocated counter index.
5842  *
5843  * @return
5844  *   0 on success; the allocated counter index is stored in @p data.
5845  */
5847 static int32_t
5848 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5849 {
5850         struct mlx5_shared_counter_conf *conf = ctx;
5851         struct rte_eth_dev *dev = conf->dev;
5852         struct mlx5_flow_counter *cnt;
5853
5854         data->dword = flow_dv_counter_alloc(dev, 0);
5855         data->dword |= MLX5_CNT_SHARED_OFFSET;
5856         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5857         cnt->shared_info.id = conf->id;
5858         return 0;
5859 }
5860
5861 /**
5862  * Get a shared flow counter.
5863  *
5864  * @param[in] dev
5865  *   Pointer to the Ethernet device structure.
5866  * @param[in] id
5867  *   Counter identifier.
5868  *
5869  * @return
5870  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5871  */
5872 static uint32_t
5873 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5874 {
5875         struct mlx5_priv *priv = dev->data->dev_private;
5876         struct mlx5_shared_counter_conf conf = {
5877                 .dev = dev,
5878                 .id = id,
5879         };
5880         union mlx5_l3t_data data = {
5881                 .dword = 0,
5882         };
5883
5884         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5885                                flow_dv_counter_alloc_shared_cb, &conf);
5886         return data.dword;
5887 }
5888
5889 /**
5890  * Get age param from counter index.
5891  *
5892  * @param[in] dev
5893  *   Pointer to the Ethernet device structure.
5894  * @param[in] counter
5895  *   Index to the counter handler.
5896  *
5897  * @return
5898  *   The aging parameter specified for the counter index.
5899  */
5900 static struct mlx5_age_param*
5901 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5902                                 uint32_t counter)
5903 {
5904         struct mlx5_flow_counter *cnt;
5905         struct mlx5_flow_counter_pool *pool = NULL;
5906
5907         flow_dv_counter_get_by_idx(dev, counter, &pool);
5908         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5909         cnt = MLX5_POOL_GET_CNT(pool, counter);
5910         return MLX5_CNT_TO_AGE(cnt);
5911 }
5912
5913 /**
5914  * Remove a flow counter from aged counter list.
5915  *
5916  * @param[in] dev
5917  *   Pointer to the Ethernet device structure.
5918  * @param[in] counter
5919  *   Index to the counter handler.
5920  * @param[in] cnt
5921  *   Pointer to the counter handler.
5922  */
5923 static void
5924 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5925                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5926 {
5927         struct mlx5_age_info *age_info;
5928         struct mlx5_age_param *age_param;
5929         struct mlx5_priv *priv = dev->data->dev_private;
5930         uint16_t expected = AGE_CANDIDATE;
5931
5932         age_info = GET_PORT_AGE_INFO(priv);
5933         age_param = flow_dv_counter_idx_get_age(dev, counter);
5934         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5935                                          AGE_FREE, false, __ATOMIC_RELAXED,
5936                                          __ATOMIC_RELAXED)) {
5937                 /*
5938                  * We need the lock even on the age timeout,
5939                  * since the counter may still be in processing.
5940                  */
5941                 rte_spinlock_lock(&age_info->aged_sl);
5942                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5943                 rte_spinlock_unlock(&age_info->aged_sl);
5944                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5945         }
5946 }
5947
5948 /**
5949  * Release a flow counter.
5950  *
5951  * @param[in] dev
5952  *   Pointer to the Ethernet device structure.
5953  * @param[in] counter
5954  *   Index to the counter handler.
5955  */
5956 static void
5957 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5958 {
5959         struct mlx5_priv *priv = dev->data->dev_private;
5960         struct mlx5_flow_counter_pool *pool = NULL;
5961         struct mlx5_flow_counter *cnt;
5962         enum mlx5_counter_type cnt_type;
5963
5964         if (!counter)
5965                 return;
5966         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5967         MLX5_ASSERT(pool);
5968         /*
5969          * If the counter action is shared by ID, the l3t_clear_entry function
5970          * reduces its references counter. If after the reduction the action is
5971          * still referenced, the function returns here and does not release it.
5972          */
5973         if (IS_LEGACY_SHARED_CNT(counter) &&
5974             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5975                 return;
5976         /*
5977          * If the counter action is shared by indirect action API, the atomic
5978          * function reduces its references counter. If after the reduction the
5979          * action is still referenced, the function returns here and does not
5980          * release it.
5981          * When the counter action is shared neither by ID nor by the
5982          * indirect action API, the refcnt is 1 before the reduction, so this
5983          * condition fails and the function doesn't return here.
5984          */
5985         if (!IS_LEGACY_SHARED_CNT(counter) &&
5986             __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED))
5987                 return;
5988         if (pool->is_aged)
5989                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5990         cnt->pool = pool;
5991         /*
5992          * Put the counter back to the list to be updated in non-fallback mode.
5993          * Currently, two lists are used alternately: while one is being
5994          * queried, the freed counter is added to the other list, selected by
5995          * the pool query_gen value. After the query finishes, the counters in
5996          * that list are moved to the global container counter list. The lists
5997          * switch when a query starts, so no lock is needed as the query
5998          * callback and the release function operate on different lists.
5999          */
6000         if (!priv->sh->cmng.counter_fallback) {
6001                 rte_spinlock_lock(&pool->csl);
6002                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6003                 rte_spinlock_unlock(&pool->csl);
6004         } else {
6005                 cnt->dcs_when_free = cnt->dcs_when_active;
6006                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6007                                            MLX5_COUNTER_TYPE_ORIGIN;
6008                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6009                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6010                                   cnt, next);
6011                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6012         }
6013 }
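
/*
 * Rough sketch of the two-list scheme described above (comment only):
 *
 *     // release path: freed counters accumulate in the current generation
 *     TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
 *
 *     // query completion path (elsewhere): once a batch query finishes, the
 *     // previous generation's list is concatenated to the global container
 *     // free list, so release and query never touch the same list.
 */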
6014
6015 /**
6016  * Resize a meter id container.
6017  *
6018  * @param[in] dev
6019  *   Pointer to the Ethernet device structure.
6020  *
6021  * @return
6022  *   0 on success, otherwise negative errno value and rte_errno is set.
6023  */
6024 static int
6025 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6026 {
6027         struct mlx5_priv *priv = dev->data->dev_private;
6028         struct mlx5_aso_mtr_pools_mng *pools_mng =
6029                                 &priv->sh->mtrmng->pools_mng;
6030         void *old_pools = pools_mng->pools;
6031         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6032         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6033         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6034
6035         if (!pools) {
6036                 rte_errno = ENOMEM;
6037                 return -ENOMEM;
6038         }
6039         if (!pools_mng->n &&
6040             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6041                 mlx5_free(pools);
6042                 return -ENOMEM;
6043         }
6044         if (old_pools)
6045                 memcpy(pools, old_pools, pools_mng->n *
6046                                        sizeof(struct mlx5_aso_mtr_pool *));
6047         pools_mng->n = resize;
6048         pools_mng->pools = pools;
6049         if (old_pools)
6050                 mlx5_free(old_pools);
6051         return 0;
6052 }
6053
6054 /**
6055  * Prepare a new meter and/or a new meter pool.
6056  *
6057  * @param[in] dev
6058  *   Pointer to the Ethernet device structure.
6059  * @param[out] mtr_free
6060  *   Where to put the pointer of a new meter.
6061  *
6062  * @return
6063  *   The meter pool pointer and @p mtr_free is set on success,
6064  *   NULL otherwise and rte_errno is set.
6065  */
6066 static struct mlx5_aso_mtr_pool *
6067 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6068                              struct mlx5_aso_mtr **mtr_free)
6069 {
6070         struct mlx5_priv *priv = dev->data->dev_private;
6071         struct mlx5_aso_mtr_pools_mng *pools_mng =
6072                                 &priv->sh->mtrmng->pools_mng;
6073         struct mlx5_aso_mtr_pool *pool = NULL;
6074         struct mlx5_devx_obj *dcs = NULL;
6075         uint32_t i;
6076         uint32_t log_obj_size;
6077
6078         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6079         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6080                         priv->sh->pdn, log_obj_size);
6081         if (!dcs) {
6082                 rte_errno = ENODATA;
6083                 return NULL;
6084         }
6085         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6086         if (!pool) {
6087                 rte_errno = ENOMEM;
6088                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6089                 return NULL;
6090         }
6091         pool->devx_obj = dcs;
6092         pool->index = pools_mng->n_valid;
6093         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6094                 mlx5_free(pool);
6095                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6096                 return NULL;
6097         }
6098         pools_mng->pools[pool->index] = pool;
6099         pools_mng->n_valid++;
6100         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6101                 pool->mtrs[i].offset = i;
6102                 LIST_INSERT_HEAD(&pools_mng->meters,
6103                                                 &pool->mtrs[i], next);
6104         }
6105         pool->mtrs[0].offset = 0;
6106         *mtr_free = &pool->mtrs[0];
6107         return pool;
6108 }
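
/*
 * Worked example (conceptual): assuming MLX5_MAKE_MTR_IDX() mirrors the
 * counter index encoding, the meter at offset 3 of the pool with index 2 is
 * referenced as
 *
 *     mtr_idx = MLX5_MAKE_MTR_IDX(2, 3);
 *     // i.e. 2 * MLX5_ASO_MTRS_PER_POOL + 3 + 1, keeping index 0 free to
 *     // mean "no meter"
 */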
6109
6110 /**
6111  * Release a flow meter into pool.
6112  *
6113  * @param[in] dev
6114  *   Pointer to the Ethernet device structure.
6115  * @param[in] mtr_idx
6116  *   Index to aso flow meter.
6117  */
6118 static void
6119 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6120 {
6121         struct mlx5_priv *priv = dev->data->dev_private;
6122         struct mlx5_aso_mtr_pools_mng *pools_mng =
6123                                 &priv->sh->mtrmng->pools_mng;
6124         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6125
6126         MLX5_ASSERT(aso_mtr);
6127         rte_spinlock_lock(&pools_mng->mtrsl);
6128         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6129         aso_mtr->state = ASO_METER_FREE;
6130         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6131         rte_spinlock_unlock(&pools_mng->mtrsl);
6132 }
6133
6134 /**
6135  * Allocate an ASO flow meter.
6136  *
6137  * @param[in] dev
6138  *   Pointer to the Ethernet device structure.
6139  *
6140  * @return
6141  *   Index to aso flow meter on success, 0 otherwise and rte_errno is set.
6142  */
6143 static uint32_t
6144 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6145 {
6146         struct mlx5_priv *priv = dev->data->dev_private;
6147         struct mlx5_aso_mtr *mtr_free = NULL;
6148         struct mlx5_aso_mtr_pools_mng *pools_mng =
6149                                 &priv->sh->mtrmng->pools_mng;
6150         struct mlx5_aso_mtr_pool *pool;
6151         uint32_t mtr_idx = 0;
6152
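        /* ASO flow meters require DevX support. */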
6153         if (!priv->config.devx) {
6154                 rte_errno = ENOTSUP;
6155                 return 0;
6156         }
6157         /* Reuse a free meter from the management list if one exists; */
6158         /* otherwise a new pool is created and its first meter is used. */
6159         rte_spinlock_lock(&pools_mng->mtrsl);
6160         mtr_free = LIST_FIRST(&pools_mng->meters);
6161         if (mtr_free)
6162                 LIST_REMOVE(mtr_free, next);
6163         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6164                 rte_spinlock_unlock(&pools_mng->mtrsl);
6165                 return 0;
6166         }
6167         mtr_free->state = ASO_METER_WAIT;
6168         rte_spinlock_unlock(&pools_mng->mtrsl);
6169         pool = container_of(mtr_free,
6170                         struct mlx5_aso_mtr_pool,
6171                         mtrs[mtr_free->offset]);
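        /* Compose the global meter index from the pool index and the in-pool offset. */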
6172         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6173         if (!mtr_free->fm.meter_action) {
6174 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6175                 struct rte_flow_error error;
6176                 uint8_t reg_id;
6177
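                /*
                 * Resolve the color register and lazily create the ASO
                 * meter action on the first allocation of this meter.
                 */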
6178                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6179                 mtr_free->fm.meter_action =
6180                         mlx5_glue->dv_create_flow_action_aso
6181                                                 (priv->sh->rx_domain,
6182                                                  pool->devx_obj->obj,
6183                                                  mtr_free->offset,
6184                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6185                                                  reg_id - REG_C_0);
6186 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6187                 if (!mtr_free->fm.meter_action) {
6188                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6189                         return 0;
6190                 }
6191         }
6192         return mtr_idx;
6193 }
6194
6195 /**
6196  * Verify that the @p attributes will be correctly understood by the NIC.
6197  *
6198  * @param[in] dev
6199  *   Pointer to the dev struct.
6200  * @param[in] tunnel
6201  *   Pointer to the tunnel-offload context, or NULL for a regular rule.
6202  * @param[in] attributes
6203  *   Pointer to the flow attributes.
6204  * @param[in] grp_info
6205  *   Pointer to the flow group translation info.
6206  * @param[out] error
6207  *   Pointer to error structure.
6208  *
6209  * @return
6210  *   - 0 on success for a non-root table, 1 on success for the root table.
6211  *   - a negative errno value otherwise and rte_errno is set.
6212  */
6213 static int
6214 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6215                             const struct mlx5_flow_tunnel *tunnel,
6216                             const struct rte_flow_attr *attributes,
6217                             const struct flow_grp_info *grp_info,
6218                             struct rte_flow_error *error)
6219 {
6220         struct mlx5_priv *priv = dev->data->dev_private;
6221         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6222         int ret = 0;
6223
6224 #ifndef HAVE_MLX5DV_DR
6225         RTE_SET_USED(tunnel);
6226         RTE_SET_USED(grp_info);
6227         if (attributes->group)
6228                 return rte_flow_error_set(error, ENOTSUP,
6229                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6230                                           NULL,
6231                                           "groups are not supported");
6232 #else
6233         uint32_t table = 0;
6234
6235         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6236                                        grp_info, error);
6237         if (ret)
6238                 return ret;
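        /*
         * Group 0 translates to the root table, which supports only a
         * restricted set of actions.
         */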
6239         if (!table)
6240                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6241 #endif
6242         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6243             attributes->priority > lowest_priority)
6244                 return rte_flow_error_set(error, ENOTSUP,
6245                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6246                                           NULL,
6247                                           "priority out of range");
6248         if (attributes->transfer) {
6249                 if (!priv->config.dv_esw_en)
6250                         return rte_flow_error_set
6251                                 (error, ENOTSUP,
6252                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6253                                  "E-Switch dr is not supported");
6254                 if (!(priv->representor || priv->master))
6255                         return rte_flow_error_set
6256                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6257                                  NULL, "E-Switch configuration can only be"
6258                                  " done by a master or a representor device");
6259                 if (attributes->egress)
6260                         return rte_flow_error_set
6261                                 (error, ENOTSUP,
6262                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6263                                  "egress is not supported");
6264         }
6265         if (!(attributes->egress ^ attributes->ingress))
6266                 return rte_flow_error_set(error, ENOTSUP,
6267                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6268                                           "must specify exactly one of "
6269                                           "ingress or egress");
6270         return ret;
6271 }
6272
6273 /**
6274  * Internal validation function for validating both actions and items.
6275  *
6276  * @param[in] dev
6277  *   Pointer to the rte_eth_dev structure.
6278  * @param[in] attr
6279  *   Pointer to the flow attributes.
6280  * @param[in] items
6281  *   Pointer to the list of items.
6282  * @param[in] actions
6283  *   Pointer to the list of actions.
6284  * @param[in] external
6285  *   This flow rule is created by a request external to the PMD.
6286  * @param[in] hairpin
6287  *   Number of hairpin TX actions; 0 means a classic flow.
6288  * @param[out] error
6289  *   Pointer to the error structure.
6290  *
6291  * @return
6292  *   0 on success, a negative errno value otherwise and rte_errno is set.
6293  */
6294 static int
6295 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6296                  const struct rte_flow_item items[],
6297                  const struct rte_flow_action actions[],
6298                  bool external, int hairpin, struct rte_flow_error *error)
6299 {
6300         int ret;
6301         uint64_t action_flags = 0;
6302         uint64_t item_flags = 0;
6303         uint64_t last_item = 0;
6304         uint8_t next_protocol = 0xff;
6305         uint16_t ether_type = 0;
6306         int actions_n = 0;
6307         uint8_t item_ipv6_proto = 0;
6308         int fdb_mirror_limit = 0;
6309         int modify_after_mirror = 0;
6310         const struct rte_flow_item *geneve_item = NULL;
6311         const struct rte_flow_item *gre_item = NULL;
6312         const struct rte_flow_item *gtp_item = NULL;
6313         const struct rte_flow_action_raw_decap *decap;
6314         const struct rte_flow_action_raw_encap *encap;
6315         const struct rte_flow_action_rss *rss = NULL;
6316         const struct rte_flow_action_rss *sample_rss = NULL;
6317         const struct rte_flow_action_count *sample_count = NULL;
6318         const struct rte_flow_item_tcp nic_tcp_mask = {
6319                 .hdr = {
6320                         .tcp_flags = 0xFF,
6321                         .src_port = RTE_BE16(UINT16_MAX),
6322                         .dst_port = RTE_BE16(UINT16_MAX),
6323                 }
6324         };
6325         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6326                 .hdr = {
6327                         .src_addr =
6328                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6329                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6330                         .dst_addr =
6331                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6332                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6333                         .vtc_flow = RTE_BE32(0xffffffff),
6334                         .proto = 0xff,
6335                         .hop_limits = 0xff,
6336                 },
6337                 .has_frag_ext = 1,
6338         };
6339         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6340                 .hdr = {
6341                         .common = {
6342                                 .u32 =
6343                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6344                                         .type = 0xFF,
6345                                         }).u32),
6346                         },
6347                         .dummy[0] = 0xffffffff,
6348                 },
6349         };
6350         struct mlx5_priv *priv = dev->data->dev_private;
6351         struct mlx5_dev_config *dev_conf = &priv->config;
6352         uint16_t queue_index = 0xFFFF;
6353         const struct rte_flow_item_vlan *vlan_m = NULL;
6354         uint32_t rw_act_num = 0;
6355         uint64_t is_root;
6356         const struct mlx5_flow_tunnel *tunnel;
6357         struct flow_grp_info grp_info = {
6358                 .external = !!external,
6359                 .transfer = !!attr->transfer,
6360                 .fdb_def_rule = !!priv->fdb_def_rule,
6361         };
6362         const struct rte_eth_hairpin_conf *conf;
6363         bool def_policy = false;
6364
6365         if (items == NULL)
6366                 return -1;
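        /*
         * Classify tunnel-offload rules: a tunnel match rule implies an
         * implicit decap, while a tunnel steer rule only sets the tunnel.
         */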
6367         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
6368                 tunnel = flow_items_to_tunnel(items);
6369                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6370                                 MLX5_FLOW_ACTION_DECAP;
6371         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
6372                 tunnel = flow_actions_to_tunnel(actions);
6373                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6374         } else {
6375                 tunnel = NULL;
6376         }
6377         if (tunnel && priv->representor)
6378                 return rte_flow_error_set(error, ENOTSUP,
6379                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6380                                           "decap not supported "
6381                                           "for VF representor");
6382         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6383                                 (dev, tunnel, attr, items, actions);
6384         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6385         if (ret < 0)
6386                 return ret;
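        /* flow_dv_validate_attributes() returns 1 when the table is the root one. */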
6387         is_root = (uint64_t)ret;
6388         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6389                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6390                 int type = items->type;
6391
6392                 if (!mlx5_flow_os_item_supported(type))
6393                         return rte_flow_error_set(error, ENOTSUP,
6394                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6395                                                   NULL, "item not supported");
6396                 switch (type) {
6397                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
6398                         if (items[0].type != (typeof(items[0].type))
6399                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
6400                                 return rte_flow_error_set
6401                                                 (error, EINVAL,
6402                                                 RTE_FLOW_ERROR_TYPE_ITEM,
6403                                                 NULL, "MLX5 private items "
6404                                                 "must be the first");
6405                         break;
6406                 case RTE_FLOW_ITEM_TYPE_VOID:
6407                         break;
6408                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6409                         ret = flow_dv_validate_item_port_id
6410                                         (dev, items, attr, item_flags, error);
6411                         if (ret < 0)
6412                                 return ret;
6413                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6414                         break;
6415                 case RTE_FLOW_ITEM_TYPE_ETH:
6416                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6417                                                           true, error);
6418                         if (ret < 0)
6419                                 return ret;
6420                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6421                                              MLX5_FLOW_LAYER_OUTER_L2;
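                        /* Track the masked EtherType to validate the next L3 item. */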
6422                         if (items->mask != NULL && items->spec != NULL) {
6423                                 ether_type =
6424                                         ((const struct rte_flow_item_eth *)
6425                                          items->spec)->type;
6426                                 ether_type &=
6427                                         ((const struct rte_flow_item_eth *)
6428                                          items->mask)->type;
6429                                 ether_type = rte_be_to_cpu_16(ether_type);
6430                         } else {
6431                                 ether_type = 0;
6432                         }
6433                         break;
6434                 case RTE_FLOW_ITEM_TYPE_VLAN:
6435                         ret = flow_dv_validate_item_vlan(items, item_flags,
6436                                                          dev, error);
6437                         if (ret < 0)
6438                                 return ret;
6439                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6440                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6441                         if (items->mask != NULL && items->spec != NULL) {
6442                                 ether_type =
6443                                         ((const struct rte_flow_item_vlan *)
6444                                          items->spec)->inner_type;
6445                                 ether_type &=
6446                                         ((const struct rte_flow_item_vlan *)
6447                                          items->mask)->inner_type;
6448                                 ether_type = rte_be_to_cpu_16(ether_type);
6449                         } else {
6450                                 ether_type = 0;
6451                         }
6452                         /* Store outer VLAN mask for of_push_vlan action. */
6453                         if (!tunnel)
6454                                 vlan_m = items->mask;
6455                         break;
6456                 case RTE_FLOW_ITEM_TYPE_IPV4:
6457                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6458                                                   &item_flags, &tunnel);
6459                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6460                                                          last_item, ether_type,
6461                                                          error);
6462                         if (ret < 0)
6463                                 return ret;
6464                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6465                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
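                        /*
                         * Remember the masked next-protocol so the following
                         * L4 or tunnel item can be validated against it.
                         */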
6466                         if (items->mask != NULL &&
6467                             ((const struct rte_flow_item_ipv4 *)
6468                              items->mask)->hdr.next_proto_id) {
6469                                 next_protocol =
6470                                         ((const struct rte_flow_item_ipv4 *)
6471                                          (items->spec))->hdr.next_proto_id;
6472                                 next_protocol &=
6473                                         ((const struct rte_flow_item_ipv4 *)
6474                                          (items->mask))->hdr.next_proto_id;
6475                         } else {
6476                                 /* Reset for inner layer. */
6477                                 next_protocol = 0xff;
6478                         }
6479                         break;
6480                 case RTE_FLOW_ITEM_TYPE_IPV6:
6481                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6482                                                   &item_flags, &tunnel);
6483                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6484                                                            last_item,
6485                                                            ether_type,
6486                                                            &nic_ipv6_mask,
6487                                                            error);
6488                         if (ret < 0)
6489                                 return ret;
6490                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6491                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6492                         if (items->mask != NULL &&
6493                             ((const struct rte_flow_item_ipv6 *)
6494                              items->mask)->hdr.proto) {
6495                                 item_ipv6_proto =
6496                                         ((const struct rte_flow_item_ipv6 *)
6497                                          items->spec)->hdr.proto;
6498                                 next_protocol =
6499                                         ((const struct rte_flow_item_ipv6 *)
6500                                          items->spec)->hdr.proto;
6501                                 next_protocol &=
6502                                         ((const struct rte_flow_item_ipv6 *)
6503                                          items->mask)->hdr.proto;
6504                         } else {
6505                                 /* Reset for inner layer. */
6506                                 next_protocol = 0xff;
6507                         }
6508                         break;
6509                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6510                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6511                                                                   item_flags,
6512                                                                   error);
6513                         if (ret < 0)
6514                                 return ret;
6515                         last_item = tunnel ?
6516                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6517                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6518                         if (items->mask != NULL &&
6519                             ((const struct rte_flow_item_ipv6_frag_ext *)
6520                              items->mask)->hdr.next_header) {
6521                                 next_protocol =
6522                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6523                                  items->spec)->hdr.next_header;
6524                                 next_protocol &=
6525                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6526                                  items->mask)->hdr.next_header;
6527                         } else {
6528                                 /* Reset for inner layer. */
6529                                 next_protocol = 0xff;
6530                         }
6531                         break;
6532                 case RTE_FLOW_ITEM_TYPE_TCP:
6533                         ret = mlx5_flow_validate_item_tcp
6534                                                 (items, item_flags,
6535                                                  next_protocol,
6536                                                  &nic_tcp_mask,
6537                                                  error);
6538                         if (ret < 0)
6539                                 return ret;
6540                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6541                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6542                         break;
6543                 case RTE_FLOW_ITEM_TYPE_UDP:
6544                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6545                                                           next_protocol,
6546                                                           error);
6547                         if (ret < 0)
6548                                 return ret;
6549                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6550                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6551                         break;
6552                 case RTE_FLOW_ITEM_TYPE_GRE:
6553                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6554                                                           next_protocol, error);
6555                         if (ret < 0)
6556                                 return ret;
6557                         gre_item = items;
6558                         last_item = MLX5_FLOW_LAYER_GRE;
6559                         break;
6560                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6561                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6562                                                             next_protocol,
6563                                                             error);
6564                         if (ret < 0)
6565                                 return ret;
6566                         last_item = MLX5_FLOW_LAYER_NVGRE;
6567                         break;
6568                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6569                         ret = mlx5_flow_validate_item_gre_key
6570                                 (items, item_flags, gre_item, error);
6571                         if (ret < 0)
6572                                 return ret;
6573                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6574                         break;
6575                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6576                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6577                                                             error);
6578                         if (ret < 0)
6579                                 return ret;
6580                         last_item = MLX5_FLOW_LAYER_VXLAN;
6581                         break;
6582                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6583                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6584                                                                 item_flags, dev,
6585                                                                 error);
6586                         if (ret < 0)
6587                                 return ret;
6588                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6589                         break;
6590                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6591                         ret = mlx5_flow_validate_item_geneve(items,
6592                                                              item_flags, dev,
6593                                                              error);
6594                         if (ret < 0)
6595                                 return ret;
6596                         geneve_item = items;
6597                         last_item = MLX5_FLOW_LAYER_GENEVE;
6598                         break;
6599                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6600                         ret = mlx5_flow_validate_item_geneve_opt(items,
6601                                                                  last_item,
6602                                                                  geneve_item,
6603                                                                  dev,
6604                                                                  error);
6605                         if (ret < 0)
6606                                 return ret;
6607                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6608                         break;
6609                 case RTE_FLOW_ITEM_TYPE_MPLS:
6610                         ret = mlx5_flow_validate_item_mpls(dev, items,
6611                                                            item_flags,
6612                                                            last_item, error);
6613                         if (ret < 0)
6614                                 return ret;
6615                         last_item = MLX5_FLOW_LAYER_MPLS;
6616                         break;
6618                 case RTE_FLOW_ITEM_TYPE_MARK:
6619                         ret = flow_dv_validate_item_mark(dev, items, attr,
6620                                                          error);
6621                         if (ret < 0)
6622                                 return ret;
6623                         last_item = MLX5_FLOW_ITEM_MARK;
6624                         break;
6625                 case RTE_FLOW_ITEM_TYPE_META:
6626                         ret = flow_dv_validate_item_meta(dev, items, attr,
6627                                                          error);
6628                         if (ret < 0)
6629                                 return ret;
6630                         last_item = MLX5_FLOW_ITEM_METADATA;
6631                         break;
6632                 case RTE_FLOW_ITEM_TYPE_ICMP:
6633                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6634                                                            next_protocol,
6635                                                            error);
6636                         if (ret < 0)
6637                                 return ret;
6638                         last_item = MLX5_FLOW_LAYER_ICMP;
6639                         break;
6640                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6641                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6642                                                             next_protocol,
6643                                                             error);
6644                         if (ret < 0)
6645                                 return ret;
6646                         item_ipv6_proto = IPPROTO_ICMPV6;
6647                         last_item = MLX5_FLOW_LAYER_ICMP6;
6648                         break;
6649                 case RTE_FLOW_ITEM_TYPE_TAG:
6650                         ret = flow_dv_validate_item_tag(dev, items,
6651                                                         attr, error);
6652                         if (ret < 0)
6653                                 return ret;
6654                         last_item = MLX5_FLOW_ITEM_TAG;
6655                         break;
6656                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6657                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6658                         break;
6659                 case RTE_FLOW_ITEM_TYPE_GTP:
6660                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
6661                                                         error);
6662                         if (ret < 0)
6663                                 return ret;
6664                         gtp_item = items;
6665                         last_item = MLX5_FLOW_LAYER_GTP;
6666                         break;
6667                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6668                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
6669                                                             gtp_item, attr,
6670                                                             error);
6671                         if (ret < 0)
6672                                 return ret;
6673                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
6674                         break;
6675                 case RTE_FLOW_ITEM_TYPE_ECPRI:
6676                         /* Capacity will be checked in the translate stage. */
6677                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
6678                                                             last_item,
6679                                                             ether_type,
6680                                                             &nic_ecpri_mask,
6681                                                             error);
6682                         if (ret < 0)
6683                                 return ret;
6684                         last_item = MLX5_FLOW_LAYER_ECPRI;
6685                         break;
6686                 default:
6687                         return rte_flow_error_set(error, ENOTSUP,
6688                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6689                                                   NULL, "item not supported");
6690                 }
6691                 item_flags |= last_item;
6692         }
6693         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6694                 int type = actions->type;
6695                 bool shared_count = false;
6696
6697                 if (!mlx5_flow_os_action_supported(type))
6698                         return rte_flow_error_set(error, ENOTSUP,
6699                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6700                                                   actions,
6701                                                   "action not supported");
6702                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
6703                         return rte_flow_error_set(error, ENOTSUP,
6704                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6705                                                   actions, "too many actions");
6706                 if (action_flags &
6707                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
6708                         return rte_flow_error_set(error, ENOTSUP,
6709                                 RTE_FLOW_ERROR_TYPE_ACTION,
6710                                 NULL, "meter action with policy "
6711                                 "must be the last action");
6712                 switch (type) {
6713                 case RTE_FLOW_ACTION_TYPE_VOID:
6714                         break;
6715                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
6716                         ret = flow_dv_validate_action_port_id(dev,
6717                                                               action_flags,
6718                                                               actions,
6719                                                               attr,
6720                                                               error);
6721                         if (ret)
6722                                 return ret;
6723                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
6724                         ++actions_n;
6725                         break;
6726                 case RTE_FLOW_ACTION_TYPE_FLAG:
6727                         ret = flow_dv_validate_action_flag(dev, action_flags,
6728                                                            attr, error);
6729                         if (ret < 0)
6730                                 return ret;
6731                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6732                                 /* Count all modify-header actions as one. */
6733                                 if (!(action_flags &
6734                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6735                                         ++actions_n;
6736                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
6737                                                 MLX5_FLOW_ACTION_MARK_EXT;
6738                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6739                                         modify_after_mirror = 1;
6741                         } else {
6742                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
6743                                 ++actions_n;
6744                         }
6745                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6746                         break;
6747                 case RTE_FLOW_ACTION_TYPE_MARK:
6748                         ret = flow_dv_validate_action_mark(dev, actions,
6749                                                            action_flags,
6750                                                            attr, error);
6751                         if (ret < 0)
6752                                 return ret;
6753                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6754                                 /* Count all modify-header actions as one. */
6755                                 if (!(action_flags &
6756                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6757                                         ++actions_n;
6758                                 action_flags |= MLX5_FLOW_ACTION_MARK |
6759                                                 MLX5_FLOW_ACTION_MARK_EXT;
6760                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6761                                         modify_after_mirror = 1;
6762                         } else {
6763                                 action_flags |= MLX5_FLOW_ACTION_MARK;
6764                                 ++actions_n;
6765                         }
6766                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6767                         break;
6768                 case RTE_FLOW_ACTION_TYPE_SET_META:
6769                         ret = flow_dv_validate_action_set_meta(dev, actions,
6770                                                                action_flags,
6771                                                                attr, error);
6772                         if (ret < 0)
6773                                 return ret;
6774                         /* Count all modify-header actions as one action. */
6775                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6776                                 ++actions_n;
6777                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6778                                 modify_after_mirror = 1;
6779                         action_flags |= MLX5_FLOW_ACTION_SET_META;
6780                         rw_act_num += MLX5_ACT_NUM_SET_META;
6781                         break;
6782                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
6783                         ret = flow_dv_validate_action_set_tag(dev, actions,
6784                                                               action_flags,
6785                                                               attr, error);
6786                         if (ret < 0)
6787                                 return ret;
6788                         /* Count all modify-header actions as one action. */
6789                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6790                                 ++actions_n;
6791                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6792                                 modify_after_mirror = 1;
6793                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6794                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6795                         break;
6796                 case RTE_FLOW_ACTION_TYPE_DROP:
6797                         ret = mlx5_flow_validate_action_drop(action_flags,
6798                                                              attr, error);
6799                         if (ret < 0)
6800                                 return ret;
6801                         action_flags |= MLX5_FLOW_ACTION_DROP;
6802                         ++actions_n;
6803                         break;
6804                 case RTE_FLOW_ACTION_TYPE_QUEUE:
6805                         ret = mlx5_flow_validate_action_queue(actions,
6806                                                               action_flags, dev,
6807                                                               attr, error);
6808                         if (ret < 0)
6809                                 return ret;
6810                         queue_index = ((const struct rte_flow_action_queue *)
6811                                                         (actions->conf))->index;
6812                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
6813                         ++actions_n;
6814                         break;
6815                 case RTE_FLOW_ACTION_TYPE_RSS:
6816                         rss = actions->conf;
6817                         ret = mlx5_flow_validate_action_rss(actions,
6818                                                             action_flags, dev,
6819                                                             attr, item_flags,
6820                                                             error);
6821                         if (ret < 0)
6822                                 return ret;
6823                         if (rss && sample_rss &&
6824                             (sample_rss->level != rss->level ||
6825                             sample_rss->types != rss->types))
6826                                 return rte_flow_error_set(error, ENOTSUP,
6827                                         RTE_FLOW_ERROR_TYPE_ACTION,
6828                                         NULL,
6829                                         "Can't use different RSS types "
6830                                         "or levels in the same flow");
6831                         if (rss != NULL && rss->queue_num)
6832                                 queue_index = rss->queue[0];
6833                         action_flags |= MLX5_FLOW_ACTION_RSS;
6834                         ++actions_n;
6835                         break;
6836                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6837                         ret =
6838                         mlx5_flow_validate_action_default_miss(action_flags,
6839                                         attr, error);
6840                         if (ret < 0)
6841                                 return ret;
6842                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
6843                         ++actions_n;
6844                         break;
6845                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
6846                 case RTE_FLOW_ACTION_TYPE_COUNT:
6847                         shared_count = is_shared_action_count(actions);
6848                         ret = flow_dv_validate_action_count(dev, shared_count,
6849                                                             action_flags,
6850                                                             error);
6851                         if (ret < 0)
6852                                 return ret;
6853                         action_flags |= MLX5_FLOW_ACTION_COUNT;
6854                         ++actions_n;
6855                         break;
6856                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
6857                         if (flow_dv_validate_action_pop_vlan(dev,
6858                                                              action_flags,
6859                                                              actions,
6860                                                              item_flags, attr,
6861                                                              error))
6862                                 return -rte_errno;
6863                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6864                                 modify_after_mirror = 1;
6865                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
6866                         ++actions_n;
6867                         break;
6868                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6869                         ret = flow_dv_validate_action_push_vlan(dev,
6870                                                                 action_flags,
6871                                                                 vlan_m,
6872                                                                 actions, attr,
6873                                                                 error);
6874                         if (ret < 0)
6875                                 return ret;
6876                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6877                                 modify_after_mirror = 1;
6878                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
6879                         ++actions_n;
6880                         break;
6881                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
6882                         ret = flow_dv_validate_action_set_vlan_pcp
6883                                                 (action_flags, actions, error);
6884                         if (ret < 0)
6885                                 return ret;
6886                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6887                                 modify_after_mirror = 1;
6888                         /* Count PCP with push_vlan command. */
6889                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
6890                         break;
6891                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6892                         ret = flow_dv_validate_action_set_vlan_vid
6893                                                 (item_flags, action_flags,
6894                                                  actions, error);
6895                         if (ret < 0)
6896                                 return ret;
6897                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6898                                 modify_after_mirror = 1;
6899                         /* Count VID with push_vlan command. */
6900                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
6901                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
6902                         break;
6903                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6904                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6905                         ret = flow_dv_validate_action_l2_encap(dev,
6906                                                                action_flags,
6907                                                                actions, attr,
6908                                                                error);
6909                         if (ret < 0)
6910                                 return ret;
6911                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
6912                         ++actions_n;
6913                         break;
6914                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6915                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6916                         ret = flow_dv_validate_action_decap(dev, action_flags,
6917                                                             actions, item_flags,
6918                                                             attr, error);
6919                         if (ret < 0)
6920                                 return ret;
6921                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6922                                 modify_after_mirror = 1;
6923                         action_flags |= MLX5_FLOW_ACTION_DECAP;
6924                         ++actions_n;
6925                         break;
6926                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6927                         ret = flow_dv_validate_action_raw_encap_decap
6928                                 (dev, NULL, actions->conf, attr, &action_flags,
6929                                  &actions_n, actions, item_flags, error);
6930                         if (ret < 0)
6931                                 return ret;
6932                         break;
6933                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6934                         decap = actions->conf;
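                        /*
                         * Peek past VOID actions: a raw_decap immediately
                         * followed by a raw_encap is validated as one
                         * combined decap/encap action.
                         */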
6935                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
6936                                 ;
6937                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
6938                                 encap = NULL;
6939                                 actions--;
6940                         } else {
6941                                 encap = actions->conf;
6942                         }
6943                         ret = flow_dv_validate_action_raw_encap_decap
6944                                            (dev,
6945                                             decap ? decap : &empty_decap, encap,
6946                                             attr, &action_flags, &actions_n,
6947                                             actions, item_flags, error);
6948                         if (ret < 0)
6949                                 return ret;
6950                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
6951                             (action_flags & MLX5_FLOW_ACTION_DECAP))
6952                                 modify_after_mirror = 1;
6953                         break;
6954                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
6955                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
6956                         ret = flow_dv_validate_action_modify_mac(action_flags,
6957                                                                  actions,
6958                                                                  item_flags,
6959                                                                  error);
6960                         if (ret < 0)
6961                                 return ret;
6962                         /* Count all modify-header actions as one action. */
6963                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6964                                 ++actions_n;
6965                         action_flags |= actions->type ==
6966                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
6967                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
6968                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
6969                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6970                                 modify_after_mirror = 1;
6971                         /*
6972                          * Even though the source and destination MAC addresses
6973                          * overlap within the same 4B-aligned header words, the
6974                          * convert function handles them separately, so 4 SW
6975                          * actions are created in total; 2 actions are added per
6976                          * address regardless of how many of its bytes are set.
6977                          */
6978                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
6979                         break;
6980                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
6981                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
6982                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
6983                                                                   actions,
6984                                                                   item_flags,
6985                                                                   error);
6986                         if (ret < 0)
6987                                 return ret;
6988                         /* Count all modify-header actions as one action. */
6989                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6990                                 ++actions_n;
6991                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6992                                 modify_after_mirror = 1;
6993                         action_flags |= actions->type ==
6994                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
6995                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
6996                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
6997                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
6998                         break;
6999                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7000                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7001                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7002                                                                   actions,
7003                                                                   item_flags,
7004                                                                   error);
7005                         if (ret < 0)
7006                                 return ret;
7007                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7008                                 return rte_flow_error_set(error, ENOTSUP,
7009                                         RTE_FLOW_ERROR_TYPE_ACTION,
7010                                         actions,
7011                                         "Can't change header "
7012                                         "with ICMPv6 proto");
7013                         /* Count all modify-header actions as one action. */
7014                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7015                                 ++actions_n;
7016                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7017                                 modify_after_mirror = 1;
7018                         action_flags |= actions->type ==
7019                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7020                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7021                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7022                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7023                         break;
7024                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7025                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7026                         ret = flow_dv_validate_action_modify_tp(action_flags,
7027                                                                 actions,
7028                                                                 item_flags,
7029                                                                 error);
7030                         if (ret < 0)
7031                                 return ret;
7032                         /* Count all modify-header actions as one action. */
7033                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7034                                 ++actions_n;
7035                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7036                                 modify_after_mirror = 1;
7037                         action_flags |= actions->type ==
7038                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7039                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7040                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7041                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7042                         break;
7043                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7044                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7045                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7046                                                                  actions,
7047                                                                  item_flags,
7048                                                                  error);
7049                         if (ret < 0)
7050                                 return ret;
7051                         /* Count all modify-header actions as one action. */
7052                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7053                                 ++actions_n;
7054                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7055                                 modify_after_mirror = 1;
7056                         action_flags |= actions->type ==
7057                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7058                                                 MLX5_FLOW_ACTION_SET_TTL :
7059                                                 MLX5_FLOW_ACTION_DEC_TTL;
7060                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7061                         break;
7062                 case RTE_FLOW_ACTION_TYPE_JUMP:
7063                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7064                                                            action_flags,
7065                                                            attr, external,
7066                                                            error);
7067                         if (ret)
7068                                 return ret;
7069                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7070                             fdb_mirror_limit)
7071                                 return rte_flow_error_set(error, EINVAL,
7072                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7073                                                   NULL,
7074                                                   "sample and jump action combination is not supported");
7075                         ++actions_n;
7076                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7077                         break;
7078                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7079                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7080                         ret = flow_dv_validate_action_modify_tcp_seq
7081                                                                 (action_flags,
7082                                                                  actions,
7083                                                                  item_flags,
7084                                                                  error);
7085                         if (ret < 0)
7086                                 return ret;
7087                         /* Count all modify-header actions as one action. */
7088                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7089                                 ++actions_n;
7090                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7091                                 modify_after_mirror = 1;
7092                         action_flags |= actions->type ==
7093                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7094                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7095                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7096                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7097                         break;
7098                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7099                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7100                         ret = flow_dv_validate_action_modify_tcp_ack
7101                                                                 (action_flags,
7102                                                                  actions,
7103                                                                  item_flags,
7104                                                                  error);
7105                         if (ret < 0)
7106                                 return ret;
7107                         /* Count all modify-header actions as one action. */
7108                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7109                                 ++actions_n;
7110                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7111                                 modify_after_mirror = 1;
7112                         action_flags |= actions->type ==
7113                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7114                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7115                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7116                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7117                         break;
7118                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7119                         break;
7120                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7121                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7122                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7123                         break;
7124                 case RTE_FLOW_ACTION_TYPE_METER:
7125                         ret = mlx5_flow_validate_action_meter(dev,
7126                                                               action_flags,
7127                                                               actions, attr,
7128                                                               &def_policy,
7129                                                               error);
7130                         if (ret < 0)
7131                                 return ret;
7132                         action_flags |= MLX5_FLOW_ACTION_METER;
7133                         if (!def_policy)
7134                                 action_flags |=
7135                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7136                         ++actions_n;
7137                         /* Meter action will add one more TAG action. */
7138                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7139                         break;
7140                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7141                         if (!attr->transfer && !attr->group)
7142                                 return rte_flow_error_set(error, ENOTSUP,
7143                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7144                                                                            NULL,
7145                           "Shared ASO age action is not supported for group 0");
7146                         action_flags |= MLX5_FLOW_ACTION_AGE;
7147                         ++actions_n;
7148                         break;
7149                 case RTE_FLOW_ACTION_TYPE_AGE:
7150                         ret = flow_dv_validate_action_age(action_flags,
7151                                                           actions, dev,
7152                                                           error);
7153                         if (ret < 0)
7154                                 return ret;
7155                         /*
7156                          * Validate the regular AGE action (using a counter)
7157                          * for mutual exclusion with shared counter actions.
7158                          */
7159                         if (!priv->sh->flow_hit_aso_en) {
7160                                 if (shared_count)
7161                                         return rte_flow_error_set
7162                                                 (error, EINVAL,
7163                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7164                                                 NULL,
7165                                                 "old age and shared count combination is not supported");
7166                                 if (sample_count)
7167                                         return rte_flow_error_set
7168                                                 (error, EINVAL,
7169                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7170                                                 NULL,
7171                                                 "old age action and count must be in the same sub flow");
7172                         }
7173                         action_flags |= MLX5_FLOW_ACTION_AGE;
7174                         ++actions_n;
7175                         break;
7176                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7177                         ret = flow_dv_validate_action_modify_ipv4_dscp
7178                                                          (action_flags,
7179                                                           actions,
7180                                                           item_flags,
7181                                                           error);
7182                         if (ret < 0)
7183                                 return ret;
7184                         /* Count all modify-header actions as one action. */
7185                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7186                                 ++actions_n;
7187                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7188                                 modify_after_mirror = 1;
7189                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7190                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7191                         break;
7192                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7193                         ret = flow_dv_validate_action_modify_ipv6_dscp
7194                                                                 (action_flags,
7195                                                                  actions,
7196                                                                  item_flags,
7197                                                                  error);
7198                         if (ret < 0)
7199                                 return ret;
7200                         /* Count all modify-header actions as one action. */
7201                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7202                                 ++actions_n;
7203                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7204                                 modify_after_mirror = 1;
7205                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7206                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7207                         break;
7208                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7209                         ret = flow_dv_validate_action_sample(&action_flags,
7210                                                              actions, dev,
7211                                                              attr, item_flags,
7212                                                              rss, &sample_rss,
7213                                                              &sample_count,
7214                                                              &fdb_mirror_limit,
7215                                                              error);
7216                         if (ret < 0)
7217                                 return ret;
7218                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7219                         ++actions_n;
7220                         break;
7221                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7222                         if (actions[0].type != (typeof(actions[0].type))
7223                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
7224                                 return rte_flow_error_set
7225                                                 (error, EINVAL,
7226                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7227                                                 NULL, "MLX5 private action "
7228                                                 "must be the first");
7229
7230                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
7231                         break;
7232                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7233                         ret = flow_dv_validate_action_modify_field(dev,
7234                                                                    action_flags,
7235                                                                    actions,
7236                                                                    attr,
7237                                                                    error);
7238                         if (ret < 0)
7239                                 return ret;
7240                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7241                                 modify_after_mirror = 1;
7242                         /* Count all modify-header actions as one action. */
7243                         if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
7244                                 ++actions_n;
7245                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7246                         rw_act_num += ret;
7247                         break;
7248                 default:
7249                         return rte_flow_error_set(error, ENOTSUP,
7250                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7251                                                   actions,
7252                                                   "action not supported");
7253                 }
7254         }
7255         /*
7256          * Validate actions in tunnel offload flow rules:
7257          * - Explicit decap action is prohibited by the tunnel offload API.
7258          * - Drop action in tunnel steer rule is prohibited by the API.
7259          * - Application cannot use MARK action because its value can mask
7260          *   the tunnel default miss notification.
7261          * - JUMP in tunnel match rule is not supported by the current PMD
7262          *   implementation.
7263          * - TAG & META are reserved for future use.
7264          */
7265         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7266                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7267                                             MLX5_FLOW_ACTION_MARK     |
7268                                             MLX5_FLOW_ACTION_SET_TAG  |
7269                                             MLX5_FLOW_ACTION_SET_META |
7270                                             MLX5_FLOW_ACTION_DROP;
7271
7272                 if (action_flags & bad_actions_mask)
7273                         return rte_flow_error_set
7274                                         (error, EINVAL,
7275                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7276                                         "Invalid RTE action in tunnel "
7277                                         "set decap rule");
7278                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7279                         return rte_flow_error_set
7280                                         (error, EINVAL,
7281                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7282                                         "tunnel set decap rule must terminate "
7283                                         "with JUMP");
7284                 if (!attr->ingress)
7285                         return rte_flow_error_set
7286                                         (error, EINVAL,
7287                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7288                                         "tunnel flows for ingress traffic only");
7289         }
7290         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7291                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7292                                             MLX5_FLOW_ACTION_MARK    |
7293                                             MLX5_FLOW_ACTION_SET_TAG |
7294                                             MLX5_FLOW_ACTION_SET_META;
7295
7296                 if (action_flags & bad_actions_mask)
7297                         return rte_flow_error_set
7298                                         (error, EINVAL,
7299                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7300                                         "Invalid RTE action in tunnel "
7301                                         "set match rule");
7302         }
7303         /*
7304          * Validate the drop action mutual exclusion with other actions.
7305          * Drop action is mutually-exclusive with any other action, except for
7306          * Count action.
7307          * Drop action compatibility with tunnel offload was already validated.
7308          */
7309         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7310                             MLX5_FLOW_ACTION_TUNNEL_SET));
7311         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7312             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7313                 return rte_flow_error_set(error, EINVAL,
7314                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7315                                           "Drop action is mutually-exclusive "
7316                                           "with any other action, except for "
7317                                           "Count action");
7318         /* Eswitch has a few restrictions on using items and actions. */
7319         if (attr->transfer) {
7320                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7321                     action_flags & MLX5_FLOW_ACTION_FLAG)
7322                         return rte_flow_error_set(error, ENOTSUP,
7323                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7324                                                   NULL,
7325                                                   "unsupported action FLAG");
7326                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7327                     action_flags & MLX5_FLOW_ACTION_MARK)
7328                         return rte_flow_error_set(error, ENOTSUP,
7329                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7330                                                   NULL,
7331                                                   "unsupported action MARK");
7332                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7333                         return rte_flow_error_set(error, ENOTSUP,
7334                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7335                                                   NULL,
7336                                                   "unsupported action QUEUE");
7337                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7338                         return rte_flow_error_set(error, ENOTSUP,
7339                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7340                                                   NULL,
7341                                                   "unsupported action RSS");
7342                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7343                         return rte_flow_error_set(error, EINVAL,
7344                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7345                                                   actions,
7346                                                   "no fate action is found");
7347         } else {
7348                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7349                         return rte_flow_error_set(error, EINVAL,
7350                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7351                                                   actions,
7352                                                   "no fate action is found");
7353         }
7354         /*
7355          * Continue validation for Xcap and VLAN actions.
7356          * If hairpin works in explicit TX rule mode, actions are not split
7357          * and the validation of a hairpin ingress flow should be the same
7358          * as for other standard flows.
7359          */
7360         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7361                              MLX5_FLOW_VLAN_ACTIONS)) &&
7362             (queue_index == 0xFFFF ||
7363              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7364              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7365              conf->tx_explicit != 0))) {
7366                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7367                     MLX5_FLOW_XCAP_ACTIONS)
7368                         return rte_flow_error_set(error, ENOTSUP,
7369                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7370                                                   NULL, "encap and decap "
7371                                                   "combination is not supported");
7372                 if (!attr->transfer && attr->ingress) {
7373                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7374                                 return rte_flow_error_set
7375                                                 (error, ENOTSUP,
7376                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7377                                                  NULL, "encap is not supported"
7378                                                  " for ingress traffic");
7379                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7380                                 return rte_flow_error_set
7381                                                 (error, ENOTSUP,
7382                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7383                                                  NULL, "push VLAN action not "
7384                                                  "supported for ingress");
7385                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7386                                         MLX5_FLOW_VLAN_ACTIONS)
7387                                 return rte_flow_error_set
7388                                                 (error, ENOTSUP,
7389                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7390                                                  NULL, "no support for "
7391                                                  "multiple VLAN actions");
7392                 }
7393         }
7394         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7395                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7396                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7397                         attr->ingress)
7398                         return rte_flow_error_set
7399                                 (error, ENOTSUP,
7400                                 RTE_FLOW_ERROR_TYPE_ACTION,
7401                                 NULL, "fate action not supported for "
7402                                 "meter with policy");
7403                 if (attr->egress) {
7404                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7405                                 return rte_flow_error_set
7406                                         (error, ENOTSUP,
7407                                         RTE_FLOW_ERROR_TYPE_ACTION,
7408                                         NULL, "modify header action in egress "
7409                                         "cannot be done before meter action");
7410                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7411                                 return rte_flow_error_set
7412                                         (error, ENOTSUP,
7413                                         RTE_FLOW_ERROR_TYPE_ACTION,
7414                                         NULL, "encap action in egress "
7415                                         "cannot be done before meter action");
7416                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7417                                 return rte_flow_error_set
7418                                         (error, ENOTSUP,
7419                                         RTE_FLOW_ERROR_TYPE_ACTION,
7420                                         NULL, "push vlan action in egress "
7421                                         "cannot be done before meter action");
7422                 }
7423         }
7424         /*
7425          * Hairpin flow will add one more TAG action in TX implicit mode.
7426          * In TX explicit mode, there will be no hairpin flow ID.
7427          */
7428         if (hairpin > 0)
7429                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7430         /* Extra metadata enabled: one more TAG action will be added. */
7431         if (dev_conf->dv_flow_en &&
7432             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7433             mlx5_flow_ext_mreg_supported(dev))
7434                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7435         if (rw_act_num >
7436                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7437                 return rte_flow_error_set(error, ENOTSUP,
7438                                           RTE_FLOW_ERROR_TYPE_ACTION,
7439                                           NULL, "too many header modify"
7440                                           " actions to support");
7441         }
7442         /* Eswitch egress mirror and modify flows have a limitation on CX5. */
7443         if (fdb_mirror_limit && modify_after_mirror)
7444                 return rte_flow_error_set(error, EINVAL,
7445                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7446                                 "sample before modify action is not supported");
7447         return 0;
7448 }
7449
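/*
 * Illustration (hypothetical application snippet, not part of the driver):
 * the drop-exclusivity check above allows DROP to be combined only with
 * COUNT, so a rule like the one below passes validation, while adding,
 * e.g., a MARK action to it would fail with EINVAL.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES /* assumed illustration-only guard */
static int
example_validate_drop_count(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}
#endif /* MLX5_FLOW_DV_EXAMPLES */
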
7450 /**
7451  * Internal preparation function. Allocates the DV flow size,
7452  * this size is constant.
7453  *
7454  * @param[in] dev
7455  *   Pointer to the rte_eth_dev structure.
7456  * @param[in] attr
7457  *   Pointer to the flow attributes.
7458  * @param[in] items
7459  *   Pointer to the list of items.
7460  * @param[in] actions
7461  *   Pointer to the list of actions.
7462  * @param[out] error
7463  *   Pointer to the error structure.
7464  *
7465  * @return
7466  *   Pointer to mlx5_flow object on success,
7467  *   otherwise NULL and rte_errno is set.
7468  */
7469 static struct mlx5_flow *
7470 flow_dv_prepare(struct rte_eth_dev *dev,
7471                 const struct rte_flow_attr *attr __rte_unused,
7472                 const struct rte_flow_item items[] __rte_unused,
7473                 const struct rte_flow_action actions[] __rte_unused,
7474                 struct rte_flow_error *error)
7475 {
7476         uint32_t handle_idx = 0;
7477         struct mlx5_flow *dev_flow;
7478         struct mlx5_flow_handle *dev_handle;
7479         struct mlx5_priv *priv = dev->data->dev_private;
7480         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7481
7482         MLX5_ASSERT(wks);
7483         wks->skip_matcher_reg = 0;
7484         /* Guard against overflowing the workspace flow array. */
7485         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7486                 rte_flow_error_set(error, ENOSPC,
7487                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7488                                    "no free temporary device flow");
7489                 return NULL;
7490         }
7491         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7492                                    &handle_idx);
7493         if (!dev_handle) {
7494                 rte_flow_error_set(error, ENOMEM,
7495                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7496                                    "not enough memory to create flow handle");
7497                 return NULL;
7498         }
7499         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7500         dev_flow = &wks->flows[wks->flow_idx++];
7501         memset(dev_flow, 0, sizeof(*dev_flow));
7502         dev_flow->handle = dev_handle;
7503         dev_flow->handle_idx = handle_idx;
7504         /*
7505          * Some old rdma-core releases check the length of the matching
7506          * parameter first, so start with the length that excludes the
7507          * misc4 param. If the flow uses misc4, the length is adjusted
7508          * accordingly. Each param member is naturally aligned on a 64B
7509          * boundary.
7510          */
7511         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7512                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7513         dev_flow->ingress = attr->ingress;
7514         dev_flow->dv.transfer = attr->transfer;
7515         return dev_flow;
7516 }
7517
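/*
 * Size illustration for the matcher value set up above (a sketch, assuming
 * the 64B-aligned PRM layout described in the comment): fte_match_param is
 * a sequence of 64-byte match sets, so excluding fte_match_set_misc4
 * shortens the value by exactly one set:
 *
 *   dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) - 64
 *
 * and the length is adjusted back only when the flow matches on misc4
 * fields.
 */
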
7518 #ifdef RTE_LIBRTE_MLX5_DEBUG
7519 /**
7520  * Sanity check for match mask and value. Similar to check_valid_spec() in
7521  * kernel driver. If unmasked bit is present in value, it returns failure.
7522  *
7523  * @param match_mask
7524  *   pointer to match mask buffer.
7525  * @param match_value
7526  *   pointer to match value buffer.
7527  *
7528  * @return
7529  *   0 if valid, -EINVAL otherwise.
7530  */
7531 static int
7532 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7533 {
7534         uint8_t *m = match_mask;
7535         uint8_t *v = match_value;
7536         unsigned int i;
7537
7538         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7539                 if (v[i] & ~m[i]) {
7540                         DRV_LOG(ERR,
7541                                 "match_value differs from match_criteria"
7542                                 " %p[%u] != %p[%u]",
7543                                 match_value, i, match_mask, i);
7544                         return -EINVAL;
7545                 }
7546         }
7547         return 0;
7548 }
7549 #endif
7550
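/*
 * Worked example for flow_dv_check_valid_spec() (illustration only): with
 * a mask byte m[i] = 0x0f and a value byte v[i] = 0x1f,
 *
 *   v[i] & ~m[i] = 0x1f & 0xf0 = 0x10 != 0
 *
 * so the spec is rejected with -EINVAL: every set value bit must also be
 * set in the mask.
 */
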
7551 /**
7552  * Add match of ip_version.
7553  *
7554  * @param[in] group
7555  *   Flow group.
7556  * @param[in] headers_v
7557  *   Values header pointer.
7558  * @param[in] headers_m
7559  *   Masks header pointer.
7560  * @param[in] ip_version
7561  *   The IP version to set.
7562  */
7563 static inline void
7564 flow_dv_set_match_ip_version(uint32_t group,
7565                              void *headers_v,
7566                              void *headers_m,
7567                              uint8_t ip_version)
7568 {
7569         if (group == 0)
7570                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7571         else
7572                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7573                          ip_version);
7574         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7575         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7576         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7577 }
7578
7579 /**
7580  * Add Ethernet item to matcher and to the value.
7581  *
7582  * @param[in, out] matcher
7583  *   Flow matcher.
7584  * @param[in, out] key
7585  *   Flow matcher value.
7586  * @param[in] item
7587  *   Flow pattern to translate.
7588  * @param[in] inner
7589  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
7590  */
7591 static void
7592 flow_dv_translate_item_eth(void *matcher, void *key,
7593                            const struct rte_flow_item *item, int inner,
7594                            uint32_t group)
7595 {
7596         const struct rte_flow_item_eth *eth_m = item->mask;
7597         const struct rte_flow_item_eth *eth_v = item->spec;
7598         const struct rte_flow_item_eth nic_mask = {
7599                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7600                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7601                 .type = RTE_BE16(0xffff),
7602                 .has_vlan = 0,
7603         };
7604         void *hdrs_m;
7605         void *hdrs_v;
7606         char *l24_v;
7607         unsigned int i;
7608
7609         if (!eth_v)
7610                 return;
7611         if (!eth_m)
7612                 eth_m = &nic_mask;
7613         if (inner) {
7614                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7615                                          inner_headers);
7616                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7617         } else {
7618                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7619                                          outer_headers);
7620                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7621         }
7622         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7623                &eth_m->dst, sizeof(eth_m->dst));
7624         /* The value must be in the range of the mask. */
7625         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7626         for (i = 0; i < sizeof(eth_m->dst); ++i)
7627                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7628         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7629                &eth_m->src, sizeof(eth_m->src));
7630         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7631         /* The value must be in the range of the mask. */
7632         for (i = 0; i < sizeof(eth_m->src); ++i)
7633                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7634         /*
7635          * HW supports match on one Ethertype, the Ethertype following the last
7636          * VLAN tag of the packet (see PRM).
7637          * Set match on ethertype only if ETH header is not followed by VLAN.
7638          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7639          * ethertype, and use ip_version field instead.
7640          * eCPRI over Ether layer will use type value 0xAEFE.
7641          */
7642         if (eth_m->type == 0xFFFF) {
7643                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
7644                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7645                 switch (eth_v->type) {
7646                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7647                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7648                         return;
7649                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7650                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7651                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7652                         return;
7653                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7654                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7655                         return;
7656                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7657                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7658                         return;
7659                 default:
7660                         break;
7661                 }
7662         }
7663         if (eth_m->has_vlan) {
7664                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7665                 if (eth_v->has_vlan) {
7666                         /*
7667                          * Here, when also has_more_vlan field in VLAN item is
7668                          * not set, only single-tagged packets will be matched.
7669                          */
7670                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7671                         return;
7672                 }
7673         }
7674         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7675                  rte_be_to_cpu_16(eth_m->type));
7676         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
7677         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
7678 }
7679
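/*
 * Illustration (hypothetical item, not used by the driver): an ETH item
 * that fully masks the Ethertype as IPv4 takes the switch branch above and
 * is translated into an ip_version match instead of an ethertype match.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES /* assumed illustration-only guard */
static const struct rte_flow_item_eth example_eth_ipv4_spec = {
        .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_eth example_eth_ipv4_mask = {
        .type = RTE_BE16(0xffff),
};
static const struct rte_flow_item example_eth_ipv4_item = {
        .type = RTE_FLOW_ITEM_TYPE_ETH,
        .spec = &example_eth_ipv4_spec,
        .mask = &example_eth_ipv4_mask,
};
#endif /* MLX5_FLOW_DV_EXAMPLES */
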
7680 /**
7681  * Add VLAN item to matcher and to the value.
7682  *
7683  * @param[in, out] dev_flow
7684  *   Flow descriptor.
7685  * @param[in, out] matcher
7686  *   Flow matcher.
7687  * @param[in, out] key
7688  *   Flow matcher value.
7689  * @param[in] item
7690  *   Flow pattern to translate.
7691  * @param[in] inner
7692  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
7693  */
7694 static void
7695 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
7696                             void *matcher, void *key,
7697                             const struct rte_flow_item *item,
7698                             int inner, uint32_t group)
7699 {
7700         const struct rte_flow_item_vlan *vlan_m = item->mask;
7701         const struct rte_flow_item_vlan *vlan_v = item->spec;
7702         void *hdrs_m;
7703         void *hdrs_v;
7704         uint16_t tci_m;
7705         uint16_t tci_v;
7706
7707         if (inner) {
7708                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7709                                          inner_headers);
7710                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7711         } else {
7712                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7713                                          outer_headers);
7714                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7715                 /*
7716                  * This is a workaround: masks are not supported
7717                  * here and were pre-validated.
7718                  */
7719                 if (vlan_v)
7720                         dev_flow->handle->vf_vlan.tag =
7721                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
7722         }
7723         /*
7724          * When a VLAN item exists in the flow, mark the packet as
7725          * tagged, even if TCI is not specified.
7726          */
7727         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
7728                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7729                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7730         }
7731         if (!vlan_v)
7732                 return;
7733         if (!vlan_m)
7734                 vlan_m = &rte_flow_item_vlan_mask;
7735         tci_m = rte_be_to_cpu_16(vlan_m->tci);
7736         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
7737         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
7738         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
7739         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
7740         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
7741         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
7742         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
7743         /*
7744          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7745          * ethertype, and use ip_version field instead.
7746          */
7747         if (vlan_m->inner_type == 0xFFFF) {
7748                 switch (vlan_v->inner_type) {
7749                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7750                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7751                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7752                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7753                         return;
7754                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7755                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7756                         return;
7757                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7758                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7759                         return;
7760                 default:
7761                         break;
7762                 }
7763         }
7764         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
7765                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7766                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7767                 /* Only one vlan_tag bit can be set. */
7768                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7769                 return;
7770         }
7771         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7772                  rte_be_to_cpu_16(vlan_m->inner_type));
7773         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
7774                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
7775 }
7776
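/*
 * TCI decomposition example for the translation above (illustration):
 * for TCI = 0xa00a (PCP = 5, DEI = 0, VID = 0x00a):
 *
 *   first_prio = tci >> 13 = 5
 *   first_cfi  = tci >> 12 = 0xa, cropped by the 1-bit field to 0
 *   first_vid  = tci       = 0xa00a, cropped by the 12-bit field to 0x00a
 *
 * MLX5_SET() masks each written value to the destination field width, so
 * the extra high bits are discarded.
 */
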
7777 /**
7778  * Add IPV4 item to matcher and to the value.
7779  *
7780  * @param[in, out] matcher
7781  *   Flow matcher.
7782  * @param[in, out] key
7783  *   Flow matcher value.
7784  * @param[in] item
7785  *   Flow pattern to translate.
7786  * @param[in] inner
7787  *   Item is inner pattern.
7788  * @param[in] group
7789  *   The group to insert the rule.
7790  */
7791 static void
7792 flow_dv_translate_item_ipv4(void *matcher, void *key,
7793                             const struct rte_flow_item *item,
7794                             int inner, uint32_t group)
7795 {
7796         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
7797         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
7798         const struct rte_flow_item_ipv4 nic_mask = {
7799                 .hdr = {
7800                         .src_addr = RTE_BE32(0xffffffff),
7801                         .dst_addr = RTE_BE32(0xffffffff),
7802                         .type_of_service = 0xff,
7803                         .next_proto_id = 0xff,
7804                         .time_to_live = 0xff,
7805                 },
7806         };
7807         void *headers_m;
7808         void *headers_v;
7809         char *l24_m;
7810         char *l24_v;
7811         uint8_t tos;
7812
7813         if (inner) {
7814                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7815                                          inner_headers);
7816                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7817         } else {
7818                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7819                                          outer_headers);
7820                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7821         }
7822         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
7823         if (!ipv4_v)
7824                 return;
7825         if (!ipv4_m)
7826                 ipv4_m = &nic_mask;
7827         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7828                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7829         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7830                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7831         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
7832         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
7833         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7834                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7835         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7836                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7837         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
7838         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
7839         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
7840         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
7841                  ipv4_m->hdr.type_of_service);
7842         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
7843         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
7844                  ipv4_m->hdr.type_of_service >> 2);
7845         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
7846         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7847                  ipv4_m->hdr.next_proto_id);
7848         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7849                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
7850         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7851                  ipv4_m->hdr.time_to_live);
7852         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7853                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
7854         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7855                  !!(ipv4_m->hdr.fragment_offset));
7856         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7857                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
7858 }
7859
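/*
 * TOS split example for the IPv4 translation above (illustration): for
 * type_of_service = 0xb8 (DSCP 46 "EF", ECN 0):
 *
 *   ip_dscp = tos >> 2 = 0x2e = 46
 *   ip_ecn  = tos      = 0xb8, cropped by the 2-bit field to 0
 *
 * since MLX5_SET() masks the value to the field width.
 */
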
7860 /**
7861  * Add IPV6 item to matcher and to the value.
7862  *
7863  * @param[in, out] matcher
7864  *   Flow matcher.
7865  * @param[in, out] key
7866  *   Flow matcher value.
7867  * @param[in] item
7868  *   Flow pattern to translate.
7869  * @param[in] inner
7870  *   Item is inner pattern.
7871  * @param[in] group
7872  *   The group to insert the rule.
7873  */
7874 static void
7875 flow_dv_translate_item_ipv6(void *matcher, void *key,
7876                             const struct rte_flow_item *item,
7877                             int inner, uint32_t group)
7878 {
7879         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
7880         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
7881         const struct rte_flow_item_ipv6 nic_mask = {
7882                 .hdr = {
7883                         .src_addr =
7884                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7885                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7886                         .dst_addr =
7887                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7888                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7889                         .vtc_flow = RTE_BE32(0xffffffff),
7890                         .proto = 0xff,
7891                         .hop_limits = 0xff,
7892                 },
7893         };
7894         void *headers_m;
7895         void *headers_v;
7896         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7897         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7898         char *l24_m;
7899         char *l24_v;
7900         uint32_t vtc_m;
7901         uint32_t vtc_v;
7902         int i;
7903         int size;
7904
7905         if (inner) {
7906                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7907                                          inner_headers);
7908                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7909         } else {
7910                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7911                                          outer_headers);
7912                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7913         }
7914         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
7915         if (!ipv6_v)
7916                 return;
7917         if (!ipv6_m)
7918                 ipv6_m = &nic_mask;
7919         size = sizeof(ipv6_m->hdr.dst_addr);
7920         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7921                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7922         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7923                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7924         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
7925         for (i = 0; i < size; ++i)
7926                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
7927         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7928                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7929         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7930                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7931         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
7932         for (i = 0; i < size; ++i)
7933                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
7934         /* TOS. */
7935         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
7936         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
7937         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
7938         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
7939         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
7940         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
7941         /* Label. */
7942         if (inner) {
7943                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
7944                          vtc_m);
7945                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
7946                          vtc_v);
7947         } else {
7948                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
7949                          vtc_m);
7950                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
7951                          vtc_v);
7952         }
7953         /* Protocol. */
7954         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7955                  ipv6_m->hdr.proto);
7956         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7957                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
7958         /* Hop limit. */
7959         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7960                  ipv6_m->hdr.hop_limits);
7961         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7962                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
7963         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7964                  !!(ipv6_m->has_frag_ext));
7965         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7966                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
7967 }
7968
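/*
 * vtc_flow split example for the IPv6 translation above (illustration):
 * vtc_flow = version(4b) | traffic class(8b) | flow label(20b), so for
 * vtc_flow = 0x6b812345 (version 6, TC 0xb8, label 0x12345):
 *
 *   ip_dscp    = vtc >> 22 = 0x1ae, cropped by the 6-bit field to 0x2e (46)
 *   ip_ecn     = vtc >> 20 = 0x6b8, cropped by the 2-bit field to 0
 *   flow label = vtc, cropped by the 20-bit field to 0x12345
 */
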
7969 /**
7970  * Add IPV6 fragment extension item to matcher and to the value.
7971  *
7972  * @param[in, out] matcher
7973  *   Flow matcher.
7974  * @param[in, out] key
7975  *   Flow matcher value.
7976  * @param[in] item
7977  *   Flow pattern to translate.
7978  * @param[in] inner
7979  *   Item is inner pattern.
7980  */
7981 static void
7982 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
7983                                      const struct rte_flow_item *item,
7984                                      int inner)
7985 {
7986         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
7987         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
7988         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
7989                 .hdr = {
7990                         .next_header = 0xff,
7991                         .frag_data = RTE_BE16(0xffff),
7992                 },
7993         };
7994         void *headers_m;
7995         void *headers_v;
7996
7997         if (inner) {
7998                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7999                                          inner_headers);
8000                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8001         } else {
8002                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8003                                          outer_headers);
8004                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8005         }
8006         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8007         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8008         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8009         if (!ipv6_frag_ext_v)
8010                 return;
8011         if (!ipv6_frag_ext_m)
8012                 ipv6_frag_ext_m = &nic_mask;
8013         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8014                  ipv6_frag_ext_m->hdr.next_header);
8015         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8016                  ipv6_frag_ext_v->hdr.next_header &
8017                  ipv6_frag_ext_m->hdr.next_header);
8018 }
8019
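/*
 * Note (illustration): an empty IPV6_FRAG_EXT item (NULL spec and mask) is
 * still meaningful here - its mere presence sets frag = 1 in both mask and
 * value, i.e. "match any IPv6 fragment".
 */
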
8020 /**
8021  * Add TCP item to matcher and to the value.
8022  *
8023  * @param[in, out] matcher
8024  *   Flow matcher.
8025  * @param[in, out] key
8026  *   Flow matcher value.
8027  * @param[in] item
8028  *   Flow pattern to translate.
8029  * @param[in] inner
8030  *   Item is inner pattern.
8031  */
8032 static void
8033 flow_dv_translate_item_tcp(void *matcher, void *key,
8034                            const struct rte_flow_item *item,
8035                            int inner)
8036 {
8037         const struct rte_flow_item_tcp *tcp_m = item->mask;
8038         const struct rte_flow_item_tcp *tcp_v = item->spec;
8039         void *headers_m;
8040         void *headers_v;
8041
8042         if (inner) {
8043                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8044                                          inner_headers);
8045                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8046         } else {
8047                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8048                                          outer_headers);
8049                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8050         }
8051         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8052         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8053         if (!tcp_v)
8054                 return;
8055         if (!tcp_m)
8056                 tcp_m = &rte_flow_item_tcp_mask;
8057         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8058                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8059         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8060                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8061         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8062                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8063         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8064                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8065         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8066                  tcp_m->hdr.tcp_flags);
8067         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8068                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8069 }
8070
8071 /**
8072  * Add UDP item to matcher and to the value.
8073  *
8074  * @param[in, out] matcher
8075  *   Flow matcher.
8076  * @param[in, out] key
8077  *   Flow matcher value.
8078  * @param[in] item
8079  *   Flow pattern to translate.
8080  * @param[in] inner
8081  *   Item is inner pattern.
8082  */
8083 static void
8084 flow_dv_translate_item_udp(void *matcher, void *key,
8085                            const struct rte_flow_item *item,
8086                            int inner)
8087 {
8088         const struct rte_flow_item_udp *udp_m = item->mask;
8089         const struct rte_flow_item_udp *udp_v = item->spec;
8090         void *headers_m;
8091         void *headers_v;
8092
8093         if (inner) {
8094                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8095                                          inner_headers);
8096                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8097         } else {
8098                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8099                                          outer_headers);
8100                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8101         }
8102         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8103         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8104         if (!udp_v)
8105                 return;
8106         if (!udp_m)
8107                 udp_m = &rte_flow_item_udp_mask;
8108         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8109                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8110         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8111                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8112         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8113                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8114         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8115                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8116 }
8117
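/*
 * Note (illustration): as with TCP above, the UDP translation always pins
 * ip_protocol (mask 0xff, value IPPROTO_UDP), even when the item carries
 * no spec, so an empty UDP item still narrows the rule to UDP packets.
 */
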
8118 /**
8119  * Add GRE optional Key item to matcher and to the value.
8120  *
8121  * @param[in, out] matcher
8122  *   Flow matcher.
8123  * @param[in, out] key
8124  *   Flow matcher value.
8125  * @param[in] item
8126  *   Flow pattern to translate.
8129  */
8130 static void
8131 flow_dv_translate_item_gre_key(void *matcher, void *key,
8132                                    const struct rte_flow_item *item)
8133 {
8134         const rte_be32_t *key_m = item->mask;
8135         const rte_be32_t *key_v = item->spec;
8136         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8137         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8138         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8139
8140         /* GRE K bit must be on and should already be validated */
8141         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8142         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8143         if (!key_v)
8144                 return;
8145         if (!key_m)
8146                 key_m = &gre_key_default_mask;
8147         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8148                  rte_be_to_cpu_32(*key_m) >> 8);
8149         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8150                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8151         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8152                  rte_be_to_cpu_32(*key_m) & 0xFF);
8153         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8154                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8155 }
8156
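/*
 * GRE key split example for the translation above (illustration): the
 * 32-bit key is stored as a 24-bit high part and an 8-bit low part, so
 * for key = 0x01020304:
 *
 *   gre_key_h = key >> 8   = 0x010203
 *   gre_key_l = key & 0xff = 0x04
 */
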
8157 /**
8158  * Add GRE item to matcher and to the value.
8159  *
8160  * @param[in, out] matcher
8161  *   Flow matcher.
8162  * @param[in, out] key
8163  *   Flow matcher value.
8164  * @param[in] item
8165  *   Flow pattern to translate.
8166  * @param[in] inner
8167  *   Item is inner pattern.
8168  */
8169 static void
8170 flow_dv_translate_item_gre(void *matcher, void *key,
8171                            const struct rte_flow_item *item,
8172                            int inner)
8173 {
8174         const struct rte_flow_item_gre *gre_m = item->mask;
8175         const struct rte_flow_item_gre *gre_v = item->spec;
8176         void *headers_m;
8177         void *headers_v;
8178         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8179         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8180         struct {
8181                 union {
8182                         __extension__
8183                         struct {
8184                                 uint16_t version:3;
8185                                 uint16_t rsvd0:9;
8186                                 uint16_t s_present:1;
8187                                 uint16_t k_present:1;
8188                                 uint16_t rsvd_bit1:1;
8189                                 uint16_t c_present:1;
8190                         };
8191                         uint16_t value;
8192                 };
8193         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8194
8195         if (inner) {
8196                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8197                                          inner_headers);
8198                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8199         } else {
8200                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8201                                          outer_headers);
8202                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8203         }
8204         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8205         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8206         if (!gre_v)
8207                 return;
8208         if (!gre_m)
8209                 gre_m = &rte_flow_item_gre_mask;
8210         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8211                  rte_be_to_cpu_16(gre_m->protocol));
8212         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8213                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8214         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8215         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8216         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8217                  gre_crks_rsvd0_ver_m.c_present);
8218         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8219                  gre_crks_rsvd0_ver_v.c_present &
8220                  gre_crks_rsvd0_ver_m.c_present);
8221         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8222                  gre_crks_rsvd0_ver_m.k_present);
8223         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8224                  gre_crks_rsvd0_ver_v.k_present &
8225                  gre_crks_rsvd0_ver_m.k_present);
8226         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8227                  gre_crks_rsvd0_ver_m.s_present);
8228         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8229                  gre_crks_rsvd0_ver_v.s_present &
8230                  gre_crks_rsvd0_ver_m.s_present);
8231 }
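
/*
 * Illustrative sketch, not compiled into the driver: an application-side
 * GRE item whose translation above matches keyed GRE (K bit set) carrying
 * IPv4. The c_rsvd0_ver bits mirror the bit-field union used in
 * flow_dv_translate_item_gre(); all names below are hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES /* hypothetical guard, examples never built */
static const struct rte_flow_item_gre gre_keyed_spec = {
	.c_rsvd0_ver = RTE_BE16(0x2000), /* k_present = 1, version = 0 */
	.protocol = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_gre gre_keyed_mask = {
	.c_rsvd0_ver = RTE_BE16(0x2000), /* match the K bit only */
	.protocol = RTE_BE16(UINT16_MAX),
};
#endif /* MLX5_FLOW_DV_EXAMPLES */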
8232
8233 /**
8234  * Add NVGRE item to matcher and to the value.
8235  *
8236  * @param[in, out] matcher
8237  *   Flow matcher.
8238  * @param[in, out] key
8239  *   Flow matcher value.
8240  * @param[in] item
8241  *   Flow pattern to translate.
8242  * @param[in] inner
8243  *   Item is inner pattern.
8244  */
8245 static void
8246 flow_dv_translate_item_nvgre(void *matcher, void *key,
8247                              const struct rte_flow_item *item,
8248                              int inner)
8249 {
8250         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8251         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8252         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8253         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8254         const char *tni_flow_id_m;
8255         const char *tni_flow_id_v;
8256         char *gre_key_m;
8257         char *gre_key_v;
8258         int size;
8259         int i;
8260
8261         /* For NVGRE, GRE header fields must be set with defined values. */
8262         const struct rte_flow_item_gre gre_spec = {
8263                 .c_rsvd0_ver = RTE_BE16(0x2000),
8264                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8265         };
8266         const struct rte_flow_item_gre gre_mask = {
8267                 .c_rsvd0_ver = RTE_BE16(0xB000),
8268                 .protocol = RTE_BE16(UINT16_MAX),
8269         };
8270         const struct rte_flow_item gre_item = {
8271                 .spec = &gre_spec,
8272                 .mask = &gre_mask,
8273                 .last = NULL,
8274         };
8275         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8276         if (!nvgre_v)
8277                 return;
8278         if (!nvgre_m)
8279                 nvgre_m = &rte_flow_item_nvgre_mask;
8280         tni_flow_id_m = (const char *)nvgre_m->tni;
8281         tni_flow_id_v = (const char *)nvgre_v->tni;
8282         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8283         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8284         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8285         memcpy(gre_key_m, tni_flow_id_m, size);
8286         for (i = 0; i < size; ++i)
8287                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8288 }
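
/*
 * Layout sketch (hypothetical helper, never compiled): the 24-bit NVGRE
 * TNI and the 8-bit flow_id are contiguous in the item, so the memcpy()
 * above moves them as one 32-bit unit starting at gre_key_h; the TNI thus
 * lands in the key's upper 24 bits and flow_id in the low byte.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static uint32_t
nvgre_key_sketch(const struct rte_flow_item_nvgre *nvgre)
{
	/* Equivalent host-order view of the copied GRE key. */
	return ((uint32_t)nvgre->tni[0] << 24) |
	       ((uint32_t)nvgre->tni[1] << 16) |
	       ((uint32_t)nvgre->tni[2] << 8) |
	       nvgre->flow_id;
}
#endif /* MLX5_FLOW_DV_EXAMPLES */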
8289
8290 /**
8291  * Add VXLAN item to matcher and to the value.
8292  *
8293  * @param[in, out] matcher
8294  *   Flow matcher.
8295  * @param[in, out] key
8296  *   Flow matcher value.
8297  * @param[in] item
8298  *   Flow pattern to translate.
8299  * @param[in] inner
8300  *   Item is inner pattern.
8301  */
8302 static void
8303 flow_dv_translate_item_vxlan(void *matcher, void *key,
8304                              const struct rte_flow_item *item,
8305                              int inner)
8306 {
8307         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8308         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8309         void *headers_m;
8310         void *headers_v;
8311         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8312         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8313         char *vni_m;
8314         char *vni_v;
8315         uint16_t dport;
8316         int size;
8317         int i;
8318
8319         if (inner) {
8320                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8321                                          inner_headers);
8322                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8323         } else {
8324                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8325                                          outer_headers);
8326                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8327         }
8328         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8329                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8330         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8331                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8332                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8333         }
8334         if (!vxlan_v)
8335                 return;
8336         if (!vxlan_m)
8337                 vxlan_m = &rte_flow_item_vxlan_mask;
8338         size = sizeof(vxlan_m->vni);
8339         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8340         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8341         memcpy(vni_m, vxlan_m->vni, size);
8342         for (i = 0; i < size; ++i)
8343                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8344 }
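
/*
 * Usage sketch (hypothetical, never compiled): matching VNI 0x123456
 * exactly. The per-byte AND in flow_dv_translate_item_vxlan() guarantees
 * the programmed value never exceeds the supplied mask.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_item_vxlan vxlan_vni_spec = {
	.vni = { 0x12, 0x34, 0x56 }, /* 24-bit VNI, network byte order */
};
static const struct rte_flow_item_vxlan vxlan_vni_mask = {
	.vni = { 0xff, 0xff, 0xff },
};
#endif /* MLX5_FLOW_DV_EXAMPLES */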
8345
8346 /**
8347  * Add VXLAN-GPE item to matcher and to the value.
8348  *
8349  * @param[in, out] matcher
8350  *   Flow matcher.
8351  * @param[in, out] key
8352  *   Flow matcher value.
8353  * @param[in] item
8354  *   Flow pattern to translate.
8355  * @param[in] inner
8356  *   Item is inner pattern.
8357  */
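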
8359 static void
8360 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8361                                  const struct rte_flow_item *item, int inner)
8362 {
8363         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8364         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8365         void *headers_m;
8366         void *headers_v;
8367         void *misc_m =
8368                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8369         void *misc_v =
8370                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8371         char *vni_m;
8372         char *vni_v;
8373         uint16_t dport;
8374         int size;
8375         int i;
8376         uint8_t flags_m = 0xff;
8377         uint8_t flags_v = 0xc;
8378
8379         if (inner) {
8380                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8381                                          inner_headers);
8382                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8383         } else {
8384                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8385                                          outer_headers);
8386                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8387         }
8388         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8389                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8390         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8391                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8392                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8393         }
8394         if (!vxlan_v)
8395                 return;
8396         if (!vxlan_m)
8397                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8398         size = sizeof(vxlan_m->vni);
8399         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8400         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8401         memcpy(vni_m, vxlan_m->vni, size);
8402         for (i = 0; i < size; ++i)
8403                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8404         if (vxlan_m->flags) {
8405                 flags_m = vxlan_m->flags;
8406                 flags_v = vxlan_v->flags;
8407         }
8408         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8409         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8410         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8411                  vxlan_m->protocol);
8412         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8413                  vxlan_v->protocol);
8414 }
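
/*
 * Two notes on the translation above. The UDP dport ternary always
 * resolves to MLX5_UDP_PORT_VXLAN_GPE on this path, since item->type is
 * RTE_FLOW_ITEM_TYPE_VXLAN_GPE for every caller. The default flags value
 * 0x0c corresponds to the VXLAN-GPE I (instance/VNI valid) and
 * P (next-protocol present) bits both set, the usual case for GPE
 * packets carrying a valid VNI.
 */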
8415
8416 /**
8417  * Add Geneve item to matcher and to the value.
8418  *
8419  * @param[in, out] matcher
8420  *   Flow matcher.
8421  * @param[in, out] key
8422  *   Flow matcher value.
8423  * @param[in] item
8424  *   Flow pattern to translate.
8425  * @param[in] inner
8426  *   Item is inner pattern.
8427  */
8429 static void
8430 flow_dv_translate_item_geneve(void *matcher, void *key,
8431                               const struct rte_flow_item *item, int inner)
8432 {
8433         const struct rte_flow_item_geneve *geneve_m = item->mask;
8434         const struct rte_flow_item_geneve *geneve_v = item->spec;
8435         void *headers_m;
8436         void *headers_v;
8437         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8438         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8439         uint16_t dport;
8440         uint16_t gbhdr_m;
8441         uint16_t gbhdr_v;
8442         char *vni_m;
8443         char *vni_v;
8444         size_t size, i;
8445
8446         if (inner) {
8447                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8448                                          inner_headers);
8449                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8450         } else {
8451                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8452                                          outer_headers);
8453                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8454         }
8455         dport = MLX5_UDP_PORT_GENEVE;
8456         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8457                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8458                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8459         }
8460         if (!geneve_v)
8461                 return;
8462         if (!geneve_m)
8463                 geneve_m = &rte_flow_item_geneve_mask;
8464         size = sizeof(geneve_m->vni);
8465         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8466         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8467         memcpy(vni_m, geneve_m->vni, size);
8468         for (i = 0; i < size; ++i)
8469                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8470         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8471                  rte_be_to_cpu_16(geneve_m->protocol));
8472         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8473                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8474         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8475         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8476         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8477                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8478         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8479                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8480         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8481                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8482         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8483                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8484                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8485 }
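
/*
 * Bit-layout sketch (hypothetical helper, never compiled) of the first
 * GENEVE word used above: the version occupies the top 2 bits and the
 * 6-bit option length sits just below it, which is what
 * MLX5_GENEVE_OPTLEN_VAL() is expected to extract from the host-order
 * value.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static uint16_t
geneve_opt_len_sketch(uint16_t ver_opt_len_o_c_rsvd0)
{
	return (ver_opt_len_o_c_rsvd0 >> 8) & 0x3f;
}
#endif /* MLX5_FLOW_DV_EXAMPLES */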
8486
8487 /**
8488  * Create Geneve TLV option resource.
8489  *
8490  * @param[in, out] dev
8491  *   Pointer to rte_eth_dev structure.
8492  * @param[in] item
8493  *   Pointer to the GENEVE TLV option flow item.
8494  * @param[out] error
8495  *   Pointer to error structure.
8498  *
8499  * @return
8500  *   0 on success, a negative errno value otherwise and rte_errno is set.
8501  */
8503 int
8504 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8505                                              const struct rte_flow_item *item,
8506                                              struct rte_flow_error *error)
8507 {
8508         struct mlx5_priv *priv = dev->data->dev_private;
8509         struct mlx5_dev_ctx_shared *sh = priv->sh;
8510         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8511                         sh->geneve_tlv_option_resource;
8512         struct mlx5_devx_obj *obj;
8513         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8514         int ret = 0;
8515
8516         if (!geneve_opt_v)
8517                 return -1;
8518         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8519         if (geneve_opt_resource != NULL) {
8520                 if (geneve_opt_resource->option_class ==
8521                         geneve_opt_v->option_class &&
8522                         geneve_opt_resource->option_type ==
8523                         geneve_opt_v->option_type &&
8524                         geneve_opt_resource->length ==
8525                         geneve_opt_v->option_len) {
8526                         /* We already have GENEVE TLV option obj allocated. */
8527                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8528                                            __ATOMIC_RELAXED);
8529                 } else {
8530                         ret = rte_flow_error_set(error, ENOMEM,
8531                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8532                                 "Only one GENEVE TLV option supported");
8533                         goto exit;
8534                 }
8535         } else {
8536                 /* Create a GENEVE TLV object and resource. */
8537                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8538                                 geneve_opt_v->option_class,
8539                                 geneve_opt_v->option_type,
8540                                 geneve_opt_v->option_len);
8541                 if (!obj) {
8542                         ret = rte_flow_error_set(error, ENODATA,
8543                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8544                                 "Failed to create GENEVE TLV Devx object");
8545                         goto exit;
8546                 }
8547                 sh->geneve_tlv_option_resource =
8548                                 mlx5_malloc(MLX5_MEM_ZERO,
8549                                                 sizeof(*geneve_opt_resource),
8550                                                 0, SOCKET_ID_ANY);
8551                 if (!sh->geneve_tlv_option_resource) {
8552                         claim_zero(mlx5_devx_cmd_destroy(obj));
8553                         ret = rte_flow_error_set(error, ENOMEM,
8554                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8555                                 "GENEVE TLV object memory allocation failed");
8556                         goto exit;
8557                 }
8558                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8559                 geneve_opt_resource->obj = obj;
8560                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8561                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8562                 geneve_opt_resource->length = geneve_opt_v->option_len;
8563                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8564                                 __ATOMIC_RELAXED);
8565         }
8566 exit:
8567         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8568         return ret;
8569 }
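
/*
 * The function above is a reference-counted singleton per shared context:
 * the first caller creates the Devx object, later callers with the same
 * class/type/length tuple only take a reference, any other tuple is
 * rejected. A minimal sketch of the same pattern follows; every name in
 * it is hypothetical and it is never compiled.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
struct example_res {
	void *obj;
	uint32_t refcnt;
};

static struct example_res *example_singleton; /* caller holds the lock */

static int
example_res_get(void *(*create_cb)(void))
{
	if (example_singleton != NULL) {
		/* Matching resource already exists: take a reference. */
		__atomic_fetch_add(&example_singleton->refcnt, 1,
				   __ATOMIC_RELAXED);
		return 0;
	}
	example_singleton = mlx5_malloc(MLX5_MEM_ZERO,
					sizeof(*example_singleton),
					0, SOCKET_ID_ANY);
	if (example_singleton == NULL)
		return -ENOMEM;
	example_singleton->obj = create_cb();
	__atomic_store_n(&example_singleton->refcnt, 1, __ATOMIC_RELAXED);
	return 0;
}
#endif /* MLX5_FLOW_DV_EXAMPLES */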
8570
8571 /**
8572  * Add Geneve TLV option item to matcher.
8573  *
8574  * @param[in, out] dev
8575  *   Pointer to rte_eth_dev structure.
8576  * @param[in, out] matcher
8577  *   Flow matcher.
8578  * @param[in, out] key
8579  *   Flow matcher value.
8580  * @param[in] item
8581  *   Flow pattern to translate.
8582  * @param[out] error
8583  *   Pointer to error structure.
8584  *
8585  * @return
8586  *   0 on success, a negative errno value otherwise.
8587  */
8585 static int
8586 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8587                                   void *key, const struct rte_flow_item *item,
8588                                   struct rte_flow_error *error)
8589 {
8590         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8591         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8592         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8593         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8594         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8595                         misc_parameters_3);
8596         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8597         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8598         int ret = 0;
8599
8600         if (!geneve_opt_v)
8601                 return -1;
8602         if (!geneve_opt_m)
8603                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8604         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8605                                                            error);
8606         if (ret) {
8607                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8608                 return ret;
8609         }
8610         /*
8611          * Set the option length in GENEVE header if not requested.
8612          * The GENEVE TLV option length is expressed by the option length field
8613          * in the GENEVE header.
8614          * If the option length was not requested but the GENEVE TLV option item
8615          * is present, we set the option length field implicitly.
8616          */
8617         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8618                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8619                          MLX5_GENEVE_OPTLEN_MASK);
8620                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8621                          geneve_opt_v->option_len + 1);
8622         }
8623         /* Set the data. */
8624         if (geneve_opt_v->data) {
8625                 memcpy(&opt_data_key, geneve_opt_v->data,
8626                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8627                                 sizeof(opt_data_key)));
8628                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8629                                 sizeof(opt_data_key));
8630                 memcpy(&opt_data_mask, geneve_opt_m->data,
8631                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8632                                 sizeof(opt_data_mask)));
8633                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8634                                 sizeof(opt_data_mask));
8635                 MLX5_SET(fte_match_set_misc3, misc3_m,
8636                                 geneve_tlv_option_0_data,
8637                                 rte_be_to_cpu_32(opt_data_mask));
8638                 MLX5_SET(fte_match_set_misc3, misc3_v,
8639                                 geneve_tlv_option_0_data,
8640                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8641         }
8642         return ret;
8643 }
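
/*
 * Note on units in the translation above: option_len in the item counts
 * the option *data* in 4-byte words, while opt_len in the GENEVE base
 * header covers the whole options block including the 4-byte TLV option
 * header, hence the "+ 1". E.g. 8 bytes of option data: option_len = 2,
 * header opt_len = 3 (12 bytes total).
 */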
8644
8645 /**
8646  * Add MPLS item to matcher and to the value.
8647  *
8648  * @param[in, out] matcher
8649  *   Flow matcher.
8650  * @param[in, out] key
8651  *   Flow matcher value.
8652  * @param[in] item
8653  *   Flow pattern to translate.
8654  * @param[in] prev_layer
8655  *   The protocol layer indicated in previous item.
8656  * @param[in] inner
8657  *   Item is inner pattern.
8658  */
8659 static void
8660 flow_dv_translate_item_mpls(void *matcher, void *key,
8661                             const struct rte_flow_item *item,
8662                             uint64_t prev_layer,
8663                             int inner)
8664 {
8665         const uint32_t *in_mpls_m = item->mask;
8666         const uint32_t *in_mpls_v = item->spec;
8667         uint32_t *out_mpls_m = 0;
8668         uint32_t *out_mpls_v = 0;
8669         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8670         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8671         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
8672                                      misc_parameters_2);
8673         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8674         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8675         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8676
8677         switch (prev_layer) {
8678         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8679                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
8680                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8681                          MLX5_UDP_PORT_MPLS);
8682                 break;
8683         case MLX5_FLOW_LAYER_GRE:
8684                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
8685                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8686                          RTE_ETHER_TYPE_MPLS);
8687                 break;
8688         default:
8689                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8690                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8691                          IPPROTO_MPLS);
8692                 break;
8693         }
8694         if (!in_mpls_v)
8695                 return;
8696         if (!in_mpls_m)
8697                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
8698         switch (prev_layer) {
8699         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8700                 out_mpls_m =
8701                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8702                                                  outer_first_mpls_over_udp);
8703                 out_mpls_v =
8704                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8705                                                  outer_first_mpls_over_udp);
8706                 break;
8707         case MLX5_FLOW_LAYER_GRE:
8708                 out_mpls_m =
8709                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8710                                                  outer_first_mpls_over_gre);
8711                 out_mpls_v =
8712                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8713                                                  outer_first_mpls_over_gre);
8714                 break;
8715         default:
8716                 /* Inner MPLS not over GRE is not supported. */
8717                 if (!inner) {
8718                         out_mpls_m =
8719                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8720                                                          misc2_m,
8721                                                          outer_first_mpls);
8722                         out_mpls_v =
8723                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8724                                                          misc2_v,
8725                                                          outer_first_mpls);
8726                 }
8727                 break;
8728         }
8729         if (out_mpls_m && out_mpls_v) {
8730                 *out_mpls_m = *in_mpls_m;
8731                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
8732         }
8733 }
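
/*
 * Usage sketch (hypothetical, never compiled): matching MPLS label 100
 * with bottom-of-stack set. label_tc_s packs label(20) | tc(3) | s(1) in
 * network byte order, and the translation above consumes the whole item
 * as a single 32-bit word together with the TTL byte.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_item_mpls mpls_label_spec = {
	.label_tc_s = { 0x00, 0x06, 0x41 }, /* (100 << 4) | s-bit */
};
static const struct rte_flow_item_mpls mpls_label_mask = {
	.label_tc_s = { 0xff, 0xff, 0xf1 }, /* label bits + s bit */
};
#endif /* MLX5_FLOW_DV_EXAMPLES */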
8734
8735 /**
8736  * Add metadata register item to matcher
8737  *
8738  * @param[in, out] matcher
8739  *   Flow matcher.
8740  * @param[in, out] key
8741  *   Flow matcher value.
8742  * @param[in] reg_type
8743  *   Type of device metadata register
8744  * @param[in] value
8745  *   Register value
8746  * @param[in] mask
8747  *   Register mask
8748  */
8749 static void
8750 flow_dv_match_meta_reg(void *matcher, void *key,
8751                        enum modify_reg reg_type,
8752                        uint32_t data, uint32_t mask)
8753 {
8754         void *misc2_m =
8755                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
8756         void *misc2_v =
8757                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8758         uint32_t temp;
8759
8760         data &= mask;
8761         switch (reg_type) {
8762         case REG_A:
8763                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
8764                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
8765                 break;
8766         case REG_B:
8767                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
8768                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
8769                 break;
8770         case REG_C_0:
8771                 /*
8772                  * The metadata register C0 field might be divided into
8773                  * source vport index and META item value, we should set
8774                  * this field according to specified mask, not as whole one.
8775                  */
8776                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
8777                 temp |= mask;
8778                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
8779                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
8780                 temp &= ~mask;
8781                 temp |= data;
8782                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
8783                 break;
8784         case REG_C_1:
8785                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
8786                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
8787                 break;
8788         case REG_C_2:
8789                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
8790                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
8791                 break;
8792         case REG_C_3:
8793                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
8794                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
8795                 break;
8796         case REG_C_4:
8797                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
8798                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
8799                 break;
8800         case REG_C_5:
8801                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
8802                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
8803                 break;
8804         case REG_C_6:
8805                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
8806                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
8807                 break;
8808         case REG_C_7:
8809                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
8810                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
8811                 break;
8812         default:
8813                 MLX5_ASSERT(false);
8814                 break;
8815         }
8816 }
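
/*
 * Worked example of the REG_C_0 read-modify-write above: assume the
 * matcher already holds vport metadata mask 0x00ff with value 0x00ab and
 * META occupies the upper half. Matching META data 0x1200 with mask
 * 0xff00 must not clobber the vport bits:
 *   matcher mask: 0x00ff | 0xff00             = 0xffff
 *   key value:    (0x00ab & ~0xff00) | 0x1200 = 0x12ab
 */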
8817
8818 /**
8819  * Add MARK item to matcher
8820  *
8821  * @param[in] dev
8822  *   The device to configure through.
8823  * @param[in, out] matcher
8824  *   Flow matcher.
8825  * @param[in, out] key
8826  *   Flow matcher value.
8827  * @param[in] item
8828  *   Flow pattern to translate.
8829  */
8830 static void
8831 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
8832                             void *matcher, void *key,
8833                             const struct rte_flow_item *item)
8834 {
8835         struct mlx5_priv *priv = dev->data->dev_private;
8836         const struct rte_flow_item_mark *mark;
8837         uint32_t value;
8838         uint32_t mask;
8839
8840         mark = item->mask ? (const void *)item->mask :
8841                             &rte_flow_item_mark_mask;
8842         mask = mark->id & priv->sh->dv_mark_mask;
8843         mark = (const void *)item->spec;
8844         MLX5_ASSERT(mark);
8845         value = mark->id & priv->sh->dv_mark_mask & mask;
8846         if (mask) {
8847                 enum modify_reg reg;
8848
8849                 /* Get the metadata register index for the mark. */
8850                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
8851                 MLX5_ASSERT(reg > 0);
8852                 if (reg == REG_C_0) {
8853                         struct mlx5_priv *priv = dev->data->dev_private;
8854                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8855                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8856
8857                         mask &= msk_c0;
8858                         mask <<= shl_c0;
8859                         value <<= shl_c0;
8860                 }
8861                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8862         }
8863 }
8864
8865 /**
8866  * Add META item to matcher
8867  *
8868  * @param[in] dev
8869  *   The device to configure through.
8870  * @param[in, out] matcher
8871  *   Flow matcher.
8872  * @param[in, out] key
8873  *   Flow matcher value.
8874  * @param[in] attr
8875  *   Attributes of flow that includes this item.
8876  * @param[in] item
8877  *   Flow pattern to translate.
8878  */
8879 static void
8880 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
8881                             void *matcher, void *key,
8882                             const struct rte_flow_attr *attr,
8883                             const struct rte_flow_item *item)
8884 {
8885         const struct rte_flow_item_meta *meta_m;
8886         const struct rte_flow_item_meta *meta_v;
8887
8888         meta_m = (const void *)item->mask;
8889         if (!meta_m)
8890                 meta_m = &rte_flow_item_meta_mask;
8891         meta_v = (const void *)item->spec;
8892         if (meta_v) {
8893                 int reg;
8894                 uint32_t value = meta_v->data;
8895                 uint32_t mask = meta_m->data;
8896
8897                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
8898                 if (reg < 0)
8899                         return;
8900                 MLX5_ASSERT(reg != REG_NON);
8901                 /*
8902                  * In datapath code there are no endianness
8903                  * conversions for performance reasons; all
8904                  * pattern conversions are done in rte_flow.
8905                  */
8906                 value = rte_cpu_to_be_32(value);
8907                 mask = rte_cpu_to_be_32(mask);
8908                 if (reg == REG_C_0) {
8909                         struct mlx5_priv *priv = dev->data->dev_private;
8910                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8911                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8912 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
8913                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
8914
8915                         value >>= shr_c0;
8916                         mask >>= shr_c0;
8917 #endif
8918                         value <<= shl_c0;
8919                         mask <<= shl_c0;
8920                         MLX5_ASSERT(msk_c0);
8921                         MLX5_ASSERT(!(~msk_c0 & mask));
8922                 }
8923                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8924         }
8925 }
8926
8927 /**
8928  * Add vport metadata Reg C0 item to matcher
8929  *
8930  * @param[in, out] matcher
8931  *   Flow matcher.
8932  * @param[in, out] key
8933  *   Flow matcher value.
8934  * @param[in] value
8935  *   Register value to match.
8936  * @param[in] mask
8937  *   Register mask.
8936  */
8937 static void
8938 flow_dv_translate_item_meta_vport(void *matcher, void *key,
8939                                   uint32_t value, uint32_t mask)
8940 {
8941         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
8942 }
8943
8944 /**
8945  * Add tag item to matcher
8946  *
8947  * @param[in] dev
8948  *   The device to configure through.
8949  * @param[in, out] matcher
8950  *   Flow matcher.
8951  * @param[in, out] key
8952  *   Flow matcher value.
8953  * @param[in] item
8954  *   Flow pattern to translate.
8955  */
8956 static void
8957 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
8958                                 void *matcher, void *key,
8959                                 const struct rte_flow_item *item)
8960 {
8961         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
8962         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
8963         uint32_t mask, value;
8964
8965         MLX5_ASSERT(tag_v);
8966         value = tag_v->data;
8967         mask = tag_m ? tag_m->data : UINT32_MAX;
8968         if (tag_v->id == REG_C_0) {
8969                 struct mlx5_priv *priv = dev->data->dev_private;
8970                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8971                 uint32_t shl_c0 = rte_bsf32(msk_c0);
8972
8973                 mask &= msk_c0;
8974                 mask <<= shl_c0;
8975                 value <<= shl_c0;
8976         }
8977         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
8978 }
8979
8980 /**
8981  * Add TAG item to matcher
8982  *
8983  * @param[in] dev
8984  *   The device to configure through.
8985  * @param[in, out] matcher
8986  *   Flow matcher.
8987  * @param[in, out] key
8988  *   Flow matcher value.
8989  * @param[in] item
8990  *   Flow pattern to translate.
8991  */
8992 static void
8993 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
8994                            void *matcher, void *key,
8995                            const struct rte_flow_item *item)
8996 {
8997         const struct rte_flow_item_tag *tag_v = item->spec;
8998         const struct rte_flow_item_tag *tag_m = item->mask;
8999         enum modify_reg reg;
9000
9001         MLX5_ASSERT(tag_v);
9002         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9003         /* Get the metadata register index for the tag. */
9004         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9005         MLX5_ASSERT(reg > 0);
9006         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9007 }
9008
9009 /**
9010  * Add source vport match to the specified matcher.
9011  *
9012  * @param[in, out] matcher
9013  *   Flow matcher.
9014  * @param[in, out] key
9015  *   Flow matcher value.
9016  * @param[in] port
9017  *   Source vport value to match.
9018  * @param[in] mask
9019  *   Mask to apply.
9020  */
9021 static void
9022 flow_dv_translate_item_source_vport(void *matcher, void *key,
9023                                     int16_t port, uint16_t mask)
9024 {
9025         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9026         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9027
9028         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9029         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9030 }
9031
9032 /**
9033  * Translate port-id item to eswitch match on port-id.
9034  *
9035  * @param[in] dev
9036  *   The device to configure through.
9037  * @param[in, out] matcher
9038  *   Flow matcher.
9039  * @param[in, out] key
9040  *   Flow matcher value.
9041  * @param[in] item
9042  *   Flow pattern to translate.
9043  * @param[in] attr
9044  *   Flow attributes.
9045  *
9046  * @return
9047  *   0 on success, a negative errno value otherwise.
9048  */
9049 static int
9050 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9051                                void *key, const struct rte_flow_item *item,
9052                                const struct rte_flow_attr *attr)
9053 {
9054         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9055         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9056         struct mlx5_priv *priv;
9057         uint16_t mask, id;
9058
9059         mask = pid_m ? pid_m->id : 0xffff;
9060         id = pid_v ? pid_v->id : dev->data->port_id;
9061         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9062         if (!priv)
9063                 return -rte_errno;
9064         /*
9065          * Translate to vport field or to metadata, depending on mode.
9066          * Kernel can use either misc.source_port or half of C0 metadata
9067          * register.
9068          */
9069         if (priv->vport_meta_mask) {
9070                 /*
9071                  * Provide the hint for SW steering library
9072                  * to insert the flow into ingress domain and
9073                  * save the extra vport match.
9074                  */
9075                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9076                     priv->pf_bond < 0 && attr->transfer)
9077                         flow_dv_translate_item_source_vport
9078                                 (matcher, key, priv->vport_id, mask);
9079                 /*
9080                  * We should always set the vport metadata register,
9081                  * otherwise the SW steering library can drop
9082                  * the rule if wire vport metadata value is not zero,
9083                  * it depends on kernel configuration.
9084                  */
9085                 flow_dv_translate_item_meta_vport(matcher, key,
9086                                                   priv->vport_meta_tag,
9087                                                   priv->vport_meta_mask);
9088         } else {
9089                 flow_dv_translate_item_source_vport(matcher, key,
9090                                                     priv->vport_id, mask);
9091         }
9092         return 0;
9093 }
9094
9095 /**
9096  * Add ICMP6 item to matcher and to the value.
9097  *
9098  * @param[in, out] matcher
9099  *   Flow matcher.
9100  * @param[in, out] key
9101  *   Flow matcher value.
9102  * @param[in] item
9103  *   Flow pattern to translate.
9104  * @param[in] inner
9105  *   Item is inner pattern.
9106  */
9107 static void
9108 flow_dv_translate_item_icmp6(void *matcher, void *key,
9109                               const struct rte_flow_item *item,
9110                               int inner)
9111 {
9112         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9113         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9114         void *headers_m;
9115         void *headers_v;
9116         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9117                                      misc_parameters_3);
9118         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9119         if (inner) {
9120                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9121                                          inner_headers);
9122                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9123         } else {
9124                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9125                                          outer_headers);
9126                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9127         }
9128         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9129         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9130         if (!icmp6_v)
9131                 return;
9132         if (!icmp6_m)
9133                 icmp6_m = &rte_flow_item_icmp6_mask;
9134         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9135         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9136                  icmp6_v->type & icmp6_m->type);
9137         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9138         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9139                  icmp6_v->code & icmp6_m->code);
9140 }
9141
9142 /**
9143  * Add ICMP item to matcher and to the value.
9144  *
9145  * @param[in, out] matcher
9146  *   Flow matcher.
9147  * @param[in, out] key
9148  *   Flow matcher value.
9149  * @param[in] item
9150  *   Flow pattern to translate.
9151  * @param[in] inner
9152  *   Item is inner pattern.
9153  */
9154 static void
9155 flow_dv_translate_item_icmp(void *matcher, void *key,
9156                             const struct rte_flow_item *item,
9157                             int inner)
9158 {
9159         const struct rte_flow_item_icmp *icmp_m = item->mask;
9160         const struct rte_flow_item_icmp *icmp_v = item->spec;
9161         uint32_t icmp_header_data_m = 0;
9162         uint32_t icmp_header_data_v = 0;
9163         void *headers_m;
9164         void *headers_v;
9165         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9166                                      misc_parameters_3);
9167         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9168         if (inner) {
9169                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9170                                          inner_headers);
9171                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9172         } else {
9173                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9174                                          outer_headers);
9175                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9176         }
9177         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9178         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9179         if (!icmp_v)
9180                 return;
9181         if (!icmp_m)
9182                 icmp_m = &rte_flow_item_icmp_mask;
9183         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9184                  icmp_m->hdr.icmp_type);
9185         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9186                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9187         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9188                  icmp_m->hdr.icmp_code);
9189         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9190                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9191         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9192         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9193         if (icmp_header_data_m) {
9194                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9195                 icmp_header_data_v |=
9196                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9197                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9198                          icmp_header_data_m);
9199                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9200                          icmp_header_data_v & icmp_header_data_m);
9201         }
9202 }
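
/*
 * Packing note for the translation above: the ICMP "header data" dword
 * carries the identifier in its upper 16 bits and the sequence number in
 * the lower 16, e.g. ident 0x0001 and seq 0x0002 yield icmp_header_data
 * 0x00010002.
 */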
9203
9204 /**
9205  * Add GTP item to matcher and to the value.
9206  *
9207  * @param[in, out] matcher
9208  *   Flow matcher.
9209  * @param[in, out] key
9210  *   Flow matcher value.
9211  * @param[in] item
9212  *   Flow pattern to translate.
9213  * @param[in] inner
9214  *   Item is inner pattern.
9215  */
9216 static void
9217 flow_dv_translate_item_gtp(void *matcher, void *key,
9218                            const struct rte_flow_item *item, int inner)
9219 {
9220         const struct rte_flow_item_gtp *gtp_m = item->mask;
9221         const struct rte_flow_item_gtp *gtp_v = item->spec;
9222         void *headers_m;
9223         void *headers_v;
9224         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9225                                      misc_parameters_3);
9226         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9227         uint16_t dport = RTE_GTPU_UDP_PORT;
9228
9229         if (inner) {
9230                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9231                                          inner_headers);
9232                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9233         } else {
9234                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9235                                          outer_headers);
9236                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9237         }
9238         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9239                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9240                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9241         }
9242         if (!gtp_v)
9243                 return;
9244         if (!gtp_m)
9245                 gtp_m = &rte_flow_item_gtp_mask;
9246         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9247                  gtp_m->v_pt_rsv_flags);
9248         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9249                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9250         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9251         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9252                  gtp_v->msg_type & gtp_m->msg_type);
9253         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9254                  rte_be_to_cpu_32(gtp_m->teid));
9255         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9256                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9257 }
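
/*
 * Usage sketch (hypothetical, never compiled): matching GTP-U TEID 0x1234
 * regardless of flags and message type; only the fields with a non-zero
 * mask are programmed by the translation above.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_item_gtp gtp_teid_spec = {
	.teid = RTE_BE32(0x1234),
};
static const struct rte_flow_item_gtp gtp_teid_mask = {
	.teid = RTE_BE32(UINT32_MAX),
};
#endif /* MLX5_FLOW_DV_EXAMPLES */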
9258
9259 /**
9260  * Add GTP PSC item to matcher.
9261  *
9262  * @param[in, out] matcher
9263  *   Flow matcher.
9264  * @param[in, out] key
9265  *   Flow matcher value.
9266  * @param[in] item
9267  *   Flow pattern to translate.
9268  */
9269 static int
9270 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9271                                const struct rte_flow_item *item)
9272 {
9273         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9274         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9275         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9276                         misc_parameters_3);
9277         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9278         union {
9279                 uint32_t w32;
9280                 struct {
9281                         uint16_t seq_num;
9282                         uint8_t npdu_num;
9283                         uint8_t next_ext_header_type;
9284                 };
9285         } dw_2;
9286         uint8_t gtp_flags;
9287
9288         /* Always set E-flag match on one, regardless of GTP item settings. */
9289         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9290         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9291         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9292         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9293         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9294         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9295         /* Set next extension header type. */
9296         dw_2.seq_num = 0;
9297         dw_2.npdu_num = 0;
9298         dw_2.next_ext_header_type = 0xff;
9299         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9300                  rte_cpu_to_be_32(dw_2.w32));
9301         dw_2.seq_num = 0;
9302         dw_2.npdu_num = 0;
9303         dw_2.next_ext_header_type = 0x85;
9304         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9305                  rte_cpu_to_be_32(dw_2.w32));
9306         if (gtp_psc_v) {
9307                 union {
9308                         uint32_t w32;
9309                         struct {
9310                                 uint8_t len;
9311                                 uint8_t type_flags;
9312                                 uint8_t qfi;
9313                                 uint8_t reserved;
9314                         };
9315                 } dw_0;
9316
9317                 /* Set extension header PDU type and QoS. */
9318                 if (!gtp_psc_m)
9319                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9320                 dw_0.w32 = 0;
9321                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9322                 dw_0.qfi = gtp_psc_m->qfi;
9323                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9324                          rte_cpu_to_be_32(dw_0.w32));
9325                 dw_0.w32 = 0;
9326                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9327                                                         gtp_psc_m->pdu_type);
9328                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9329                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9330                          rte_cpu_to_be_32(dw_0.w32));
9331         }
9332         return 0;
9333 }
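
/*
 * Note on the dword match above: 0x85 is the GTP-U extension header type
 * of the PDU Session Container, so the rule pins the first extension
 * header to PSC while leaving sequence and N-PDU numbers unmatched.
 */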
9334
9335 /**
9336  * Add eCPRI item to matcher and to the value.
9337  *
9338  * @param[in] dev
9339  *   The device to configure through.
9340  * @param[in, out] matcher
9341  *   Flow matcher.
9342  * @param[in, out] key
9343  *   Flow matcher value.
9344  * @param[in] item
9345  *   Flow pattern to translate.
9346  * @param[in] samples
9347  *   Sample IDs to be used in the matching.
9348  */
9349 static void
9350 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9351                              void *key, const struct rte_flow_item *item)
9352 {
9353         struct mlx5_priv *priv = dev->data->dev_private;
9354         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9355         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9356         struct rte_ecpri_common_hdr common;
9357         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9358                                      misc_parameters_4);
9359         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9360         uint32_t *samples;
9361         void *dw_m;
9362         void *dw_v;
9363
9364         if (!ecpri_v)
9365                 return;
9366         if (!ecpri_m)
9367                 ecpri_m = &rte_flow_item_ecpri_mask;
9368         /*
9369          * Maximal four DW samples are supported in a single matching now.
9370          * Two are used now for eCPRI matching:
9371          * 1. Type: one byte, mask should be 0x00ff0000 in network order
9372          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
9373          *    if any.
9374          */
9375         if (!ecpri_m->hdr.common.u32)
9376                 return;
9377         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9378         /* Need to take the whole DW as the mask to fill the entry. */
9379         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9380                             prog_sample_field_value_0);
9381         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9382                             prog_sample_field_value_0);
9383         /* Already big endian (network order) in the header. */
9384         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9385         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9386         /* Sample#0, used for matching type, offset 0. */
9387         MLX5_SET(fte_match_set_misc4, misc4_m,
9388                  prog_sample_field_id_0, samples[0]);
9389         /* It makes no sense to set the sample ID in the mask field. */
9390         MLX5_SET(fte_match_set_misc4, misc4_v,
9391                  prog_sample_field_id_0, samples[0]);
9392         /*
9393          * Checking if message body part needs to be matched.
9394          * Some wildcard rules only matching type field should be supported.
9395          */
9396         if (ecpri_m->hdr.dummy[0]) {
9397                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9398                 switch (common.type) {
9399                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9400                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9401                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9402                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9403                                             prog_sample_field_value_1);
9404                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9405                                             prog_sample_field_value_1);
9406                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9407                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9408                                             ecpri_m->hdr.dummy[0];
9409                         /* Sample#1, to match message body, offset 4. */
9410                         MLX5_SET(fte_match_set_misc4, misc4_m,
9411                                  prog_sample_field_id_1, samples[1]);
9412                         MLX5_SET(fte_match_set_misc4, misc4_v,
9413                                  prog_sample_field_id_1, samples[1]);
9414                         break;
9415                 default:
9416                         /* Others, do not match any sample ID. */
9417                         break;
9418                 }
9419         }
9420 }
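
/*
 * Mask sketch (hypothetical, never compiled): an eCPRI mask that matches
 * the one-byte message type only. The type byte is the third octet of the
 * first dword, hence 0x00ff0000 in network order as described above, and
 * such a rule never programs sample #1.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_item_ecpri ecpri_type_only_mask = {
	.hdr = {
		.common = { .u32 = RTE_BE32(0x00ff0000) },
	},
};
#endif /* MLX5_FLOW_DV_EXAMPLES */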
9421
9422 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9423
9424 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9425         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9426                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9427
9428 /**
9429  * Calculate flow matcher enable bitmap.
9430  *
9431  * @param match_criteria
9432  *   Pointer to flow matcher criteria.
9433  *
9434  * @return
9435  *   Bitmap of enabled fields.
9436  */
9437 static uint8_t
9438 flow_dv_matcher_enable(uint32_t *match_criteria)
9439 {
9440         uint8_t match_criteria_enable;
9441
9442         match_criteria_enable =
9443                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9444                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9445         match_criteria_enable |=
9446                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9447                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9448         match_criteria_enable |=
9449                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9450                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9451         match_criteria_enable |=
9452                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9453                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9454         match_criteria_enable |=
9455                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9456                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9457         match_criteria_enable |=
9458                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9459                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9460         return match_criteria_enable;
9461 }
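
/*
 * Example: a matcher touching only outer headers and misc_parameters_2
 * (say, outer IPv4 plus a metadata register) yields
 * match_criteria_enable =
 *   (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *   (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT).
 */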
9462
9463 struct mlx5_hlist_entry *
9464 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9465 {
9466         struct mlx5_dev_ctx_shared *sh = list->ctx;
9467         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9468         struct rte_eth_dev *dev = ctx->dev;
9469         struct mlx5_flow_tbl_data_entry *tbl_data;
9470         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9471         struct rte_flow_error *error = ctx->error;
9472         union mlx5_flow_tbl_key key = { .v64 = key64 };
9473         struct mlx5_flow_tbl_resource *tbl;
9474         void *domain;
9475         uint32_t idx = 0;
9476         int ret;
9477
9478         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9479         if (!tbl_data) {
9480                 rte_flow_error_set(error, ENOMEM,
9481                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9482                                    NULL,
9483                                    "cannot allocate flow table data entry");
9484                 return NULL;
9485         }
9486         tbl_data->idx = idx;
9487         tbl_data->tunnel = tt_prm->tunnel;
9488         tbl_data->group_id = tt_prm->group_id;
9489         tbl_data->external = !!tt_prm->external;
9490         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9491         tbl_data->is_egress = !!key.is_egress;
9492         tbl_data->is_transfer = !!key.is_fdb;
9493         tbl_data->dummy = !!key.dummy;
9494         tbl_data->level = key.level;
9495         tbl_data->id = key.id;
9496         tbl = &tbl_data->tbl;
9497         if (key.dummy)
9498                 return &tbl_data->entry;
9499         if (key.is_fdb)
9500                 domain = sh->fdb_domain;
9501         else if (key.is_egress)
9502                 domain = sh->tx_domain;
9503         else
9504                 domain = sh->rx_domain;
9505         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
9506         if (ret) {
9507                 rte_flow_error_set(error, ENOMEM,
9508                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9509                                    NULL, "cannot create flow table object");
9510                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9511                 return NULL;
9512         }
9513         if (key.level != 0) {
9514                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9515                                         (tbl->obj, &tbl_data->jump.action);
9516                 if (ret) {
9517                         rte_flow_error_set(error, ENOMEM,
9518                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9519                                            NULL,
9520                                            "cannot create flow jump action");
9521                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9522                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9523                         return NULL;
9524                 }
9525         }
9526         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
9527               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
9528               key.level, key.id);
9529         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9530                              flow_dv_matcher_create_cb,
9531                              flow_dv_matcher_match_cb,
9532                              flow_dv_matcher_remove_cb);
9533         return &tbl_data->entry;
9534 }
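
/*
 * Sketch (hypothetical values): the 64-bit hash-list key used above
 * packs all table properties, so two tables differing in any field get
 * distinct cache entries. A NIC Rx table at level 3 would be keyed as:
 *
 *   union mlx5_flow_tbl_key key = {
 *           {
 *                   .level = 3,
 *                   .id = 0,
 *                   .dummy = 0,
 *                   .is_fdb = 0,
 *                   .is_egress = 0,
 *           }
 *   };
 *   // key.v64 is the value passed to mlx5_hlist_register().
 */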
9535
9536 int
9537 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9538                      struct mlx5_hlist_entry *entry, uint64_t key64,
9539                      void *cb_ctx __rte_unused)
9540 {
9541         struct mlx5_flow_tbl_data_entry *tbl_data =
9542                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9543         union mlx5_flow_tbl_key key = { .v64 = key64 };
9544
9545         return tbl_data->level != key.level ||
9546                tbl_data->id != key.id ||
9547                tbl_data->dummy != key.dummy ||
9548                tbl_data->is_transfer != !!key.is_fdb ||
9549                tbl_data->is_egress != !!key.is_egress;
9550 }
9551
9552 /**
9553  * Get a flow table.
9554  *
9555  * @param[in, out] dev
9556  *   Pointer to rte_eth_dev structure.
9557  * @param[in] table_level
9558  *   Table level to use.
9559  * @param[in] egress
9560  *   Direction of the table.
9561  * @param[in] transfer
9562  *   E-Switch or NIC flow.
 * @param[in] external
 *   External (application-created) table flag.
 * @param[in] tunnel
 *   Tunnel offload context, or NULL when not used.
 * @param[in] group_id
 *   Tunnel group id for the table.
 * @param[in] dummy
 *   Dummy entry for dv API.
 * @param[in] table_id
 *   Table id to use.
 * @param[out] error
 *   Pointer to the error structure.
9569  *
9570  * @return
 *   Returns table resource based on the key, NULL in case of failure.
9572  */
9573 struct mlx5_flow_tbl_resource *
9574 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
9575                          uint32_t table_level, uint8_t egress,
9576                          uint8_t transfer,
9577                          bool external,
9578                          const struct mlx5_flow_tunnel *tunnel,
9579                          uint32_t group_id, uint8_t dummy,
9580                          uint32_t table_id,
9581                          struct rte_flow_error *error)
9582 {
9583         struct mlx5_priv *priv = dev->data->dev_private;
9584         union mlx5_flow_tbl_key table_key = {
9585                 {
9586                         .level = table_level,
9587                         .id = table_id,
9588                         .reserved = 0,
9589                         .dummy = !!dummy,
9590                         .is_fdb = !!transfer,
9591                         .is_egress = !!egress,
9592                 }
9593         };
9594         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
9595                 .tunnel = tunnel,
9596                 .group_id = group_id,
9597                 .external = external,
9598         };
9599         struct mlx5_flow_cb_ctx ctx = {
9600                 .dev = dev,
9601                 .error = error,
9602                 .data = &tt_prm,
9603         };
9604         struct mlx5_hlist_entry *entry;
9605         struct mlx5_flow_tbl_data_entry *tbl_data;
9606
9607         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
9608         if (!entry) {
9609                 rte_flow_error_set(error, ENOMEM,
9610                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9611                                    "cannot get table");
9612                 return NULL;
9613         }
9614         DRV_LOG(DEBUG, "table_level %u table_id %u "
9615                 "tunnel %u group %u registered.",
9616                 table_level, table_id,
9617                 tunnel ? tunnel->tunnel_id : 0, group_id);
9618         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9619         return &tbl_data->tbl;
9620 }
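
/*
 * Usage sketch (hypothetical parameters): fetch-or-create a NIC Rx
 * table at level 1 without tunnel context; the reference taken here is
 * dropped later with flow_dv_tbl_resource_release().
 *
 *   struct mlx5_flow_tbl_resource *tbl =
 *           flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *                                    0, 0, 0, error);
 *   if (!tbl)
 *           return -rte_errno;
 */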
9621
9622 void
9623 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
9624                       struct mlx5_hlist_entry *entry)
9625 {
9626         struct mlx5_dev_ctx_shared *sh = list->ctx;
9627         struct mlx5_flow_tbl_data_entry *tbl_data =
9628                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9629
9630         MLX5_ASSERT(entry && sh);
9631         if (tbl_data->jump.action)
9632                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
9633         if (tbl_data->tbl.obj)
9634                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
9635         if (tbl_data->tunnel_offload && tbl_data->external) {
9636                 struct mlx5_hlist_entry *he;
9637                 struct mlx5_hlist *tunnel_grp_hash;
9638                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
9639                 union tunnel_tbl_key tunnel_key = {
9640                         .tunnel_id = tbl_data->tunnel ?
9641                                         tbl_data->tunnel->tunnel_id : 0,
9642                         .group = tbl_data->group_id
9643                 };
9644                 uint32_t table_level = tbl_data->level;
9645
9646                 tunnel_grp_hash = tbl_data->tunnel ?
9647                                         tbl_data->tunnel->groups :
9648                                         thub->groups;
9649                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
9650                 if (he)
9651                         mlx5_hlist_unregister(tunnel_grp_hash, he);
9652                 DRV_LOG(DEBUG,
9653                         "table_level %u id %u tunnel %u group %u released.",
9654                         table_level,
9655                         tbl_data->id,
9656                         tbl_data->tunnel ?
9657                         tbl_data->tunnel->tunnel_id : 0,
9658                         tbl_data->group_id);
9659         }
9660         mlx5_cache_list_destroy(&tbl_data->matchers);
9661         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
9662 }
9663
9664 /**
9665  * Release a flow table.
9666  *
9667  * @param[in] sh
9668  *   Pointer to device shared structure.
9669  * @param[in] tbl
9670  *   Table resource to be released.
9671  *
9672  * @return
 *   Returns 0 if the table was released, 1 otherwise.
9674  */
9675 static int
9676 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
9677                              struct mlx5_flow_tbl_resource *tbl)
9678 {
9679         struct mlx5_flow_tbl_data_entry *tbl_data =
9680                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9681
9682         if (!tbl)
9683                 return 0;
9684         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
9685 }
9686
9687 int
9688 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
9689                          struct mlx5_cache_entry *entry, void *cb_ctx)
9690 {
9691         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9692         struct mlx5_flow_dv_matcher *ref = ctx->data;
9693         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
9694                                                         entry);
9695
9696         return cur->crc != ref->crc ||
9697                cur->priority != ref->priority ||
9698                memcmp((const void *)cur->mask.buf,
9699                       (const void *)ref->mask.buf, ref->mask.size);
9700 }
9701
9702 struct mlx5_cache_entry *
9703 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
9704                           struct mlx5_cache_entry *entry __rte_unused,
9705                           void *cb_ctx)
9706 {
9707         struct mlx5_dev_ctx_shared *sh = list->ctx;
9708         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9709         struct mlx5_flow_dv_matcher *ref = ctx->data;
9710         struct mlx5_flow_dv_matcher *cache;
9711         struct mlx5dv_flow_matcher_attr dv_attr = {
9712                 .type = IBV_FLOW_ATTR_NORMAL,
9713                 .match_mask = (void *)&ref->mask,
9714         };
9715         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
9716                                                             typeof(*tbl), tbl);
9717         int ret;
9718
9719         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
9720         if (!cache) {
9721                 rte_flow_error_set(ctx->error, ENOMEM,
9722                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9723                                    "cannot create matcher");
9724                 return NULL;
9725         }
9726         *cache = *ref;
9727         dv_attr.match_criteria_enable =
9728                 flow_dv_matcher_enable(cache->mask.buf);
9729         dv_attr.priority = ref->priority;
9730         if (tbl->is_egress)
9731                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
9732         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
9733                                                &cache->matcher_object);
9734         if (ret) {
9735                 mlx5_free(cache);
9736                 rte_flow_error_set(ctx->error, ENOMEM,
9737                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9738                                    "cannot create matcher");
9739                 return NULL;
9740         }
9741         return &cache->entry;
9742 }
9743
9744 /**
9745  * Register the flow matcher.
9746  *
9747  * @param[in, out] dev
9748  *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to the flow matcher reference.
 * @param[in, out] key
 *   Pointer to the flow table key.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Tunnel offload context, or NULL when not used.
 * @param[in] group_id
 *   Tunnel group id for the table.
 * @param[out] error
 *   Pointer to the error structure.
9757  *
9758  * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
9760  */
9761 static int
9762 flow_dv_matcher_register(struct rte_eth_dev *dev,
9763                          struct mlx5_flow_dv_matcher *ref,
9764                          union mlx5_flow_tbl_key *key,
9765                          struct mlx5_flow *dev_flow,
9766                          const struct mlx5_flow_tunnel *tunnel,
9767                          uint32_t group_id,
9768                          struct rte_flow_error *error)
9769 {
9770         struct mlx5_cache_entry *entry;
9771         struct mlx5_flow_dv_matcher *cache;
9772         struct mlx5_flow_tbl_resource *tbl;
9773         struct mlx5_flow_tbl_data_entry *tbl_data;
9774         struct mlx5_flow_cb_ctx ctx = {
9775                 .error = error,
9776                 .data = ref,
9777         };
9778
        /*
         * The tunnel offload API requires this registration for cases when
         * a tunnel match rule was inserted before the tunnel set rule.
         */
9783         tbl = flow_dv_tbl_resource_get(dev, key->level,
9784                                        key->is_egress, key->is_fdb,
9785                                        dev_flow->external, tunnel,
9786                                        group_id, 0, key->id, error);
9787         if (!tbl)
9788                 return -rte_errno;      /* No need to refill the error info */
9789         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9790         ref->tbl = tbl;
9791         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
9792         if (!entry) {
9793                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9794                 return rte_flow_error_set(error, ENOMEM,
9795                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9796                                           "cannot allocate ref memory");
9797         }
9798         cache = container_of(entry, typeof(*cache), entry);
9799         dev_flow->handle->dvh.matcher = cache;
9800         return 0;
9801 }
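
/*
 * Usage sketch (hypothetical values): registering a matcher on a NIC
 * Rx table at level 1. The mask buffer and its CRC are assumed to be
 * filled during flow translation.
 *
 *   union mlx5_flow_tbl_key tbl_key = {
 *           { .level = 1, .id = 0, .is_fdb = 0, .is_egress = 0 }
 *   };
 *   matcher.priority = 2;
 *   if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
 *                                NULL, 0, error))
 *           return -rte_errno;
 */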
9802
9803 struct mlx5_hlist_entry *
9804 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
9805 {
9806         struct mlx5_dev_ctx_shared *sh = list->ctx;
9807         struct rte_flow_error *error = ctx;
9808         struct mlx5_flow_dv_tag_resource *entry;
9809         uint32_t idx = 0;
9810         int ret;
9811
9812         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
9813         if (!entry) {
9814                 rte_flow_error_set(error, ENOMEM,
9815                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9816                                    "cannot allocate resource memory");
9817                 return NULL;
9818         }
9819         entry->idx = idx;
9820         entry->tag_id = key;
9821         ret = mlx5_flow_os_create_flow_action_tag(key,
9822                                                   &entry->action);
9823         if (ret) {
9824                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
9825                 rte_flow_error_set(error, ENOMEM,
9826                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9827                                    NULL, "cannot create action");
9828                 return NULL;
9829         }
9830         return &entry->entry;
9831 }
9832
9833 int
9834 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
9835                      struct mlx5_hlist_entry *entry, uint64_t key,
9836                      void *cb_ctx __rte_unused)
9837 {
9838         struct mlx5_flow_dv_tag_resource *tag =
9839                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9840
9841         return key != tag->tag_id;
9842 }
9843
9844 /**
9845  * Find existing tag resource or create and register a new one.
9846  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tag_be24
 *   Tag value in big endian, right-shifted by 8 bits.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to the error structure.
9855  *
9856  * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
9858  */
9859 static int
9860 flow_dv_tag_resource_register
9861                         (struct rte_eth_dev *dev,
9862                          uint32_t tag_be24,
9863                          struct mlx5_flow *dev_flow,
9864                          struct rte_flow_error *error)
9865 {
9866         struct mlx5_priv *priv = dev->data->dev_private;
9867         struct mlx5_flow_dv_tag_resource *cache_resource;
9868         struct mlx5_hlist_entry *entry;
9869
9870         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
9871         if (entry) {
9872                 cache_resource = container_of
9873                         (entry, struct mlx5_flow_dv_tag_resource, entry);
9874                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
9875                 dev_flow->dv.tag_resource = cache_resource;
9876                 return 0;
9877         }
9878         return -rte_errno;
9879 }
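
/*
 * Usage sketch: registering the tag resource for a hypothetical MARK
 * id 0xCAFE; the 24-bit big-endian tag is produced by
 * mlx5_flow_mark_set() as in the sample translation below.
 *
 *   uint32_t tag_be24 = mlx5_flow_mark_set(0xCAFE);
 *
 *   if (flow_dv_tag_resource_register(dev, tag_be24, dev_flow, error))
 *           return -rte_errno;
 */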
9880
9881 void
9882 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
9883                       struct mlx5_hlist_entry *entry)
9884 {
9885         struct mlx5_dev_ctx_shared *sh = list->ctx;
9886         struct mlx5_flow_dv_tag_resource *tag =
9887                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9888
9889         MLX5_ASSERT(tag && sh && tag->action);
9890         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
9891         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
9892         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
9893 }
9894
9895 /**
9896  * Release the tag.
9897  *
9898  * @param dev
9899  *   Pointer to Ethernet device.
9900  * @param tag_idx
9901  *   Tag index.
9902  *
9903  * @return
9904  *   1 while a reference on it exists, 0 when freed.
9905  */
9906 static int
9907 flow_dv_tag_release(struct rte_eth_dev *dev,
9908                     uint32_t tag_idx)
9909 {
9910         struct mlx5_priv *priv = dev->data->dev_private;
9911         struct mlx5_flow_dv_tag_resource *tag;
9912
9913         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
9914         if (!tag)
9915                 return 0;
9916         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
9917                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
9918         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
9919 }
9920
9921 /**
9922  * Translate port ID action to vport.
9923  *
9924  * @param[in] dev
9925  *   Pointer to rte_eth_dev structure.
9926  * @param[in] action
9927  *   Pointer to the port ID action.
9928  * @param[out] dst_port_id
9929  *   The target port ID.
9930  * @param[out] error
9931  *   Pointer to the error structure.
9932  *
9933  * @return
9934  *   0 on success, a negative errno value otherwise and rte_errno is set.
9935  */
9936 static int
9937 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
9938                                  const struct rte_flow_action *action,
9939                                  uint32_t *dst_port_id,
9940                                  struct rte_flow_error *error)
9941 {
9942         uint32_t port;
9943         struct mlx5_priv *priv;
9944         const struct rte_flow_action_port_id *conf =
9945                         (const struct rte_flow_action_port_id *)action->conf;
9946
9947         port = conf->original ? dev->data->port_id : conf->id;
9948         priv = mlx5_port_to_eswitch_info(port, false);
9949         if (!priv)
                return rte_flow_error_set(error, rte_errno,
9951                                           RTE_FLOW_ERROR_TYPE_ACTION,
9952                                           NULL,
9953                                           "No eswitch info was found for port");
9954 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
9955         /*
9956          * This parameter is transferred to
9957          * mlx5dv_dr_action_create_dest_ib_port().
9958          */
9959         *dst_port_id = priv->dev_port;
9960 #else
9961         /*
         * Legacy mode, no LAG configuration is supported.
9963          * This parameter is transferred to
9964          * mlx5dv_dr_action_create_dest_vport().
9965          */
9966         *dst_port_id = priv->vport_id;
9967 #endif
9968         return 0;
9969 }
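
/*
 * Illustrative example (hypothetical port number): a PORT_ID action
 * with { .original = 0, .id = 1 } is resolved through the E-Switch
 * info of DPDK port 1.
 *
 *   const struct rte_flow_action_port_id conf = {
 *           .original = 0,
 *           .id = 1,
 *   };
 *   const struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *           .conf = &conf,
 *   };
 *   uint32_t dst_port;
 *
 *   if (flow_dv_translate_action_port_id(dev, &act, &dst_port, error))
 *           return -rte_errno;
 *   // dst_port now holds dev_port (DEVX port) or vport_id (legacy).
 */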
9970
9971 /**
9972  * Create a counter with aging configuration.
9973  *
9974  * @param[in] dev
9975  *   Pointer to rte_eth_dev structure.
9976  * @param[in] dev_flow
9977  *   Pointer to the mlx5_flow.
9978  * @param[out] count
9979  *   Pointer to the counter action configuration.
9980  * @param[in] age
9981  *   Pointer to the aging action configuration.
9982  *
9983  * @return
9984  *   Index to flow counter on success, 0 otherwise.
9985  */
9986 static uint32_t
9987 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
9988                                 struct mlx5_flow *dev_flow,
9989                                 const struct rte_flow_action_count *count,
9990                                 const struct rte_flow_action_age *age)
9991 {
9992         uint32_t counter;
9993         struct mlx5_age_param *age_param;
9994
9995         if (count && count->shared)
9996                 counter = flow_dv_counter_get_shared(dev, count->id);
9997         else
9998                 counter = flow_dv_counter_alloc(dev, !!age);
9999         if (!counter || age == NULL)
10000                 return counter;
10001         age_param = flow_dv_counter_idx_get_age(dev, counter);
10002         age_param->context = age->context ? age->context :
10003                 (void *)(uintptr_t)(dev_flow->flow_idx);
10004         age_param->timeout = age->timeout;
10005         age_param->port_id = dev->data->port_id;
10006         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10007         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10008         return counter;
10009 }
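
/*
 * Usage sketch (hypothetical timeout): allocating a counter that also
 * arms the aging mechanism; passing a NULL count configuration is
 * allowed, as in the sample translation below.
 *
 *   const struct rte_flow_action_age age = { .timeout = 10 };
 *   uint32_t cnt_idx =
 *           flow_dv_translate_create_counter(dev, dev_flow, NULL, &age);
 *   // cnt_idx == 0 means the counter could not be allocated.
 */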
10010
10011 /**
 * Add Tx queue matcher.
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
10024  */
10025 static void
10026 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10027                                 void *matcher, void *key,
10028                                 const struct rte_flow_item *item)
10029 {
10030         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10031         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10032         void *misc_m =
10033                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10034         void *misc_v =
10035                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10036         struct mlx5_txq_ctrl *txq;
10037         uint32_t queue;
10038
10040         queue_m = (const void *)item->mask;
10041         if (!queue_m)
10042                 return;
10043         queue_v = (const void *)item->spec;
10044         if (!queue_v)
10045                 return;
10046         txq = mlx5_txq_get(dev, queue_v->queue);
10047         if (!txq)
10048                 return;
10049         queue = txq->obj->sq->id;
10050         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10051         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10052                  queue & queue_m->queue);
10053         mlx5_txq_release(dev, queue_v->queue);
10054 }
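
/*
 * Sketch (hypothetical queue number): the internal Tx queue item
 * matches on the SQ number behind a given Tx queue; a full mask
 * requests an exact match.
 *
 *   struct mlx5_rte_flow_item_tx_queue queue_spec = { .queue = 0 };
 *   struct mlx5_rte_flow_item_tx_queue queue_mask = {
 *           .queue = UINT32_MAX,
 *   };
 *   struct rte_flow_item item = {
 *           .type = (enum rte_flow_item_type)
 *                   MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
 *           .spec = &queue_spec,
 *           .mask = &queue_mask,
 *   };
 */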
10055
10056 /**
 * Set the hash fields according to the @p dev_flow information.
10058  *
10059  * @param[in] dev_flow
10060  *   Pointer to the mlx5_flow.
10061  * @param[in] rss_desc
10062  *   Pointer to the mlx5_flow_rss_desc.
10063  */
10064 static void
10065 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10066                        struct mlx5_flow_rss_desc *rss_desc)
10067 {
10068         uint64_t items = dev_flow->handle->layers;
10069         int rss_inner = 0;
10070         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10071
10072         dev_flow->hash_fields = 0;
10073 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10074         if (rss_desc->level >= 2) {
10075                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10076                 rss_inner = 1;
10077         }
10078 #endif
10079         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10080             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10081                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10082                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10083                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10084                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10085                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10086                         else
10087                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10088                 }
10089         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10090                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10091                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10092                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10093                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10094                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10095                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10096                         else
10097                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10098                 }
10099         }
10100         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10101             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10102                 if (rss_types & ETH_RSS_UDP) {
10103                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10104                                 dev_flow->hash_fields |=
10105                                                 IBV_RX_HASH_SRC_PORT_UDP;
10106                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10107                                 dev_flow->hash_fields |=
10108                                                 IBV_RX_HASH_DST_PORT_UDP;
10109                         else
10110                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10111                 }
10112         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10113                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10114                 if (rss_types & ETH_RSS_TCP) {
10115                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10116                                 dev_flow->hash_fields |=
10117                                                 IBV_RX_HASH_SRC_PORT_TCP;
10118                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10119                                 dev_flow->hash_fields |=
10120                                                 IBV_RX_HASH_DST_PORT_TCP;
10121                         else
10122                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10123                 }
10124         }
10125 }
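
/*
 * Worked example (hypothetical RSS request): for a flow whose outer
 * layers contain IPv4 and UDP and with
 * rss_desc->types == (ETH_RSS_UDP | ETH_RSS_L4_SRC_ONLY), the L3 part
 * selects MLX5_IPV4_IBV_RX_HASH (both addresses, since no L3 *_ONLY
 * flag is set) and the L4 part selects IBV_RX_HASH_SRC_PORT_UDP, so:
 *
 *   dev_flow->hash_fields ==
 *           (MLX5_IPV4_IBV_RX_HASH | IBV_RX_HASH_SRC_PORT_UDP)
 */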
10126
10127 /**
10128  * Prepare an Rx Hash queue.
10129  *
10130  * @param dev
10131  *   Pointer to Ethernet device.
10132  * @param[in] dev_flow
10133  *   Pointer to the mlx5_flow.
10134  * @param[in] rss_desc
10135  *   Pointer to the mlx5_flow_rss_desc.
10136  * @param[out] hrxq_idx
10137  *   Hash Rx queue index.
10138  *
10139  * @return
10140  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10141  */
10142 static struct mlx5_hrxq *
10143 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10144                      struct mlx5_flow *dev_flow,
10145                      struct mlx5_flow_rss_desc *rss_desc,
10146                      uint32_t *hrxq_idx)
10147 {
10148         struct mlx5_priv *priv = dev->data->dev_private;
10149         struct mlx5_flow_handle *dh = dev_flow->handle;
10150         struct mlx5_hrxq *hrxq;
10151
10152         MLX5_ASSERT(rss_desc->queue_num);
10153         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10154         rss_desc->hash_fields = dev_flow->hash_fields;
10155         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10156         rss_desc->shared_rss = 0;
10157         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10158         if (!*hrxq_idx)
10159                 return NULL;
10160         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10161                               *hrxq_idx);
10162         return hrxq;
10163 }
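
/*
 * Usage sketch (hypothetical queues): preparing a hash Rx queue over
 * two Rx queues; flow_dv_hashfields_set() is expected to have filled
 * dev_flow->hash_fields beforehand.
 *
 *   uint32_t hrxq_idx;
 *   struct mlx5_hrxq *hrxq;
 *
 *   rss_desc->queue_num = 2;
 *   rss_desc->queue[0] = 0;
 *   rss_desc->queue[1] = 1;
 *   hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
 *   if (!hrxq)
 *           return -rte_errno;
 */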
10164
10165 /**
10166  * Release sample sub action resource.
10167  *
10168  * @param[in, out] dev
10169  *   Pointer to rte_eth_dev structure.
10170  * @param[in] act_res
10171  *   Pointer to sample sub action resource.
10172  */
10173 static void
10174 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10175                                    struct mlx5_flow_sub_actions_idx *act_res)
10176 {
10177         if (act_res->rix_hrxq) {
10178                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10179                 act_res->rix_hrxq = 0;
10180         }
10181         if (act_res->rix_encap_decap) {
10182                 flow_dv_encap_decap_resource_release(dev,
10183                                                      act_res->rix_encap_decap);
10184                 act_res->rix_encap_decap = 0;
10185         }
10186         if (act_res->rix_port_id_action) {
10187                 flow_dv_port_id_action_resource_release(dev,
10188                                                 act_res->rix_port_id_action);
10189                 act_res->rix_port_id_action = 0;
10190         }
10191         if (act_res->rix_tag) {
10192                 flow_dv_tag_release(dev, act_res->rix_tag);
10193                 act_res->rix_tag = 0;
10194         }
10195         if (act_res->rix_jump) {
10196                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10197                 act_res->rix_jump = 0;
10198         }
10199 }
10200
10201 int
10202 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10203                         struct mlx5_cache_entry *entry, void *cb_ctx)
10204 {
10205         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10206         struct rte_eth_dev *dev = ctx->dev;
10207         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10208         struct mlx5_flow_dv_sample_resource *cache_resource =
10209                         container_of(entry, typeof(*cache_resource), entry);
10210
10211         if (resource->ratio == cache_resource->ratio &&
10212             resource->ft_type == cache_resource->ft_type &&
10213             resource->ft_id == cache_resource->ft_id &&
10214             resource->set_action == cache_resource->set_action &&
10215             !memcmp((void *)&resource->sample_act,
10216                     (void *)&cache_resource->sample_act,
10217                     sizeof(struct mlx5_flow_sub_actions_list))) {
10218                 /*
10219                  * Existing sample action should release the prepared
10220                  * sub-actions reference counter.
10221                  */
10222                 flow_dv_sample_sub_actions_release(dev,
10223                                                 &resource->sample_idx);
10224                 return 0;
10225         }
10226         return 1;
10227 }
10228
10229 struct mlx5_cache_entry *
10230 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
10231                          struct mlx5_cache_entry *entry __rte_unused,
10232                          void *cb_ctx)
10233 {
10234         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10235         struct rte_eth_dev *dev = ctx->dev;
10236         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10237         void **sample_dv_actions = resource->sub_actions;
10238         struct mlx5_flow_dv_sample_resource *cache_resource;
10239         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10240         struct mlx5_priv *priv = dev->data->dev_private;
10241         struct mlx5_dev_ctx_shared *sh = priv->sh;
10242         struct mlx5_flow_tbl_resource *tbl;
10243         uint32_t idx = 0;
10244         const uint32_t next_ft_step = 1;
10245         uint32_t next_ft_id = resource->ft_id + next_ft_step;
10246         uint8_t is_egress = 0;
10247         uint8_t is_transfer = 0;
10248         struct rte_flow_error *error = ctx->error;
10249
10250         /* Register new sample resource. */
10251         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10252         if (!cache_resource) {
10253                 rte_flow_error_set(error, ENOMEM,
10254                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10255                                           NULL,
10256                                           "cannot allocate resource memory");
10257                 return NULL;
10258         }
10259         *cache_resource = *resource;
10260         /* Create normal path table level */
10261         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10262                 is_transfer = 1;
10263         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10264                 is_egress = 1;
10265         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10266                                         is_egress, is_transfer,
10267                                         true, NULL, 0, 0, 0, error);
10268         if (!tbl) {
10269                 rte_flow_error_set(error, ENOMEM,
10270                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10271                                           NULL,
                                          "failed to create normal path table "
10273                                           "for sample");
10274                 goto error;
10275         }
10276         cache_resource->normal_path_tbl = tbl;
10277         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10278                 if (!sh->default_miss_action) {
10279                         rte_flow_error_set(error, ENOMEM,
10280                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10281                                                 NULL,
10282                                                 "default miss action was not "
10283                                                 "created");
10284                         goto error;
10285                 }
10286                 sample_dv_actions[resource->sample_act.actions_num++] =
10287                                                 sh->default_miss_action;
10288         }
10289         /* Create a DR sample action */
10290         sampler_attr.sample_ratio = cache_resource->ratio;
10291         sampler_attr.default_next_table = tbl->obj;
10292         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
10293         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
10294                                                         &sample_dv_actions[0];
10295         sampler_attr.action = cache_resource->set_action;
10296         if (mlx5_os_flow_dr_create_flow_action_sampler
10297                         (&sampler_attr, &cache_resource->verbs_action)) {
10298                 rte_flow_error_set(error, ENOMEM,
10299                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10300                                         NULL, "cannot create sample action");
10301                 goto error;
10302         }
10303         cache_resource->idx = idx;
10304         cache_resource->dev = dev;
10305         return &cache_resource->entry;
10306 error:
10307         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
10308                 flow_dv_sample_sub_actions_release(dev,
10309                                                    &cache_resource->sample_idx);
10310         if (cache_resource->normal_path_tbl)
10311                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10312                                 cache_resource->normal_path_tbl);
10313         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
        return NULL;
}
10317
10318 /**
10319  * Find existing sample resource or create and register a new one.
10320  *
10321  * @param[in, out] dev
10322  *   Pointer to rte_eth_dev structure.
10323  * @param[in] resource
10324  *   Pointer to sample resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to the error structure.
10329  *
10330  * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
10332  */
10333 static int
10334 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10335                          struct mlx5_flow_dv_sample_resource *resource,
10336                          struct mlx5_flow *dev_flow,
10337                          struct rte_flow_error *error)
10338 {
10339         struct mlx5_flow_dv_sample_resource *cache_resource;
10340         struct mlx5_cache_entry *entry;
10341         struct mlx5_priv *priv = dev->data->dev_private;
10342         struct mlx5_flow_cb_ctx ctx = {
10343                 .dev = dev,
10344                 .error = error,
10345                 .data = resource,
10346         };
10347
10348         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10349         if (!entry)
10350                 return -rte_errno;
10351         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10352         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10353         dev_flow->dv.sample_res = cache_resource;
10354         return 0;
10355 }
10356
10357 int
10358 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10359                             struct mlx5_cache_entry *entry, void *cb_ctx)
10360 {
10361         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10362         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10363         struct rte_eth_dev *dev = ctx->dev;
10364         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10365                         container_of(entry, typeof(*cache_resource), entry);
10366         uint32_t idx = 0;
10367
10368         if (resource->num_of_dest == cache_resource->num_of_dest &&
10369             resource->ft_type == cache_resource->ft_type &&
10370             !memcmp((void *)cache_resource->sample_act,
10371                     (void *)resource->sample_act,
10372                    (resource->num_of_dest *
10373                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10374                 /*
                 * The existing destination array action should release
                 * the prepared sub-actions reference counters.
10377                  */
10378                 for (idx = 0; idx < resource->num_of_dest; idx++)
10379                         flow_dv_sample_sub_actions_release(dev,
10380                                         &resource->sample_idx[idx]);
10381                 return 0;
10382         }
10383         return 1;
10384 }
10385
10386 struct mlx5_cache_entry *
10387 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10388                          struct mlx5_cache_entry *entry __rte_unused,
10389                          void *cb_ctx)
10390 {
10391         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10392         struct rte_eth_dev *dev = ctx->dev;
10393         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10394         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10395         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10396         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10397         struct mlx5_priv *priv = dev->data->dev_private;
10398         struct mlx5_dev_ctx_shared *sh = priv->sh;
10399         struct mlx5_flow_sub_actions_list *sample_act;
10400         struct mlx5dv_dr_domain *domain;
10401         uint32_t idx = 0, res_idx = 0;
10402         struct rte_flow_error *error = ctx->error;
10403         uint64_t action_flags;
10404         int ret;
10405
10406         /* Register new destination array resource. */
10407         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10408                                             &res_idx);
10409         if (!cache_resource) {
10410                 rte_flow_error_set(error, ENOMEM,
10411                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10412                                           NULL,
10413                                           "cannot allocate resource memory");
10414                 return NULL;
10415         }
10416         *cache_resource = *resource;
10417         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10418                 domain = sh->fdb_domain;
10419         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10420                 domain = sh->rx_domain;
10421         else
10422                 domain = sh->tx_domain;
10423         for (idx = 0; idx < resource->num_of_dest; idx++) {
10424                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10425                                  mlx5_malloc(MLX5_MEM_ZERO,
10426                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10427                                  0, SOCKET_ID_ANY);
10428                 if (!dest_attr[idx]) {
10429                         rte_flow_error_set(error, ENOMEM,
10430                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10431                                            NULL,
10432                                            "cannot allocate resource memory");
10433                         goto error;
10434                 }
10435                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10436                 sample_act = &resource->sample_act[idx];
10437                 action_flags = sample_act->action_flags;
10438                 switch (action_flags) {
10439                 case MLX5_FLOW_ACTION_QUEUE:
10440                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10441                         break;
10442                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10443                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10444                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10445                         dest_attr[idx]->dest_reformat->reformat =
10446                                         sample_act->dr_encap_action;
10447                         dest_attr[idx]->dest_reformat->dest =
10448                                         sample_act->dr_port_id_action;
10449                         break;
10450                 case MLX5_FLOW_ACTION_PORT_ID:
10451                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10452                         break;
10453                 case MLX5_FLOW_ACTION_JUMP:
10454                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10455                         break;
10456                 default:
10457                         rte_flow_error_set(error, EINVAL,
10458                                            RTE_FLOW_ERROR_TYPE_ACTION,
10459                                            NULL,
10460                                            "unsupported actions type");
10461                         goto error;
10462                 }
10463         }
        /* Create a dest array action. */
10465         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10466                                                 (domain,
10467                                                  cache_resource->num_of_dest,
10468                                                  dest_attr,
10469                                                  &cache_resource->action);
10470         if (ret) {
10471                 rte_flow_error_set(error, ENOMEM,
10472                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10473                                    NULL,
10474                                    "cannot create destination array action");
10475                 goto error;
10476         }
10477         cache_resource->idx = res_idx;
10478         cache_resource->dev = dev;
10479         for (idx = 0; idx < resource->num_of_dest; idx++)
10480                 mlx5_free(dest_attr[idx]);
10481         return &cache_resource->entry;
10482 error:
10483         for (idx = 0; idx < resource->num_of_dest; idx++) {
10484                 flow_dv_sample_sub_actions_release(dev,
10485                                 &cache_resource->sample_idx[idx]);
10486                 if (dest_attr[idx])
10487                         mlx5_free(dest_attr[idx]);
10488         }
10490         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10491         return NULL;
10492 }
10493
10494 /**
10495  * Find existing destination array resource or create and register a new one.
10496  *
10497  * @param[in, out] dev
10498  *   Pointer to rte_eth_dev structure.
10499  * @param[in] resource
10500  *   Pointer to destination array resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to the error structure.
10505  *
10506  * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
10508  */
10509 static int
10510 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10511                          struct mlx5_flow_dv_dest_array_resource *resource,
10512                          struct mlx5_flow *dev_flow,
10513                          struct rte_flow_error *error)
10514 {
10515         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10516         struct mlx5_priv *priv = dev->data->dev_private;
10517         struct mlx5_cache_entry *entry;
10518         struct mlx5_flow_cb_ctx ctx = {
10519                 .dev = dev,
10520                 .error = error,
10521                 .data = resource,
10522         };
10523
10524         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10525         if (!entry)
10526                 return -rte_errno;
10527         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10528         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10529         dev_flow->dv.dest_array_res = cache_resource;
10530         return 0;
10531 }
10532
10533 /**
10534  * Convert Sample action to DV specification.
10535  *
10536  * @param[in] dev
10537  *   Pointer to rte_eth_dev structure.
10538  * @param[in] action
10539  *   Pointer to sample action structure.
10540  * @param[in, out] dev_flow
10541  *   Pointer to the mlx5_flow.
10542  * @param[in] attr
10543  *   Pointer to the flow attributes.
10544  * @param[in, out] num_of_dest
 *   Pointer to the number of destinations.
10546  * @param[in, out] sample_actions
10547  *   Pointer to sample actions list.
10548  * @param[in, out] res
10549  *   Pointer to sample resource.
10550  * @param[out] error
10551  *   Pointer to the error structure.
10552  *
10553  * @return
10554  *   0 on success, a negative errno value otherwise and rte_errno is set.
10555  */
10556 static int
10557 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10558                                 const struct rte_flow_action_sample *action,
10559                                 struct mlx5_flow *dev_flow,
10560                                 const struct rte_flow_attr *attr,
10561                                 uint32_t *num_of_dest,
10562                                 void **sample_actions,
10563                                 struct mlx5_flow_dv_sample_resource *res,
10564                                 struct rte_flow_error *error)
10565 {
10566         struct mlx5_priv *priv = dev->data->dev_private;
10567         const struct rte_flow_action *sub_actions;
10568         struct mlx5_flow_sub_actions_list *sample_act;
10569         struct mlx5_flow_sub_actions_idx *sample_idx;
10570         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10571         struct rte_flow *flow = dev_flow->flow;
10572         struct mlx5_flow_rss_desc *rss_desc;
10573         uint64_t action_flags = 0;
10574
10575         MLX5_ASSERT(wks);
10576         rss_desc = &wks->rss_desc;
10577         sample_act = &res->sample_act;
10578         sample_idx = &res->sample_idx;
10579         res->ratio = action->ratio;
10580         sub_actions = action->actions;
10581         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
10582                 int type = sub_actions->type;
10583                 uint32_t pre_rix = 0;
10584                 void *pre_r;
10585                 switch (type) {
10586                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10587                 {
10588                         const struct rte_flow_action_queue *queue;
10589                         struct mlx5_hrxq *hrxq;
10590                         uint32_t hrxq_idx;
10591
10592                         queue = sub_actions->conf;
10593                         rss_desc->queue_num = 1;
10594                         rss_desc->queue[0] = queue->index;
10595                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10596                                                     rss_desc, &hrxq_idx);
10597                         if (!hrxq)
10598                                 return rte_flow_error_set
10599                                         (error, rte_errno,
10600                                          RTE_FLOW_ERROR_TYPE_ACTION,
10601                                          NULL,
10602                                          "cannot create fate queue");
10603                         sample_act->dr_queue_action = hrxq->action;
10604                         sample_idx->rix_hrxq = hrxq_idx;
10605                         sample_actions[sample_act->actions_num++] =
10606                                                 hrxq->action;
10607                         (*num_of_dest)++;
10608                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
10609                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10610                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10611                         dev_flow->handle->fate_action =
10612                                         MLX5_FLOW_FATE_QUEUE;
10613                         break;
10614                 }
10615                 case RTE_FLOW_ACTION_TYPE_RSS:
10616                 {
10617                         struct mlx5_hrxq *hrxq;
10618                         uint32_t hrxq_idx;
10619                         const struct rte_flow_action_rss *rss;
10620                         const uint8_t *rss_key;
10621
10622                         rss = sub_actions->conf;
10623                         memcpy(rss_desc->queue, rss->queue,
10624                                rss->queue_num * sizeof(uint16_t));
10625                         rss_desc->queue_num = rss->queue_num;
10626                         /* NULL RSS key indicates default RSS key. */
10627                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
10628                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
                         * rss->level and rss->types should be set in advance
10630                          * rss->level and rss.types should be set in advance
10631                          * when expanding items for RSS.
10632                          */
10633                         flow_dv_hashfields_set(dev_flow, rss_desc);
10634                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10635                                                     rss_desc, &hrxq_idx);
10636                         if (!hrxq)
10637                                 return rte_flow_error_set
10638                                         (error, rte_errno,
10639                                          RTE_FLOW_ERROR_TYPE_ACTION,
10640                                          NULL,
10641                                          "cannot create fate queue");
10642                         sample_act->dr_queue_action = hrxq->action;
10643                         sample_idx->rix_hrxq = hrxq_idx;
10644                         sample_actions[sample_act->actions_num++] =
10645                                                 hrxq->action;
10646                         (*num_of_dest)++;
10647                         action_flags |= MLX5_FLOW_ACTION_RSS;
10648                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10649                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10650                         dev_flow->handle->fate_action =
10651                                         MLX5_FLOW_FATE_QUEUE;
10652                         break;
10653                 }
10654                 case RTE_FLOW_ACTION_TYPE_MARK:
10655                 {
10656                         uint32_t tag_be = mlx5_flow_mark_set
10657                                 (((const struct rte_flow_action_mark *)
10658                                 (sub_actions->conf))->id);
10659
10660                         dev_flow->handle->mark = 1;
10661                         pre_rix = dev_flow->handle->dvh.rix_tag;
10662                         /* Save the mark resource before sample */
10663                         pre_r = dev_flow->dv.tag_resource;
10664                         if (flow_dv_tag_resource_register(dev, tag_be,
10665                                                   dev_flow, error))
10666                                 return -rte_errno;
10667                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10668                         sample_act->dr_tag_action =
10669                                 dev_flow->dv.tag_resource->action;
10670                         sample_idx->rix_tag =
10671                                 dev_flow->handle->dvh.rix_tag;
10672                         sample_actions[sample_act->actions_num++] =
10673                                                 sample_act->dr_tag_action;
10674                         /* Recover the mark resource after sample */
10675                         dev_flow->dv.tag_resource = pre_r;
10676                         dev_flow->handle->dvh.rix_tag = pre_rix;
10677                         action_flags |= MLX5_FLOW_ACTION_MARK;
10678                         break;
10679                 }
10680                 case RTE_FLOW_ACTION_TYPE_COUNT:
10681                 {
10682                         if (!flow->counter) {
10683                                 flow->counter =
10684                                         flow_dv_translate_create_counter(dev,
10685                                                 dev_flow, sub_actions->conf,
10686                                                 0);
10687                                 if (!flow->counter)
10688                                         return rte_flow_error_set
10689                                                 (error, rte_errno,
10690                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10691                                                 NULL,
10692                                                 "cannot create counter"
10693                                                 " object.");
10694                         }
10695                         sample_act->dr_cnt_action =
10696                                   (flow_dv_counter_get_by_idx(dev,
10697                                   flow->counter, NULL))->action;
10698                         sample_actions[sample_act->actions_num++] =
10699                                                 sample_act->dr_cnt_action;
10700                         action_flags |= MLX5_FLOW_ACTION_COUNT;
10701                         break;
10702                 }
10703                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
10704                 {
10705                         struct mlx5_flow_dv_port_id_action_resource
10706                                         port_id_resource;
10707                         uint32_t port_id = 0;
10708
10709                         memset(&port_id_resource, 0, sizeof(port_id_resource));
10710                         /* Save the port id resource before sample */
10711                         pre_rix = dev_flow->handle->rix_port_id_action;
10712                         pre_r = dev_flow->dv.port_id_action;
10713                         if (flow_dv_translate_action_port_id(dev, sub_actions,
10714                                                              &port_id, error))
10715                                 return -rte_errno;
10716                         port_id_resource.port_id = port_id;
10717                         if (flow_dv_port_id_action_resource_register
10718                             (dev, &port_id_resource, dev_flow, error))
10719                                 return -rte_errno;
10720                         sample_act->dr_port_id_action =
10721                                 dev_flow->dv.port_id_action->action;
10722                         sample_idx->rix_port_id_action =
10723                                 dev_flow->handle->rix_port_id_action;
10724                         sample_actions[sample_act->actions_num++] =
10725                                                 sample_act->dr_port_id_action;
10726                         /* Recover the port id resource after sample */
10727                         dev_flow->dv.port_id_action = pre_r;
10728                         dev_flow->handle->rix_port_id_action = pre_rix;
10729                         (*num_of_dest)++;
10730                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10731                         break;
10732                 }
10733                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
10734                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
10735                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10736                         /* Save the encap resource before sample */
10737                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
10738                         pre_r = dev_flow->dv.encap_decap;
10739                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
10740                                                            dev_flow,
10741                                                            attr->transfer,
10742                                                            error))
10743                                 return -rte_errno;
10744                         sample_act->dr_encap_action =
10745                                 dev_flow->dv.encap_decap->action;
10746                         sample_idx->rix_encap_decap =
10747                                 dev_flow->handle->dvh.rix_encap_decap;
10748                         sample_actions[sample_act->actions_num++] =
10749                                                 sample_act->dr_encap_action;
10750                         /* Recover the encap resource after sample */
10751                         dev_flow->dv.encap_decap = pre_r;
10752                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
10753                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10754                         break;
10755                 default:
10756                         return rte_flow_error_set(error, EINVAL,
10757                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10758                                 NULL,
10759                                 "sampler action not supported");
10760                 }
10761         }
10762         sample_act->action_flags = action_flags;
10763         res->ft_id = dev_flow->dv.group;
10764         if (attr->transfer) {
10765                 union {
10766                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
10767                         uint64_t set_action;
10768                 } action_ctx = { .set_action = 0 };
10769
10770                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10771                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
10772                          MLX5_MODIFICATION_TYPE_SET);
10773                 MLX5_SET(set_action_in, action_ctx.action_in, field,
10774                          MLX5_MODI_META_REG_C_0);
10775                 MLX5_SET(set_action_in, action_ctx.action_in, data,
10776                          priv->vport_meta_tag);
10777                 res->set_action = action_ctx.set_action;
10778         } else if (attr->ingress) {
10779                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10780         } else {
10781                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
10782         }
10783         return 0;
10784 }
10785
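/*
 * Illustrative sketch (not part of the driver): the sub-action translation
 * above repeatedly uses a save/restore idiom, because the resource
 * registration helpers write into the same dev_flow slots used by the
 * normal path. All names prefixed with "example_" are hypothetical.
 */
struct example_resource_slot {
        void *resource;         /* e.g. dev_flow->dv.tag_resource */
        uint32_t index;         /* e.g. dev_flow->handle->dvh.rix_tag */
};

static inline void
example_sample_register(struct example_resource_slot *slot,
                        void **sample_action)
{
        /* Save the normal path resource before sample registration. */
        void *pre_r = slot->resource;
        uint32_t pre_rix = slot->index;

        /* ... the register helper overwrites the slot here ... */
        *sample_action = slot->resource;
        /* Recover the normal path resource after sample. */
        slot->resource = pre_r;
        slot->index = pre_rix;
}
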
10786 /**
10787  * Convert Sample action to DV specification.
10788  *
10789  * @param[in] dev
10790  *   Pointer to rte_eth_dev structure.
10791  * @param[in, out] dev_flow
10792  *   Pointer to the mlx5_flow.
10793  * @param[in] num_of_dest
10794  *   The number of destinations.
10795  * @param[in, out] res
10796  *   Pointer to sample resource.
10797  * @param[in, out] mdest_res
10798  *   Pointer to destination array resource.
10799  * @param[in] sample_actions
10800  *   Pointer to sample path actions list.
10801  * @param[in] action_flags
10802  *   Holds the actions detected until now.
10803  * @param[out] error
10804  *   Pointer to the error structure.
10805  *
10806  * @return
10807  *   0 on success, a negative errno value otherwise and rte_errno is set.
10808  */
10809 static int
10810 flow_dv_create_action_sample(struct rte_eth_dev *dev,
10811                              struct mlx5_flow *dev_flow,
10812                              uint32_t num_of_dest,
10813                              struct mlx5_flow_dv_sample_resource *res,
10814                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
10815                              void **sample_actions,
10816                              uint64_t action_flags,
10817                              struct rte_flow_error *error)
10818 {
10819         /* Update normal path action resource in the last index of the array. */
10820         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
10821         struct mlx5_flow_sub_actions_list *sample_act =
10822                                         &mdest_res->sample_act[dest_index];
10823         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10824         struct mlx5_flow_rss_desc *rss_desc;
10825         uint32_t normal_idx = 0;
10826         struct mlx5_hrxq *hrxq;
10827         uint32_t hrxq_idx;
10828
10829         MLX5_ASSERT(wks);
10830         rss_desc = &wks->rss_desc;
10831         if (num_of_dest > 1) {
10832                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
10833                         /* Handle QP action for mirroring */
10834                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10835                                                     rss_desc, &hrxq_idx);
10836                         if (!hrxq)
10837                                 return rte_flow_error_set
10838                                      (error, rte_errno,
10839                                       RTE_FLOW_ERROR_TYPE_ACTION,
10840                                       NULL,
10841                                       "cannot create rx queue");
10842                         normal_idx++;
10843                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
10844                         sample_act->dr_queue_action = hrxq->action;
10845                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10846                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10847                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
10848                 }
10849                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
10850                         normal_idx++;
10851                         mdest_res->sample_idx[dest_index].rix_encap_decap =
10852                                 dev_flow->handle->dvh.rix_encap_decap;
10853                         sample_act->dr_encap_action =
10854                                 dev_flow->dv.encap_decap->action;
10855                         dev_flow->handle->dvh.rix_encap_decap = 0;
10856                 }
10857                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
10858                         normal_idx++;
10859                         mdest_res->sample_idx[dest_index].rix_port_id_action =
10860                                 dev_flow->handle->rix_port_id_action;
10861                         sample_act->dr_port_id_action =
10862                                 dev_flow->dv.port_id_action->action;
10863                         dev_flow->handle->rix_port_id_action = 0;
10864                 }
10865                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
10866                         normal_idx++;
10867                         mdest_res->sample_idx[dest_index].rix_jump =
10868                                 dev_flow->handle->rix_jump;
10869                         sample_act->dr_jump_action =
10870                                 dev_flow->dv.jump->action;
10871                         dev_flow->handle->rix_jump = 0;
10872                 }
10873                 sample_act->actions_num = normal_idx;
10874                 /* Update sample action resource in the first index of the array. */
10875                 mdest_res->ft_type = res->ft_type;
10876                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
10877                                 sizeof(struct mlx5_flow_sub_actions_idx));
10878                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
10879                                 sizeof(struct mlx5_flow_sub_actions_list));
10880                 mdest_res->num_of_dest = num_of_dest;
10881                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
10882                                                          dev_flow, error))
10883                         return rte_flow_error_set(error, EINVAL,
10884                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10885                                                   NULL, "can't create sample "
10886                                                   "action");
10887         } else {
10888                 res->sub_actions = sample_actions;
10889                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
10890                         return rte_flow_error_set(error, EINVAL,
10891                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10892                                                   NULL,
10893                                                   "can't create sample action");
10894         }
10895         return 0;
10896 }
10897
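/*
 * Destination-array layout used above for mirroring (drawn from the code,
 * not a formal API): the sample path occupies index 0 while the normal
 * path is kept at the last index. A hypothetical helper:
 */
static inline uint32_t
example_normal_path_dest_index(void)
{
        /* The normal path always lands in the last destination slot. */
        return MLX5_MAX_DEST_NUM - 1;
}
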
10898 /**
10899  * Remove an ASO age action from age actions list.
10900  *
10901  * @param[in] dev
10902  *   Pointer to the Ethernet device structure.
10903  * @param[in] age
10904  *   Pointer to the aso age action handler.
10905  */
10906 static void
10907 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
10908                                 struct mlx5_aso_age_action *age)
10909 {
10910         struct mlx5_age_info *age_info;
10911         struct mlx5_age_param *age_param = &age->age_params;
10912         struct mlx5_priv *priv = dev->data->dev_private;
10913         uint16_t expected = AGE_CANDIDATE;
10914
10915         age_info = GET_PORT_AGE_INFO(priv);
10916         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
10917                                          AGE_FREE, false, __ATOMIC_RELAXED,
10918                                          __ATOMIC_RELAXED)) {
10919                 /*
10920                  * We need the lock even if it is an age timeout,
10921                  * since the age action may still be in process.
10922                  */
10923                 rte_spinlock_lock(&age_info->aged_sl);
10924                 LIST_REMOVE(age, next);
10925                 rte_spinlock_unlock(&age_info->aged_sl);
10926                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
10927         }
10928 }
10929
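/*
 * Minimal stand-alone sketch of the state transition implemented above
 * (hypothetical helper, for illustration only): a lock-free CAS retires a
 * CANDIDATE entry directly, and only an entry that already aged out needs
 * the aged-list lock before it can be marked FREE.
 */
static inline void
example_age_state_to_free(uint16_t *state, rte_spinlock_t *aged_sl)
{
        uint16_t expected = AGE_CANDIDATE;

        if (!__atomic_compare_exchange_n(state, &expected, AGE_FREE, false,
                                         __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
                rte_spinlock_lock(aged_sl);
                /* ... LIST_REMOVE() the entry from the aged list here ... */
                rte_spinlock_unlock(aged_sl);
                __atomic_store_n(state, AGE_FREE, __ATOMIC_RELAXED);
        }
}
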
10930 /**
10931  * Release an ASO age action.
10932  *
10933  * @param[in] dev
10934  *   Pointer to the Ethernet device structure.
10935  * @param[in] age_idx
10936  *   Index of ASO age action to release.
10940  *
10941  * @return
10942  *   0 when age action was removed, otherwise the number of references.
10943  */
10944 static int
10945 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
10946 {
10947         struct mlx5_priv *priv = dev->data->dev_private;
10948         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10949         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
10950         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
10951
10952         if (!ret) {
10953                 flow_dv_aso_age_remove_from_age(dev, age);
10954                 rte_spinlock_lock(&mng->free_sl);
10955                 LIST_INSERT_HEAD(&mng->free, age, next);
10956                 rte_spinlock_unlock(&mng->free_sl);
10957         }
10958         return ret;
10959 }
10960
10961 /**
10962  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
10963  *
10964  * @param[in] dev
10965  *   Pointer to the Ethernet device structure.
10966  *
10967  * @return
10968  *   0 on success, otherwise negative errno value and rte_errno is set.
10969  */
10970 static int
10971 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
10972 {
10973         struct mlx5_priv *priv = dev->data->dev_private;
10974         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10975         void *old_pools = mng->pools;
10976         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
10977         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
10978         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
10979
10980         if (!pools) {
10981                 rte_errno = ENOMEM;
10982                 return -ENOMEM;
10983         }
10984         if (old_pools) {
10985                 memcpy(pools, old_pools,
10986                        mng->n * sizeof(struct mlx5_aso_age_pool *));
10987                 mlx5_free(old_pools);
10988         } else {
10989                 /* First ASO flow hit allocation - starting ASO data-path. */
10990                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
10991
10992                 if (ret) {
10993                         mlx5_free(pools);
10994                         return ret;
10995                 }
10996         }
10997         mng->n = resize;
10998         mng->pools = pools;
10999         return 0;
11000 }
11001
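/*
 * The resize above grows the pool pointer array by a fixed step. A
 * self-contained sketch of the same pattern (names are hypothetical):
 */
static inline void **
example_grow_pool_array(void **old, uint32_t n, uint32_t step)
{
        void **pools = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * (n + step),
                                   0, SOCKET_ID_ANY);

        if (!pools)
                return NULL; /* Caller sets rte_errno = ENOMEM. */
        if (old) {
                memcpy(pools, old, n * sizeof(void *));
                mlx5_free(old);
        }
        return pools;
}
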
11002 /**
11003  * Create and initialize a new ASO aging pool.
11004  *
11005  * @param[in] dev
11006  *   Pointer to the Ethernet device structure.
11007  * @param[out] age_free
11008  *   Where to store the pointer to a new age action.
11009  *
11010  * @return
11011  *   The age actions pool pointer and @p age_free is set on success,
11012  *   NULL otherwise and rte_errno is set.
11013  */
11014 static struct mlx5_aso_age_pool *
11015 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11016                         struct mlx5_aso_age_action **age_free)
11017 {
11018         struct mlx5_priv *priv = dev->data->dev_private;
11019         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11020         struct mlx5_aso_age_pool *pool = NULL;
11021         struct mlx5_devx_obj *obj = NULL;
11022         uint32_t i;
11023
11024         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11025                                                     priv->sh->pdn);
11026         if (!obj) {
11027                 rte_errno = ENODATA;
11028                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11029                 return NULL;
11030         }
11031         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11032         if (!pool) {
11033                 claim_zero(mlx5_devx_cmd_destroy(obj));
11034                 rte_errno = ENOMEM;
11035                 return NULL;
11036         }
11037         pool->flow_hit_aso_obj = obj;
11038         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11039         rte_spinlock_lock(&mng->resize_sl);
11040         pool->index = mng->next;
11041         /* Resize pools array if there is no room for the new pool in it. */
11042         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11043                 claim_zero(mlx5_devx_cmd_destroy(obj));
11044                 mlx5_free(pool);
11045                 rte_spinlock_unlock(&mng->resize_sl);
11046                 return NULL;
11047         }
11048         mng->pools[pool->index] = pool;
11049         mng->next++;
11050         rte_spinlock_unlock(&mng->resize_sl);
11051         /* Assign the first action in the new pool, the rest go to free list. */
11052         *age_free = &pool->actions[0];
11053         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11054                 pool->actions[i].offset = i;
11055                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11056         }
11057         return pool;
11058 }
11059
11060 /**
11061  * Allocate an ASO aging bit.
11062  *
11063  * @param[in] dev
11064  *   Pointer to the Ethernet device structure.
11065  * @param[out] error
11066  *   Pointer to the error structure.
11067  *
11068  * @return
11069  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11070  */
11071 static uint32_t
11072 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11073 {
11074         struct mlx5_priv *priv = dev->data->dev_private;
11075         const struct mlx5_aso_age_pool *pool;
11076         struct mlx5_aso_age_action *age_free = NULL;
11077         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11078
11079         MLX5_ASSERT(mng);
11080         /* Try to get the next free age action bit. */
11081         rte_spinlock_lock(&mng->free_sl);
11082         age_free = LIST_FIRST(&mng->free);
11083         if (age_free) {
11084                 LIST_REMOVE(age_free, next);
11085         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11086                 rte_spinlock_unlock(&mng->free_sl);
11087                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11088                                    NULL, "failed to create ASO age pool");
11089                 return 0; /* 0 is an error. */
11090         }
11091         rte_spinlock_unlock(&mng->free_sl);
11092         pool = container_of
11093           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11094                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11095                                                                        actions);
11096         if (!age_free->dr_action) {
11097                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11098                                                  error);
11099
11100                 if (reg_c < 0) {
11101                         rte_flow_error_set(error, rte_errno,
11102                                            RTE_FLOW_ERROR_TYPE_ACTION,
11103                                            NULL, "failed to get reg_c "
11104                                            "for ASO flow hit");
11105                         return 0; /* 0 is an error. */
11106                 }
11107 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11108                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11109                                 (priv->sh->rx_domain,
11110                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11111                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11112                                  (reg_c - REG_C_0));
11113 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11114                 if (!age_free->dr_action) {
11115                         rte_errno = errno;
11116                         rte_spinlock_lock(&mng->free_sl);
11117                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11118                         rte_spinlock_unlock(&mng->free_sl);
11119                         rte_flow_error_set(error, rte_errno,
11120                                            RTE_FLOW_ERROR_TYPE_ACTION,
11121                                            NULL, "failed to create ASO "
11122                                            "flow hit action");
11123                         return 0; /* 0 is an error. */
11124                 }
11125         }
11126         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11127         return pool->index | ((age_free->offset + 1) << 16);
11128 }
11129
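/*
 * Illustrative decode of the index returned above (a sketch, not a driver
 * API): the pool index sits in the low 16 bits, and the action offset is
 * kept biased by one in the high 16 bits so that 0 can signal an error.
 */
static inline void
example_decode_aso_age_idx(uint32_t age_idx, uint32_t *pool_idx,
                           uint32_t *offset)
{
        *pool_idx = age_idx & 0xffff;
        *offset = (age_idx >> 16) - 1;
}
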
11130 /**
11131  * Create an age action using the ASO mechanism.
11132  *
11133  * @param[in] dev
11134  *   Pointer to rte_eth_dev structure.
11135  * @param[in] age
11136  *   Pointer to the aging action configuration.
11137  * @param[out] error
11138  *   Pointer to the error structure.
11139  *
11140  * @return
11141  *   Index to the ASO age action on success, 0 otherwise.
11142  */
11143 static uint32_t
11144 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
11145                                  const struct rte_flow_action_age *age,
11146                                  struct rte_flow_error *error)
11147 {
11148         uint32_t age_idx = 0;
11149         struct mlx5_aso_age_action *aso_age;
11150
11151         age_idx = flow_dv_aso_age_alloc(dev, error);
11152         if (!age_idx)
11153                 return 0;
11154         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11155         aso_age->age_params.context = age->context;
11156         aso_age->age_params.timeout = age->timeout;
11157         aso_age->age_params.port_id = dev->data->port_id;
11158         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11159                          __ATOMIC_RELAXED);
11160         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11161                          __ATOMIC_RELAXED);
11162         return age_idx;
11163 }
11164
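/*
 * Usage sketch (application side, illustrative only): an AGE action
 * configuration that would reach the translation above. The helper name
 * is hypothetical.
 */
static inline struct rte_flow_action
example_make_age_action(struct rte_flow_action_age *conf,
                        uint32_t timeout_sec, void *context)
{
        conf->timeout = timeout_sec;    /* Aging timeout in seconds. */
        conf->context = context;        /* Returned when the flow ages out. */
        return (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_AGE,
                .conf = conf,
        };
}
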
11165 /**
11166  * Fill the flow with DV spec, lock-free
11167  * (the mutex should be acquired by the caller).
11168  *
11169  * @param[in] dev
11170  *   Pointer to rte_eth_dev structure.
11171  * @param[in, out] dev_flow
11172  *   Pointer to the sub flow.
11173  * @param[in] attr
11174  *   Pointer to the flow attributes.
11175  * @param[in] items
11176  *   Pointer to the list of items.
11177  * @param[in] actions
11178  *   Pointer to the list of actions.
11179  * @param[out] error
11180  *   Pointer to the error structure.
11181  *
11182  * @return
11183  *   0 on success, a negative errno value otherwise and rte_errno is set.
11184  */
11185 static int
11186 flow_dv_translate(struct rte_eth_dev *dev,
11187                   struct mlx5_flow *dev_flow,
11188                   const struct rte_flow_attr *attr,
11189                   const struct rte_flow_item items[],
11190                   const struct rte_flow_action actions[],
11191                   struct rte_flow_error *error)
11192 {
11193         struct mlx5_priv *priv = dev->data->dev_private;
11194         struct mlx5_dev_config *dev_conf = &priv->config;
11195         struct rte_flow *flow = dev_flow->flow;
11196         struct mlx5_flow_handle *handle = dev_flow->handle;
11197         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11198         struct mlx5_flow_rss_desc *rss_desc;
11199         uint64_t item_flags = 0;
11200         uint64_t last_item = 0;
11201         uint64_t action_flags = 0;
11202         struct mlx5_flow_dv_matcher matcher = {
11203                 .mask = {
11204                         .size = sizeof(matcher.mask.buf) -
11205                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
11206                 },
11207         };
11208         int actions_n = 0;
11209         bool actions_end = false;
11210         union {
11211                 struct mlx5_flow_dv_modify_hdr_resource res;
11212                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
11213                             sizeof(struct mlx5_modification_cmd) *
11214                             (MLX5_MAX_MODIFY_NUM + 1)];
11215         } mhdr_dummy;
11216         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
11217         const struct rte_flow_action_count *count = NULL;
11218         const struct rte_flow_action_age *age = NULL;
11219         union flow_dv_attr flow_attr = { .attr = 0 };
11220         uint32_t tag_be;
11221         union mlx5_flow_tbl_key tbl_key;
11222         uint32_t modify_action_position = UINT32_MAX;
11223         void *match_mask = matcher.mask.buf;
11224         void *match_value = dev_flow->dv.value.buf;
11225         uint8_t next_protocol = 0xff;
11226         struct rte_vlan_hdr vlan = { 0 };
11227         struct mlx5_flow_dv_dest_array_resource mdest_res;
11228         struct mlx5_flow_dv_sample_resource sample_res;
11229         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11230         const struct rte_flow_action_sample *sample = NULL;
11231         struct mlx5_flow_sub_actions_list *sample_act;
11232         uint32_t sample_act_pos = UINT32_MAX;
11233         uint32_t num_of_dest = 0;
11234         int tmp_actions_n = 0;
11235         uint32_t table;
11236         int ret = 0;
11237         const struct mlx5_flow_tunnel *tunnel;
11238         struct flow_grp_info grp_info = {
11239                 .external = !!dev_flow->external,
11240                 .transfer = !!attr->transfer,
11241                 .fdb_def_rule = !!priv->fdb_def_rule,
11242                 .skip_scale = dev_flow->skip_scale &
11243                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
11244         };
11245
11246         if (!wks)
11247                 return rte_flow_error_set(error, ENOMEM,
11248                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11249                                           NULL,
11250                                           "failed to push flow workspace");
11251         rss_desc = &wks->rss_desc;
11252         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
11253         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
11254         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
11255                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11256         /* Update normal path action resource in the last index of the array. */
11257         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
11258         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
11259                  flow_items_to_tunnel(items) :
11260                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
11261                  flow_actions_to_tunnel(actions) :
11262                  dev_flow->tunnel;
11265         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
11266                                 (dev, tunnel, attr, items, actions);
11267         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
11268                                        &grp_info, error);
11269         if (ret)
11270                 return ret;
11271         dev_flow->dv.group = table;
11272         if (attr->transfer)
11273                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11274         /* The number of actions must be set to 0 in case of a dirty stack. */
11275         mhdr_res->actions_num = 0;
11276         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
11277                 /*
11278                  * Do not add a decap action if the match rule drops the
11279                  * packet: HW rejects rules combining decap & drop.
11280                  *
11281                  * If the tunnel match rule was inserted before the matching
11282                  * tunnel set rule, the flow table used in the match rule
11283                  * must be registered. The current implementation handles
11284                  * that in flow_dv_match_register() at the function end.
11285                  */
11286                 bool add_decap = true;
11287                 const struct rte_flow_action *ptr = actions;
11288
11289                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
11290                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
11291                                 add_decap = false;
11292                                 break;
11293                         }
11294                 }
11295                 if (add_decap) {
11296                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11297                                                            attr->transfer,
11298                                                            error))
11299                                 return -rte_errno;
11300                         dev_flow->dv.actions[actions_n++] =
11301                                         dev_flow->dv.encap_decap->action;
11302                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11303                 }
11304         }
11305         for (; !actions_end ; actions++) {
11306                 const struct rte_flow_action_queue *queue;
11307                 const struct rte_flow_action_rss *rss;
11308                 const struct rte_flow_action *action = actions;
11309                 const uint8_t *rss_key;
11310                 struct mlx5_flow_tbl_resource *tbl;
11311                 struct mlx5_aso_age_action *age_act;
11312                 struct mlx5_flow_counter *cnt_act;
11313                 uint32_t port_id = 0;
11314                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
11315                 int action_type = actions->type;
11316                 const struct rte_flow_action *found_action = NULL;
11317                 uint32_t jump_group = 0;
11318
11319                 if (!mlx5_flow_os_action_supported(action_type))
11320                         return rte_flow_error_set(error, ENOTSUP,
11321                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11322                                                   actions,
11323                                                   "action not supported");
11324                 switch (action_type) {
11325                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
11326                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
11327                         break;
11328                 case RTE_FLOW_ACTION_TYPE_VOID:
11329                         break;
11330                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11331                         if (flow_dv_translate_action_port_id(dev, action,
11332                                                              &port_id, error))
11333                                 return -rte_errno;
11334                         port_id_resource.port_id = port_id;
11335                         MLX5_ASSERT(!handle->rix_port_id_action);
11336                         if (flow_dv_port_id_action_resource_register
11337                             (dev, &port_id_resource, dev_flow, error))
11338                                 return -rte_errno;
11339                         dev_flow->dv.actions[actions_n++] =
11340                                         dev_flow->dv.port_id_action->action;
11341                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11342                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
11343                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11344                         num_of_dest++;
11345                         break;
11346                 case RTE_FLOW_ACTION_TYPE_FLAG:
11347                         action_flags |= MLX5_FLOW_ACTION_FLAG;
11348                         dev_flow->handle->mark = 1;
11349                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11350                                 struct rte_flow_action_mark mark = {
11351                                         .id = MLX5_FLOW_MARK_DEFAULT,
11352                                 };
11353
11354                                 if (flow_dv_convert_action_mark(dev, &mark,
11355                                                                 mhdr_res,
11356                                                                 error))
11357                                         return -rte_errno;
11358                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11359                                 break;
11360                         }
11361                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
11362                         /*
11363                          * Only one FLAG or MARK is supported per device flow
11364                          * right now. So the pointer to the tag resource must be
11365                          * zero before the register process.
11366                          */
11367                         MLX5_ASSERT(!handle->dvh.rix_tag);
11368                         if (flow_dv_tag_resource_register(dev, tag_be,
11369                                                           dev_flow, error))
11370                                 return -rte_errno;
11371                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11372                         dev_flow->dv.actions[actions_n++] =
11373                                         dev_flow->dv.tag_resource->action;
11374                         break;
11375                 case RTE_FLOW_ACTION_TYPE_MARK:
11376                         action_flags |= MLX5_FLOW_ACTION_MARK;
11377                         dev_flow->handle->mark = 1;
11378                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11379                                 const struct rte_flow_action_mark *mark =
11380                                         (const struct rte_flow_action_mark *)
11381                                                 actions->conf;
11382
11383                                 if (flow_dv_convert_action_mark(dev, mark,
11384                                                                 mhdr_res,
11385                                                                 error))
11386                                         return -rte_errno;
11387                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11388                                 break;
11389                         }
11390                         /* Fall-through */
11391                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
11392                         /* Legacy (non-extensive) MARK action. */
11393                         tag_be = mlx5_flow_mark_set
11394                               (((const struct rte_flow_action_mark *)
11395                                (actions->conf))->id);
11396                         MLX5_ASSERT(!handle->dvh.rix_tag);
11397                         if (flow_dv_tag_resource_register(dev, tag_be,
11398                                                           dev_flow, error))
11399                                 return -rte_errno;
11400                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11401                         dev_flow->dv.actions[actions_n++] =
11402                                         dev_flow->dv.tag_resource->action;
11403                         break;
11404                 case RTE_FLOW_ACTION_TYPE_SET_META:
11405                         if (flow_dv_convert_action_set_meta
11406                                 (dev, mhdr_res, attr,
11407                                  (const struct rte_flow_action_set_meta *)
11408                                   actions->conf, error))
11409                                 return -rte_errno;
11410                         action_flags |= MLX5_FLOW_ACTION_SET_META;
11411                         break;
11412                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
11413                         if (flow_dv_convert_action_set_tag
11414                                 (dev, mhdr_res,
11415                                  (const struct rte_flow_action_set_tag *)
11416                                   actions->conf, error))
11417                                 return -rte_errno;
11418                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11419                         break;
11420                 case RTE_FLOW_ACTION_TYPE_DROP:
11421                         action_flags |= MLX5_FLOW_ACTION_DROP;
11422                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
11423                         break;
11424                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11425                         queue = actions->conf;
11426                         rss_desc->queue_num = 1;
11427                         rss_desc->queue[0] = queue->index;
11428                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11429                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11430                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
11431                         num_of_dest++;
11432                         break;
11433                 case RTE_FLOW_ACTION_TYPE_RSS:
11434                         rss = actions->conf;
11435                         memcpy(rss_desc->queue, rss->queue,
11436                                rss->queue_num * sizeof(uint16_t));
11437                         rss_desc->queue_num = rss->queue_num;
11438                         /* NULL RSS key indicates default RSS key. */
11439                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11440                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11441                         /*
11442                          * rss->level and rss->types should be set in advance
11443                          * when expanding items for RSS.
11444                          */
11445                         action_flags |= MLX5_FLOW_ACTION_RSS;
11446                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
11447                                 MLX5_FLOW_FATE_SHARED_RSS :
11448                                 MLX5_FLOW_FATE_QUEUE;
11449                         break;
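                /*
                 * For reference (illustrative, application side), an RSS
                 * action whose queues and key are copied into rss_desc
                 * above could be defined as:
                 *
                 *   uint16_t queues[2] = {0, 1};
                 *   struct rte_flow_action_rss rss = {
                 *           .queue_num = 2,
                 *           .queue = queues,
                 *           .key = NULL,  -- NULL selects the default key
                 *           .key_len = 0,
                 *   };
                 */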
11450                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
11451                         flow->age = (uint32_t)(uintptr_t)(action->conf);
11452                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
11453                         __atomic_fetch_add(&age_act->refcnt, 1,
11454                                            __ATOMIC_RELAXED);
11455                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
11456                         action_flags |= MLX5_FLOW_ACTION_AGE;
11457                         break;
11458                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
11459                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
11460                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
11461                                                              NULL);
11462                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
11463                                            __ATOMIC_RELAXED);
11464                         /* Save the information first, apply it later. */
11465                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11466                         break;
11467                 case RTE_FLOW_ACTION_TYPE_AGE:
11468                         if (priv->sh->flow_hit_aso_en && attr->group) {
11469                                 /*
11470                                  * Create one shared age action, to be used
11471                                  * by all sub-flows.
11472                                  */
11473                                 if (!flow->age) {
11474                                         flow->age =
11475                                                 flow_dv_translate_create_aso_age
11476                                                         (dev, action->conf,
11477                                                          error);
11478                                         if (!flow->age)
11479                                                 return rte_flow_error_set
11480                                                 (error, rte_errno,
11481                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11482                                                  NULL,
11483                                                  "can't create ASO age action");
11484                                 }
11485                                 dev_flow->dv.actions[actions_n++] =
11486                                           (flow_aso_age_get_by_idx
11487                                                 (dev, flow->age))->dr_action;
11488                                 action_flags |= MLX5_FLOW_ACTION_AGE;
11489                                 break;
11490                         }
11491                         /* Fall-through */
11492                 case RTE_FLOW_ACTION_TYPE_COUNT:
11493                         if (!dev_conf->devx) {
11494                                 return rte_flow_error_set
11495                                               (error, ENOTSUP,
11496                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11497                                                NULL,
11498                                                "count action not supported");
11499                         }
11500                         /* Save the information first, apply it later. */
11501                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
11502                                 count = action->conf;
11503                         else
11504                                 age = action->conf;
11505                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11506                         break;
11507                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
11508                         dev_flow->dv.actions[actions_n++] =
11509                                                 priv->sh->pop_vlan_action;
11510                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
11511                         break;
11512                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
11513                         if (!(action_flags &
11514                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
11515                                 flow_dev_get_vlan_info_from_items(items, &vlan);
11516                         vlan.eth_proto = rte_be_to_cpu_16
11517                              ((((const struct rte_flow_action_of_push_vlan *)
11518                                                    actions->conf)->ethertype));
11519                         found_action = mlx5_flow_find_action
11520                                         (actions + 1,
11521                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
11522                         if (found_action)
11523                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11524                         found_action = mlx5_flow_find_action
11525                                         (actions + 1,
11526                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
11527                         if (found_action)
11528                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11529                         if (flow_dv_create_action_push_vlan
11530                                             (dev, attr, &vlan, dev_flow, error))
11531                                 return -rte_errno;
11532                         dev_flow->dv.actions[actions_n++] =
11533                                         dev_flow->dv.push_vlan_res->action;
11534                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
11535                         break;
11536                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
11537                         /* The OF_PUSH_VLAN action already handled this. */
11538                         MLX5_ASSERT(action_flags &
11539                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
11540                         break;
11541                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
11542                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
11543                                 break;
11544                         flow_dev_get_vlan_info_from_items(items, &vlan);
11545                         mlx5_update_vlan_vid_pcp(actions, &vlan);
11546                         /* If no VLAN push - this is a modify header action */
11547                         if (flow_dv_convert_action_modify_vlan_vid
11548                                                 (mhdr_res, actions, error))
11549                                 return -rte_errno;
11550                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
11551                         break;
11552                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11553                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11554                         if (flow_dv_create_action_l2_encap(dev, actions,
11555                                                            dev_flow,
11556                                                            attr->transfer,
11557                                                            error))
11558                                 return -rte_errno;
11559                         dev_flow->dv.actions[actions_n++] =
11560                                         dev_flow->dv.encap_decap->action;
11561                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11562                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11563                                 sample_act->action_flags |=
11564                                                         MLX5_FLOW_ACTION_ENCAP;
11565                         break;
11566                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
11567                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
11568                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11569                                                            attr->transfer,
11570                                                            error))
11571                                 return -rte_errno;
11572                         dev_flow->dv.actions[actions_n++] =
11573                                         dev_flow->dv.encap_decap->action;
11574                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11575                         break;
11576                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11577                         /* Handle encap with preceding decap. */
11578                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
11579                                 if (flow_dv_create_action_raw_encap
11580                                         (dev, actions, dev_flow, attr, error))
11581                                         return -rte_errno;
11582                                 dev_flow->dv.actions[actions_n++] =
11583                                         dev_flow->dv.encap_decap->action;
11584                         } else {
11585                                 /* Handle encap without preceding decap. */
11586                                 if (flow_dv_create_action_l2_encap
11587                                     (dev, actions, dev_flow, attr->transfer,
11588                                      error))
11589                                         return -rte_errno;
11590                                 dev_flow->dv.actions[actions_n++] =
11591                                         dev_flow->dv.encap_decap->action;
11592                         }
11593                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11594                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11595                                 sample_act->action_flags |=
11596                                                         MLX5_FLOW_ACTION_ENCAP;
11597                         break;
11598                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11599                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
11600                                 ;
11601                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
11602                                 if (flow_dv_create_action_l2_decap
11603                                     (dev, dev_flow, attr->transfer, error))
11604                                         return -rte_errno;
11605                                 dev_flow->dv.actions[actions_n++] =
11606                                         dev_flow->dv.encap_decap->action;
11607                         }
11608                         /* If decap is followed by encap, handle it at encap. */
11609                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11610                         break;
11611                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
11612                         dev_flow->dv.actions[actions_n++] =
11613                                 (void *)(uintptr_t)action->conf;
11614                         action_flags |= MLX5_FLOW_ACTION_JUMP;
11615                         break;
11616                 case RTE_FLOW_ACTION_TYPE_JUMP:
11617                         jump_group = ((const struct rte_flow_action_jump *)
11618                                                         action->conf)->group;
11619                         grp_info.std_tbl_fix = 0;
11620                         if (dev_flow->skip_scale &
11621                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
11622                                 grp_info.skip_scale = 1;
11623                         else
11624                                 grp_info.skip_scale = 0;
11625                         ret = mlx5_flow_group_to_table(dev, tunnel,
11626                                                        jump_group,
11627                                                        &table,
11628                                                        &grp_info, error);
11629                         if (ret)
11630                                 return ret;
11631                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
11632                                                        attr->transfer,
11633                                                        !!dev_flow->external,
11634                                                        tunnel, jump_group, 0,
11635                                                        0, error);
11636                         if (!tbl)
11637                                 return rte_flow_error_set
11638                                                 (error, errno,
11639                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11640                                                  NULL,
11641                                                  "cannot create jump action.");
11642                         if (flow_dv_jump_tbl_resource_register
11643                             (dev, tbl, dev_flow, error)) {
11644                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11645                                 return rte_flow_error_set
11646                                                 (error, errno,
11647                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11648                                                  NULL,
11649                                                  "cannot create jump action.");
11650                         }
11651                         dev_flow->dv.actions[actions_n++] =
11652                                         dev_flow->dv.jump->action;
11653                         action_flags |= MLX5_FLOW_ACTION_JUMP;
11654                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
11655                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
11656                         num_of_dest++;
11657                         break;
11658                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
11659                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
11660                         if (flow_dv_convert_action_modify_mac
11661                                         (mhdr_res, actions, error))
11662                                 return -rte_errno;
11663                         action_flags |= actions->type ==
11664                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
11665                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
11666                                         MLX5_FLOW_ACTION_SET_MAC_DST;
11667                         break;
11668                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
11669                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
11670                         if (flow_dv_convert_action_modify_ipv4
11671                                         (mhdr_res, actions, error))
11672                                 return -rte_errno;
11673                         action_flags |= actions->type ==
11674                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
11675                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
11676                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
11677                         break;
11678                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
11679                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
11680                         if (flow_dv_convert_action_modify_ipv6
11681                                         (mhdr_res, actions, error))
11682                                 return -rte_errno;
11683                         action_flags |= actions->type ==
11684                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
11685                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
11686                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
11687                         break;
11688                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
11689                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
11690                         if (flow_dv_convert_action_modify_tp
11691                                         (mhdr_res, actions, items,
11692                                          &flow_attr, dev_flow, !!(action_flags &
11693                                          MLX5_FLOW_ACTION_DECAP), error))
11694                                 return -rte_errno;
11695                         action_flags |= actions->type ==
11696                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
11697                                         MLX5_FLOW_ACTION_SET_TP_SRC :
11698                                         MLX5_FLOW_ACTION_SET_TP_DST;
11699                         break;
11700                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
11701                         if (flow_dv_convert_action_modify_dec_ttl
11702                                         (mhdr_res, items, &flow_attr, dev_flow,
11703                                          !!(action_flags &
11704                                          MLX5_FLOW_ACTION_DECAP), error))
11705                                 return -rte_errno;
11706                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
11707                         break;
11708                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
11709                         if (flow_dv_convert_action_modify_ttl
11710                                         (mhdr_res, actions, items, &flow_attr,
11711                                          dev_flow, !!(action_flags &
11712                                          MLX5_FLOW_ACTION_DECAP), error))
11713                                 return -rte_errno;
11714                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
11715                         break;
11716                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
11717                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
11718                         if (flow_dv_convert_action_modify_tcp_seq
11719                                         (mhdr_res, actions, error))
11720                                 return -rte_errno;
11721                         action_flags |= actions->type ==
11722                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
11723                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
11724                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
11725                         break;
11726
11727                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
11728                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
11729                         if (flow_dv_convert_action_modify_tcp_ack
11730                                         (mhdr_res, actions, error))
11731                                 return -rte_errno;
11732                         action_flags |= actions->type ==
11733                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
11734                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
11735                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
11736                         break;
11737                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
11738                         if (flow_dv_convert_action_set_reg
11739                                         (mhdr_res, actions, error))
11740                                 return -rte_errno;
11741                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11742                         break;
11743                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
11744                         if (flow_dv_convert_action_copy_mreg
11745                                         (dev, mhdr_res, actions, error))
11746                                 return -rte_errno;
11747                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11748                         break;
11749                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
11750                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
11751                         dev_flow->handle->fate_action =
11752                                         MLX5_FLOW_FATE_DEFAULT_MISS;
11753                         break;
11754                 case RTE_FLOW_ACTION_TYPE_METER:
11755                         if (!wks->fm)
11756                                 return rte_flow_error_set(error, rte_errno,
11757                                         RTE_FLOW_ERROR_TYPE_ACTION,
11758                                         NULL, "Failed to get meter in flow.");
11759                         /* Set the meter action. */
11760                         dev_flow->dv.actions[actions_n++] =
11761                                 wks->fm->meter_action;
11762                         action_flags |= MLX5_FLOW_ACTION_METER;
11763                         break;
11764                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
11765                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
11766                                                               actions, error))
11767                                 return -rte_errno;
11768                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
11769                         break;
11770                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
11771                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
11772                                                               actions, error))
11773                                 return -rte_errno;
11774                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
11775                         break;
11776                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
11777                         sample_act_pos = actions_n;
11778                         sample = (const struct rte_flow_action_sample *)
11779                                  action->conf;
11780                         actions_n++;
11781                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
11782                         /* Put encap action into the group when used with a port ID action. */
11783                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
11784                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
11785                                 sample_act->action_flags |=
11786                                                         MLX5_FLOW_ACTION_ENCAP;
11787                         break;
11788                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
11789                         if (flow_dv_convert_action_modify_field
11790                                         (dev, mhdr_res, actions, attr, error))
11791                                 return -rte_errno;
11792                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
11793                         break;
11794                 case RTE_FLOW_ACTION_TYPE_END:
11795                         actions_end = true;
11796                         if (mhdr_res->actions_num) {
11797                                 /* Create modify header action if needed. */
11798                                 if (flow_dv_modify_hdr_resource_register
11799                                         (dev, mhdr_res, dev_flow, error))
11800                                         return -rte_errno;
11801                                 dev_flow->dv.actions[modify_action_position] =
11802                                         handle->dvh.modify_hdr->action;
11803                         }
11804                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
11805                                 /*
11806                                  * Create one count action, to be used
11807                                  * by all sub-flows.
11808                                  */
11809                                 if (!flow->counter) {
11810                                         flow->counter =
11811                                                 flow_dv_translate_create_counter
11812                                                         (dev, dev_flow, count,
11813                                                          age);
11814                                         if (!flow->counter)
11815                                                 return rte_flow_error_set
11816                                                 (error, rte_errno,
11817                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11818                                                  NULL, "cannot create counter"
11819                                                  " object.");
11820                                 }
11821                                 dev_flow->dv.actions[actions_n] =
11822                                           (flow_dv_counter_get_by_idx(dev,
11823                                           flow->counter, NULL))->action;
11824                                 actions_n++;
11825                         }
11826                 default:
11827                         break;
11828                 }
11829                 if (mhdr_res->actions_num &&
11830                     modify_action_position == UINT32_MAX)
11831                         modify_action_position = actions_n++;
11832         }
11833         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
11834                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
11835                 int item_type = items->type;
11836
11837                 if (!mlx5_flow_os_item_supported(item_type))
11838                         return rte_flow_error_set(error, ENOTSUP,
11839                                                   RTE_FLOW_ERROR_TYPE_ITEM,
11840                                                   NULL, "item not supported");
11841                 switch (item_type) {
11842                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
11843                         flow_dv_translate_item_port_id
11844                                 (dev, match_mask, match_value, items, attr);
11845                         last_item = MLX5_FLOW_ITEM_PORT_ID;
11846                         break;
11847                 case RTE_FLOW_ITEM_TYPE_ETH:
11848                         flow_dv_translate_item_eth(match_mask, match_value,
11849                                                    items, tunnel,
11850                                                    dev_flow->dv.group);
11851                         matcher.priority = action_flags &
11852                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
11853                                         !dev_flow->external ?
11854                                         MLX5_PRIORITY_MAP_L3 :
11855                                         MLX5_PRIORITY_MAP_L2;
11856                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
11857                                              MLX5_FLOW_LAYER_OUTER_L2;
11858                         break;
11859                 case RTE_FLOW_ITEM_TYPE_VLAN:
11860                         flow_dv_translate_item_vlan(dev_flow,
11861                                                     match_mask, match_value,
11862                                                     items, tunnel,
11863                                                     dev_flow->dv.group);
11864                         matcher.priority = MLX5_PRIORITY_MAP_L2;
11865                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
11866                                               MLX5_FLOW_LAYER_INNER_VLAN) :
11867                                              (MLX5_FLOW_LAYER_OUTER_L2 |
11868                                               MLX5_FLOW_LAYER_OUTER_VLAN);
11869                         break;
11870                 case RTE_FLOW_ITEM_TYPE_IPV4:
11871                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11872                                                   &item_flags, &tunnel);
11873                         flow_dv_translate_item_ipv4(match_mask, match_value,
11874                                                     items, tunnel,
11875                                                     dev_flow->dv.group);
11876                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11877                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
11878                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
11879                         if (items->mask != NULL &&
11880                             ((const struct rte_flow_item_ipv4 *)
11881                              items->mask)->hdr.next_proto_id) {
11882                                 next_protocol =
11883                                         ((const struct rte_flow_item_ipv4 *)
11884                                          (items->spec))->hdr.next_proto_id;
11885                                 next_protocol &=
11886                                         ((const struct rte_flow_item_ipv4 *)
11887                                          (items->mask))->hdr.next_proto_id;
11888                         } else {
11889                                 /* Reset for inner layer. */
11890                                 next_protocol = 0xff;
11891                         }
11892                         break;
11893                 case RTE_FLOW_ITEM_TYPE_IPV6:
11894                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11895                                                   &item_flags, &tunnel);
11896                         flow_dv_translate_item_ipv6(match_mask, match_value,
11897                                                     items, tunnel,
11898                                                     dev_flow->dv.group);
11899                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11900                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
11901                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
11902                         if (items->mask != NULL &&
11903                             ((const struct rte_flow_item_ipv6 *)
11904                              items->mask)->hdr.proto) {
11905                                 next_protocol =
11906                                         ((const struct rte_flow_item_ipv6 *)
11907                                          items->spec)->hdr.proto;
11908                                 next_protocol &=
11909                                         ((const struct rte_flow_item_ipv6 *)
11910                                          items->mask)->hdr.proto;
11911                         } else {
11912                                 /* Reset for inner layer. */
11913                                 next_protocol = 0xff;
11914                         }
11915                         break;
11916                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
11917                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
11918                                                              match_value,
11919                                                              items, tunnel);
11920                         last_item = tunnel ?
11921                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
11922                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
11923                         if (items->mask != NULL &&
11924                             ((const struct rte_flow_item_ipv6_frag_ext *)
11925                              items->mask)->hdr.next_header) {
11926                                 next_protocol =
11927                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11928                                  items->spec)->hdr.next_header;
11929                                 next_protocol &=
11930                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11931                                  items->mask)->hdr.next_header;
11932                         } else {
11933                                 /* Reset for inner layer. */
11934                                 next_protocol = 0xff;
11935                         }
11936                         break;
11937                 case RTE_FLOW_ITEM_TYPE_TCP:
11938                         flow_dv_translate_item_tcp(match_mask, match_value,
11939                                                    items, tunnel);
11940                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11941                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
11942                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
11943                         break;
11944                 case RTE_FLOW_ITEM_TYPE_UDP:
11945                         flow_dv_translate_item_udp(match_mask, match_value,
11946                                                    items, tunnel);
11947                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11948                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
11949                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
11950                         break;
11951                 case RTE_FLOW_ITEM_TYPE_GRE:
11952                         flow_dv_translate_item_gre(match_mask, match_value,
11953                                                    items, tunnel);
11954                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11955                         last_item = MLX5_FLOW_LAYER_GRE;
11956                         break;
11957                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
11958                         flow_dv_translate_item_gre_key(match_mask,
11959                                                        match_value, items);
11960                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
11961                         break;
11962                 case RTE_FLOW_ITEM_TYPE_NVGRE:
11963                         flow_dv_translate_item_nvgre(match_mask, match_value,
11964                                                      items, tunnel);
11965                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11966                         last_item = MLX5_FLOW_LAYER_GRE;
11967                         break;
11968                 case RTE_FLOW_ITEM_TYPE_VXLAN:
11969                         flow_dv_translate_item_vxlan(match_mask, match_value,
11970                                                      items, tunnel);
11971                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11972                         last_item = MLX5_FLOW_LAYER_VXLAN;
11973                         break;
11974                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
11975                         flow_dv_translate_item_vxlan_gpe(match_mask,
11976                                                          match_value, items,
11977                                                          tunnel);
11978                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11979                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
11980                         break;
11981                 case RTE_FLOW_ITEM_TYPE_GENEVE:
11982                         flow_dv_translate_item_geneve(match_mask, match_value,
11983                                                       items, tunnel);
11984                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11985                         last_item = MLX5_FLOW_LAYER_GENEVE;
11986                         break;
11987                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
11988                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
11989                                                           match_value,
11990                                                           items, error);
11991                         if (ret)
11992                                 return rte_flow_error_set(error, -ret,
11993                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11994                                         "cannot create GENEVE TLV option");
11995                         flow->geneve_tlv_option = 1;
11996                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
11997                         break;
11998                 case RTE_FLOW_ITEM_TYPE_MPLS:
11999                         flow_dv_translate_item_mpls(match_mask, match_value,
12000                                                     items, last_item, tunnel);
12001                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12002                         last_item = MLX5_FLOW_LAYER_MPLS;
12003                         break;
12004                 case RTE_FLOW_ITEM_TYPE_MARK:
12005                         flow_dv_translate_item_mark(dev, match_mask,
12006                                                     match_value, items);
12007                         last_item = MLX5_FLOW_ITEM_MARK;
12008                         break;
12009                 case RTE_FLOW_ITEM_TYPE_META:
12010                         flow_dv_translate_item_meta(dev, match_mask,
12011                                                     match_value, attr, items);
12012                         last_item = MLX5_FLOW_ITEM_METADATA;
12013                         break;
12014                 case RTE_FLOW_ITEM_TYPE_ICMP:
12015                         flow_dv_translate_item_icmp(match_mask, match_value,
12016                                                     items, tunnel);
12017                         last_item = MLX5_FLOW_LAYER_ICMP;
12018                         break;
12019                 case RTE_FLOW_ITEM_TYPE_ICMP6:
12020                         flow_dv_translate_item_icmp6(match_mask, match_value,
12021                                                       items, tunnel);
12022                         last_item = MLX5_FLOW_LAYER_ICMP6;
12023                         break;
12024                 case RTE_FLOW_ITEM_TYPE_TAG:
12025                         flow_dv_translate_item_tag(dev, match_mask,
12026                                                    match_value, items);
12027                         last_item = MLX5_FLOW_ITEM_TAG;
12028                         break;
12029                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
12030                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
12031                                                         match_value, items);
12032                         last_item = MLX5_FLOW_ITEM_TAG;
12033                         break;
12034                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
12035                         flow_dv_translate_item_tx_queue(dev, match_mask,
12036                                                         match_value,
12037                                                         items);
12038                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
12039                         break;
12040                 case RTE_FLOW_ITEM_TYPE_GTP:
12041                         flow_dv_translate_item_gtp(match_mask, match_value,
12042                                                    items, tunnel);
12043                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12044                         last_item = MLX5_FLOW_LAYER_GTP;
12045                         break;
12046                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12047                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12048                                                           match_value,
12049                                                           items);
12050                         if (ret)
12051                                 return rte_flow_error_set(error, -ret,
12052                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12053                                         "cannot create GTP PSC item");
12054                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12055                         break;
12056                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12057                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
12058                                 /* Create it only the first time it is used. */
12059                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12060                                 if (ret)
12061                                         return rte_flow_error_set
12062                                                 (error, -ret,
12063                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12064                                                 NULL,
12065                                                 "cannot create eCPRI parser");
12066                         }
12067                         /* Adjust the matcher mask and device flow value sizes. */
12068                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12069                         dev_flow->dv.value.size =
12070                                         MLX5_ST_SZ_BYTES(fte_match_param);
12071                         flow_dv_translate_item_ecpri(dev, match_mask,
12072                                                      match_value, items);
12073                         /* No other protocol should follow eCPRI layer. */
12074                         last_item = MLX5_FLOW_LAYER_ECPRI;
12075                         break;
12076                 default:
12077                         break;
12078                 }
12079                 item_flags |= last_item;
12080         }
12081         /*
12082          * When E-Switch mode is enabled, there are two cases where the
12083          * source port must be set manually: a NIC steering rule, and an
12084          * E-Switch rule in which no port_id item was found. In both cases
12085          * the source port is set according to the port currently in use.
12086          */
12088         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
12089             (priv->representor || priv->master)) {
12090                 if (flow_dv_translate_item_port_id(dev, match_mask,
12091                                                    match_value, NULL, attr))
12092                         return -rte_errno;
12093         }
12094 #ifdef RTE_LIBRTE_MLX5_DEBUG
12095         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
12096                                               dev_flow->dv.value.buf));
12097 #endif
12098         /*
12099          * Layers may already be initialized from the prefix flow if this
12100          * dev_flow is the suffix flow.
12101          */
12102         handle->layers |= item_flags;
12103         if (action_flags & MLX5_FLOW_ACTION_RSS)
12104                 flow_dv_hashfields_set(dev_flow, rss_desc);
12105         /* If the sample action includes an RSS action, the Sample/Mirror
12106          * resource should be registered after the hash fields are updated.
12107          */
12108         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
12109                 ret = flow_dv_translate_action_sample(dev,
12110                                                       sample,
12111                                                       dev_flow, attr,
12112                                                       &num_of_dest,
12113                                                       sample_actions,
12114                                                       &sample_res,
12115                                                       error);
12116                 if (ret < 0)
12117                         return ret;
12118                 ret = flow_dv_create_action_sample(dev,
12119                                                    dev_flow,
12120                                                    num_of_dest,
12121                                                    &sample_res,
12122                                                    &mdest_res,
12123                                                    sample_actions,
12124                                                    action_flags,
12125                                                    error);
12126                 if (ret < 0)
12127                         return rte_flow_error_set
12128                                                 (error, rte_errno,
12129                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12130                                                 NULL,
12131                                                 "cannot create sample action");
12132                 if (num_of_dest > 1) {
12133                         dev_flow->dv.actions[sample_act_pos] =
12134                         dev_flow->dv.dest_array_res->action;
12135                 } else {
12136                         dev_flow->dv.actions[sample_act_pos] =
12137                         dev_flow->dv.sample_res->verbs_action;
12138                 }
12139         }
12140         /*
12141          * For multiple destinations (a sample action with ratio=1), the
12142          * encap action and the port ID action are combined into a group
12143          * action, so these original actions must be removed from the
12144          * flow and only the sample action used instead.
12145          */
12146         if (num_of_dest > 1 &&
12147             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
12148                 int i;
12149                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12150
12151                 for (i = 0; i < actions_n; i++) {
12152                         if ((sample_act->dr_encap_action &&
12153                                 sample_act->dr_encap_action ==
12154                                 dev_flow->dv.actions[i]) ||
12155                                 (sample_act->dr_port_id_action &&
12156                                 sample_act->dr_port_id_action ==
12157                                 dev_flow->dv.actions[i]) ||
12158                                 (sample_act->dr_jump_action &&
12159                                 sample_act->dr_jump_action ==
12160                                 dev_flow->dv.actions[i]))
12161                                 continue;
12162                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
12163                 }
12164                 memcpy((void *)dev_flow->dv.actions,
12165                                 (void *)temp_actions,
12166                                 tmp_actions_n * sizeof(void *));
12167                 actions_n = tmp_actions_n;
12168         }
12169         dev_flow->dv.actions_n = actions_n;
12170         dev_flow->act_flags = action_flags;
12171         if (wks->skip_matcher_reg)
12172                 return 0;
12173         /* Register matcher. */
12174         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
12175                                     matcher.mask.size);
12176         matcher.priority = mlx5_get_matcher_priority(dev, attr,
12177                                         matcher.priority);
12178         /* The reserved field does not need to be set to 0 here. */
12179         tbl_key.is_fdb = attr->transfer;
12180         tbl_key.is_egress = attr->egress;
12181         tbl_key.level = dev_flow->dv.group;
12182         tbl_key.id = dev_flow->dv.table_id;
12183         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
12184                                      tunnel, attr->group, error))
12185                 return -rte_errno;
12186         return 0;
12187 }
12188
12189 /**
12190  * Set the hash RX queue by hash fields (see enum ibv_rx_hash_fields).
12191  *
12192  * @param[in, out] action
12193  *   Shared RSS action holding hash RX queue objects.
12194  * @param[in] hash_fields
12195  *   Defines combination of packet fields to participate in RX hash.
12199  * @param[in] hrxq_idx
12200  *   Hash RX queue index to set.
12201  *
12202  * @return
12203  *   0 on success, otherwise negative errno value.
12204  */
12205 static int
12206 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
12207                               const uint64_t hash_fields,
12208                               uint32_t hrxq_idx)
12209 {
12210         uint32_t *hrxqs = action->hrxq;
12211
12212         switch (hash_fields & ~IBV_RX_HASH_INNER) {
12213         case MLX5_RSS_HASH_IPV4:
12214                 /* fall-through. */
12215         case MLX5_RSS_HASH_IPV4_DST_ONLY:
12216                 /* fall-through. */
12217         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
12218                 hrxqs[0] = hrxq_idx;
12219                 return 0;
12220         case MLX5_RSS_HASH_IPV4_TCP:
12221                 /* fall-through. */
12222         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
12223                 /* fall-through. */
12224         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
12225                 hrxqs[1] = hrxq_idx;
12226                 return 0;
12227         case MLX5_RSS_HASH_IPV4_UDP:
12228                 /* fall-through. */
12229         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
12230                 /* fall-through. */
12231         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
12232                 hrxqs[2] = hrxq_idx;
12233                 return 0;
12234         case MLX5_RSS_HASH_IPV6:
12235                 /* fall-through. */
12236         case MLX5_RSS_HASH_IPV6_DST_ONLY:
12237                 /* fall-through. */
12238         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
12239                 hrxqs[3] = hrxq_idx;
12240                 return 0;
12241         case MLX5_RSS_HASH_IPV6_TCP:
12242                 /* fall-through. */
12243         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
12244                 /* fall-through. */
12245         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
12246                 hrxqs[4] = hrxq_idx;
12247                 return 0;
12248         case MLX5_RSS_HASH_IPV6_UDP:
12249                 /* fall-through. */
12250         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
12251                 /* fall-through. */
12252         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
12253                 hrxqs[5] = hrxq_idx;
12254                 return 0;
12255         case MLX5_RSS_HASH_NONE:
12256                 hrxqs[6] = hrxq_idx;
12257                 return 0;
12258         default:
12259                 return -1;
12260         }
12261 }
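
/*
 * Illustrative sketch, not driver code: the switch above folds every
 * SRC_ONLY/DST_ONLY variant onto the slot of its base hash type, so a
 * shared RSS action holds at most seven hash RX queues. Assuming only
 * the base combinations, the same mapping could be table-driven, as in
 * the hypothetical example_rss_hash_slot() below (the SRC/DST_ONLY
 * variants handled by the switch would need extra rows).
 */
static __rte_unused int
example_rss_hash_slot(uint64_t hash_fields)
{
        static const struct {
                uint64_t fields; /* Base hash type. */
                int slot; /* Index into the hrxq[] array. */
        } map[] = {
                { MLX5_RSS_HASH_IPV4, 0 },
                { MLX5_RSS_HASH_IPV4_TCP, 1 },
                { MLX5_RSS_HASH_IPV4_UDP, 2 },
                { MLX5_RSS_HASH_IPV6, 3 },
                { MLX5_RSS_HASH_IPV6_TCP, 4 },
                { MLX5_RSS_HASH_IPV6_UDP, 5 },
                { MLX5_RSS_HASH_NONE, 6 },
        };
        unsigned int i;

        for (i = 0; i < RTE_DIM(map); i++)
                if ((hash_fields & ~IBV_RX_HASH_INNER) == map[i].fields)
                        return map[i].slot;
        return -1; /* Unsupported combination, as in the switch above. */
}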
12262
12263 /**
12264  * Look up the hash RX queue by hash fields (see enum ibv_rx_hash_fields).
12266  *
12267  * @param[in] dev
12268  *   Pointer to the Ethernet device structure.
12269  * @param[in] idx
12270  *   Shared RSS action ID holding hash RX queue objects.
12271  * @param[in] hash_fields
12272  *   Defines combination of packet fields to participate in RX hash.
12275  *
12276  * @return
12277  *   Valid hash RX queue index, otherwise 0.
12278  */
12279 static uint32_t
12280 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
12281                                  const uint64_t hash_fields)
12282 {
12283         struct mlx5_priv *priv = dev->data->dev_private;
12284         struct mlx5_shared_action_rss *shared_rss =
12285             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12286         const uint32_t *hrxqs = shared_rss->hrxq;
12287
12288         switch (hash_fields & ~IBV_RX_HASH_INNER) {
12289         case MLX5_RSS_HASH_IPV4:
12290                 /* fall-through. */
12291         case MLX5_RSS_HASH_IPV4_DST_ONLY:
12292                 /* fall-through. */
12293         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
12294                 return hrxqs[0];
12295         case MLX5_RSS_HASH_IPV4_TCP:
12296                 /* fall-through. */
12297         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
12298                 /* fall-through. */
12299         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
12300                 return hrxqs[1];
12301         case MLX5_RSS_HASH_IPV4_UDP:
12302                 /* fall-through. */
12303         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
12304                 /* fall-through. */
12305         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
12306                 return hrxqs[2];
12307         case MLX5_RSS_HASH_IPV6:
12308                 /* fall-through. */
12309         case MLX5_RSS_HASH_IPV6_DST_ONLY:
12310                 /* fall-through. */
12311         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
12312                 return hrxqs[3];
12313         case MLX5_RSS_HASH_IPV6_TCP:
12314                 /* fall-through. */
12315         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
12316                 /* fall-through. */
12317         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
12318                 return hrxqs[4];
12319         case MLX5_RSS_HASH_IPV6_UDP:
12320                 /* fall-through. */
12321         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
12322                 /* fall-through. */
12323         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
12324                 return hrxqs[5];
12325         case MLX5_RSS_HASH_NONE:
12326                 return hrxqs[6];
12327         default:
12328                 return 0;
12329         }
12331 }
12332
12333 /**
12334  * Apply the flow to the NIC, lock free
12335  * (the mutex should be acquired by the caller).
12336  *
12337  * @param[in] dev
12338  *   Pointer to the Ethernet device structure.
12339  * @param[in, out] flow
12340  *   Pointer to flow structure.
12341  * @param[out] error
12342  *   Pointer to error structure.
12343  *
12344  * @return
12345  *   0 on success, a negative errno value otherwise and rte_errno is set.
12346  */
12347 static int
12348 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
12349               struct rte_flow_error *error)
12350 {
12351         struct mlx5_flow_dv_workspace *dv;
12352         struct mlx5_flow_handle *dh;
12353         struct mlx5_flow_handle_dv *dv_h;
12354         struct mlx5_flow *dev_flow;
12355         struct mlx5_priv *priv = dev->data->dev_private;
12356         uint32_t handle_idx;
12357         int n;
12358         int err;
12359         int idx;
12360         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12361         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
12362
12363         MLX5_ASSERT(wks);
12364         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
12365                 dev_flow = &wks->flows[idx];
12366                 dv = &dev_flow->dv;
12367                 dh = dev_flow->handle;
12368                 dv_h = &dh->dvh;
12369                 n = dv->actions_n;
12370                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
12371                         if (dv->transfer) {
12372                                 MLX5_ASSERT(priv->sh->dr_drop_action);
12373                                 dv->actions[n++] = priv->sh->dr_drop_action;
12374                         } else {
12375 #ifdef HAVE_MLX5DV_DR
12376                                 /* DR supports drop action placeholder. */
12377                                 MLX5_ASSERT(priv->sh->dr_drop_action);
12378                                 dv->actions[n++] = priv->sh->dr_drop_action;
12379 #else
12380                                 /* For DV we use the explicit drop queue. */
12381                                 MLX5_ASSERT(priv->drop_queue.hrxq);
12382                                 dv->actions[n++] =
12383                                                 priv->drop_queue.hrxq->action;
12384 #endif
12385                         }
12386                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
12387                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
12388                         struct mlx5_hrxq *hrxq;
12389                         uint32_t hrxq_idx;
12390
12391                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
12392                                                     &hrxq_idx);
12393                         if (!hrxq) {
12394                                 rte_flow_error_set
12395                                         (error, rte_errno,
12396                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12397                                          "cannot get hash queue");
12398                                 goto error;
12399                         }
12400                         dh->rix_hrxq = hrxq_idx;
12401                         dv->actions[n++] = hrxq->action;
12402                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12403                         struct mlx5_hrxq *hrxq = NULL;
12404                         uint32_t hrxq_idx;
12405
12406                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
12407                                                 rss_desc->shared_rss,
12408                                                 dev_flow->hash_fields);
12409                         if (hrxq_idx)
12410                                 hrxq = mlx5_ipool_get
12411                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
12412                                          hrxq_idx);
12413                         if (!hrxq) {
12414                                 rte_flow_error_set
12415                                         (error, rte_errno,
12416                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12417                                          "cannot get hash queue");
12418                                 goto error;
12419                         }
12420                         dh->rix_srss = rss_desc->shared_rss;
12421                         dv->actions[n++] = hrxq->action;
12422                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
12423                         if (!priv->sh->default_miss_action) {
12424                                 rte_flow_error_set
12425                                         (error, rte_errno,
12426                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12427                                          "default miss action was not created.");
12428                                 goto error;
12429                         }
12430                         dv->actions[n++] = priv->sh->default_miss_action;
12431                 }
12432                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
12433                                                (void *)&dv->value, n,
12434                                                dv->actions, &dh->drv_flow);
12435                 if (err) {
12436                         rte_flow_error_set(error, errno,
12437                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12438                                            NULL,
12439                                            "hardware refuses to create flow");
12440                         goto error;
12441                 }
12442                 if (priv->vmwa_context &&
12443                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
12444                         /*
12445                          * The rule contains the VLAN pattern.
12446                          * For VF we are going to create VLAN
12447                          * interface to make hypervisor set correct
12448                          * e-Switch vport context.
12449                          */
12450                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
12451                 }
12452         }
12453         return 0;
12454 error:
12455         err = rte_errno; /* Save rte_errno before cleanup. */
12456         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
12457                        handle_idx, dh, next) {
12458                 /* hrxq is a union; don't clear it if the flag is not set. */
12459                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
12460                         mlx5_hrxq_release(dev, dh->rix_hrxq);
12461                         dh->rix_hrxq = 0;
12462                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12463                         dh->rix_srss = 0;
12464                 }
12465                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12466                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12467         }
12468         rte_errno = err; /* Restore rte_errno. */
12469         return -rte_errno;
12470 }
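
/*
 * Illustrative sketch, not driver code: the error path of flow_dv_apply()
 * above follows the usual DPDK cleanup idiom - save rte_errno before
 * releasing partially created resources (the release helpers may
 * overwrite it), then restore it so the caller sees the original failure.
 * A generic, hypothetical form of the pattern:
 */
static __rte_unused int
example_fail_with_cleanup(void)
{
        int err = rte_errno; /* Save rte_errno before cleanup. */

        /*
         * ... release partially created resources here; the release
         * helpers may themselves modify rte_errno ...
         */
        rte_errno = err; /* Restore the original error code. */
        return -rte_errno;
}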
12471
12472 void
12473 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
12474                           struct mlx5_cache_entry *entry)
12475 {
12476         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
12477                                                           entry);
12478
12479         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
12480         mlx5_free(cache);
12481 }
12482
12483 /**
12484  * Release the flow matcher.
12485  *
12486  * @param dev
12487  *   Pointer to Ethernet device.
12488  * @param handle
12489  *   Pointer to the mlx5_flow_handle holding the matcher.
12490  *
12491  * @return
12492  *   1 while a reference on it exists, 0 when freed.
12493  */
12494 static int
12495 flow_dv_matcher_release(struct rte_eth_dev *dev,
12496                         struct mlx5_flow_handle *handle)
12497 {
12498         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
12499         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
12500                                                             typeof(*tbl), tbl);
12501         int ret;
12502
12503         MLX5_ASSERT(matcher->matcher_object);
12504         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
12505         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
12506         return ret;
12507 }
12508
12509 /**
12510  * Release encap_decap resource.
12511  *
12512  * @param list
12513  *   Pointer to the hash list.
12514  * @param entry
12515  *   Pointer to the existing resource entry object.
12516  */
12517 void
12518 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
12519                               struct mlx5_hlist_entry *entry)
12520 {
12521         struct mlx5_dev_ctx_shared *sh = list->ctx;
12522         struct mlx5_flow_dv_encap_decap_resource *res =
12523                 container_of(entry, typeof(*res), entry);
12524
12525         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12526         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
12527 }
12528
12529 /**
12530  * Release an encap/decap resource.
12531  *
12532  * @param dev
12533  *   Pointer to Ethernet device.
12534  * @param encap_decap_idx
12535  *   Index of encap decap resource.
12536  *
12537  * @return
12538  *   1 while a reference on it exists, 0 when freed.
12539  */
12540 static int
12541 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
12542                                      uint32_t encap_decap_idx)
12543 {
12544         struct mlx5_priv *priv = dev->data->dev_private;
12545         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
12546
12547         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
12548                                         encap_decap_idx);
12549         if (!cache_resource)
12550                 return 0;
12551         MLX5_ASSERT(cache_resource->action);
12552         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
12553                                      &cache_resource->entry);
12554 }
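
/*
 * Note: the release helpers in this part of the file share one pattern -
 * resolve the resource index in its indexed pool, then unregister the
 * entry from a shared registry (hash list or cache list). The registry
 * drops the entry only when the last reference is gone, at which point
 * the matching *_remove_cb() callback destroys the hardware object and
 * returns the entry to its pool.
 */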
12555
12556 /**
12557  * Release a jump to table action resource.
12558  *
12559  * @param dev
12560  *   Pointer to Ethernet device.
12561  * @param rix_jump
12562  *   Index to the jump action resource.
12563  *
12564  * @return
12565  *   1 while a reference on it exists, 0 when freed.
12566  */
12567 static int
12568 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
12569                                   uint32_t rix_jump)
12570 {
12571         struct mlx5_priv *priv = dev->data->dev_private;
12572         struct mlx5_flow_tbl_data_entry *tbl_data;
12573
12574         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
12575                                   rix_jump);
12576         if (!tbl_data)
12577                 return 0;
12578         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
12579 }
12580
12581 void
12582 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
12583                          struct mlx5_hlist_entry *entry)
12584 {
12585         struct mlx5_flow_dv_modify_hdr_resource *res =
12586                 container_of(entry, typeof(*res), entry);
12587
12588         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12589         mlx5_free(entry);
12590 }
12591
12592 /**
12593  * Release a modify-header resource.
12594  *
12595  * @param dev
12596  *   Pointer to Ethernet device.
12597  * @param handle
12598  *   Pointer to mlx5_flow_handle.
12599  *
12600  * @return
12601  *   1 while a reference on it exists, 0 when freed.
12602  */
12603 static int
12604 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
12605                                     struct mlx5_flow_handle *handle)
12606 {
12607         struct mlx5_priv *priv = dev->data->dev_private;
12608         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
12609
12610         MLX5_ASSERT(entry->action);
12611         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
12612 }
12613
12614 void
12615 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
12616                           struct mlx5_cache_entry *entry)
12617 {
12618         struct mlx5_dev_ctx_shared *sh = list->ctx;
12619         struct mlx5_flow_dv_port_id_action_resource *cache =
12620                         container_of(entry, typeof(*cache), entry);
12621
12622         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12623         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
12624 }
12625
12626 /**
12627  * Release port ID action resource.
12628  *
12629  * @param dev
12630  *   Pointer to Ethernet device.
12631  * @param port_id
12632  *   Index to the port ID action resource.
12633  *
12634  * @return
12635  *   1 while a reference on it exists, 0 when freed.
12636  */
12637 static int
12638 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
12639                                         uint32_t port_id)
12640 {
12641         struct mlx5_priv *priv = dev->data->dev_private;
12642         struct mlx5_flow_dv_port_id_action_resource *cache;
12643
12644         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
12645         if (!cache)
12646                 return 0;
12647         MLX5_ASSERT(cache->action);
12648         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
12649                                      &cache->entry);
12650 }
12651
12652 /**
12653  * Release shared RSS action resource.
12654  *
12655  * @param dev
12656  *   Pointer to Ethernet device.
12657  * @param srss
12658  *   Shared RSS action index.
12659  */
12660 static void
12661 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
12662 {
12663         struct mlx5_priv *priv = dev->data->dev_private;
12664         struct mlx5_shared_action_rss *shared_rss;
12665
12666         shared_rss = mlx5_ipool_get
12667                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
12668         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
12669 }
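
/*
 * Illustrative sketch, not driver code: the shared RSS release above only
 * drops one reference with an atomic decrement; the object itself is
 * reclaimed elsewhere once the counter reaches zero. A minimal,
 * hypothetical form of this refcount-release idiom:
 */
struct example_refcnt_obj {
        uint32_t refcnt;
};

static __rte_unused bool
example_refcnt_release(struct example_refcnt_obj *obj)
{
        /* True when the caller dropped the last reference. */
        return __atomic_sub_fetch(&obj->refcnt, 1, __ATOMIC_RELAXED) == 0;
}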
12670
12671 void
12672 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
12673                             struct mlx5_cache_entry *entry)
12674 {
12675         struct mlx5_dev_ctx_shared *sh = list->ctx;
12676         struct mlx5_flow_dv_push_vlan_action_resource *cache =
12677                         container_of(entry, typeof(*cache), entry);
12678
12679         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12680         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
12681 }
12682
12683 /**
12684  * Release push vlan action resource.
12685  *
12686  * @param dev
12687  *   Pointer to Ethernet device.
12688  * @param handle
12689  *   Pointer to mlx5_flow_handle.
12690  *
12691  * @return
12692  *   1 while a reference on it exists, 0 when freed.
12693  */
12694 static int
12695 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
12696                                           struct mlx5_flow_handle *handle)
12697 {
12698         struct mlx5_priv *priv = dev->data->dev_private;
12699         struct mlx5_flow_dv_push_vlan_action_resource *cache;
12700         uint32_t idx = handle->dvh.rix_push_vlan;
12701
12702         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
12703         if (!cache)
12704                 return 0;
12705         MLX5_ASSERT(cache->action);
12706         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
12707                                      &cache->entry);
12708 }
12709
12710 /**
12711  * Release the fate resource.
12712  *
12713  * @param dev
12714  *   Pointer to Ethernet device.
12715  * @param handle
12716  *   Pointer to mlx5_flow_handle.
12717  */
12718 static void
12719 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
12720                                struct mlx5_flow_handle *handle)
12721 {
12722         if (!handle->rix_fate)
12723                 return;
12724         switch (handle->fate_action) {
12725         case MLX5_FLOW_FATE_QUEUE:
12726                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
12727                         mlx5_hrxq_release(dev, handle->rix_hrxq);
12728                 break;
12729         case MLX5_FLOW_FATE_JUMP:
12730                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
12731                 break;
12732         case MLX5_FLOW_FATE_PORT_ID:
12733                 flow_dv_port_id_action_resource_release(dev,
12734                                 handle->rix_port_id_action);
12735                 break;
12736         default:
12737                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
12738                 break;
12739         }
12740         handle->rix_fate = 0;
12741 }
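
/*
 * Illustrative sketch, not driver code: rix_fate is a union of resource
 * indices, and fate_action is the discriminator that selects which
 * release helper the single 32-bit index is routed to. A reduced,
 * hypothetical form of this tagged-union dispatch:
 */
enum example_fate_type { EXAMPLE_FATE_QUEUE, EXAMPLE_FATE_JUMP };

struct example_fate_handle {
        enum example_fate_type fate;
        union {
                uint32_t rix_hrxq; /* Valid when fate == EXAMPLE_FATE_QUEUE. */
                uint32_t rix_jump; /* Valid when fate == EXAMPLE_FATE_JUMP. */
        };
};

static __rte_unused uint32_t
example_fate_index(const struct example_fate_handle *h)
{
        /* The discriminator tells which union member is meaningful. */
        return h->fate == EXAMPLE_FATE_QUEUE ? h->rix_hrxq : h->rix_jump;
}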
12742
12743 void
12744 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
12745                          struct mlx5_cache_entry *entry)
12746 {
12747         struct mlx5_flow_dv_sample_resource *cache_resource =
12748                         container_of(entry, typeof(*cache_resource), entry);
12749         struct rte_eth_dev *dev = cache_resource->dev;
12750         struct mlx5_priv *priv = dev->data->dev_private;
12751
12752         if (cache_resource->verbs_action)
12753                 claim_zero(mlx5_flow_os_destroy_flow_action
12754                                 (cache_resource->verbs_action));
12755         if (cache_resource->normal_path_tbl)
12756                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12757                         cache_resource->normal_path_tbl);
12758         flow_dv_sample_sub_actions_release(dev,
12759                                 &cache_resource->sample_idx);
12760         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12761                         cache_resource->idx);
12762         DRV_LOG(DEBUG, "sample resource %p: removed",
12763                 (void *)cache_resource);
12764 }
12765
12766 /**
12767  * Release a sample resource.
12768  *
12769  * @param dev
12770  *   Pointer to Ethernet device.
12771  * @param handle
12772  *   Pointer to mlx5_flow_handle.
12773  *
12774  * @return
12775  *   1 while a reference on it exists, 0 when freed.
12776  */
12777 static int
12778 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
12779                                      struct mlx5_flow_handle *handle)
12780 {
12781         struct mlx5_priv *priv = dev->data->dev_private;
12782         struct mlx5_flow_dv_sample_resource *cache_resource;
12783
12784         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12785                          handle->dvh.rix_sample);
12786         if (!cache_resource)
12787                 return 0;
12788         MLX5_ASSERT(cache_resource->verbs_action);
12789         return mlx5_cache_unregister(&priv->sh->sample_action_list,
12790                                      &cache_resource->entry);
12791 }
12792
12793 void
12794 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
12795                              struct mlx5_cache_entry *entry)
12796 {
12797         struct mlx5_flow_dv_dest_array_resource *cache_resource =
12798                         container_of(entry, typeof(*cache_resource), entry);
12799         struct rte_eth_dev *dev = cache_resource->dev;
12800         struct mlx5_priv *priv = dev->data->dev_private;
12801         uint32_t i = 0;
12802
12803         MLX5_ASSERT(cache_resource->action);
12804         if (cache_resource->action)
12805                 claim_zero(mlx5_flow_os_destroy_flow_action
12806                                         (cache_resource->action));
12807         for (; i < cache_resource->num_of_dest; i++)
12808                 flow_dv_sample_sub_actions_release(dev,
12809                                 &cache_resource->sample_idx[i]);
12810         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12811                         cache_resource->idx);
12812         DRV_LOG(DEBUG, "destination array resource %p: removed",
12813                 (void *)cache_resource);
12814 }
12815
12816 /**
12817  * Release a destination array resource.
12818  *
12819  * @param dev
12820  *   Pointer to Ethernet device.
12821  * @param handle
12822  *   Pointer to mlx5_flow_handle.
12823  *
12824  * @return
12825  *   1 while a reference on it exists, 0 when freed.
12826  */
12827 static int
12828 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
12829                                     struct mlx5_flow_handle *handle)
12830 {
12831         struct mlx5_priv *priv = dev->data->dev_private;
12832         struct mlx5_flow_dv_dest_array_resource *cache;
12833
12834         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12835                                handle->dvh.rix_dest_array);
12836         if (!cache)
12837                 return 0;
12838         MLX5_ASSERT(cache->action);
12839         return mlx5_cache_unregister(&priv->sh->dest_array_list,
12840                                      &cache->entry);
12841 }
12842
12843 static void
12844 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
12845 {
12846         struct mlx5_priv *priv = dev->data->dev_private;
12847         struct mlx5_dev_ctx_shared *sh = priv->sh;
12848         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
12849                                 sh->geneve_tlv_option_resource;

12850         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
12851         if (geneve_opt_resource) {
12852                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
12853                                          __ATOMIC_RELAXED))) {
12854                         claim_zero(mlx5_devx_cmd_destroy
12855                                         (geneve_opt_resource->obj));
12856                         mlx5_free(sh->geneve_tlv_option_resource);
12857                         sh->geneve_tlv_option_resource = NULL;
12858                 }
12859         }
12860         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
12861 }
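
/*
 * Illustrative sketch, not driver code: the GENEVE TLV option resource is
 * a per-device singleton, so its release combines a spinlock (serializing
 * against concurrent create/release) with a reference counter (freeing
 * only on the last user). A reduced, hypothetical form, reusing the
 * example_refcnt_obj type from the sketch above:
 */
static __rte_unused void
example_singleton_release(rte_spinlock_t *sl,
                          struct example_refcnt_obj **singleton)
{
        rte_spinlock_lock(sl);
        if (*singleton &&
            __atomic_sub_fetch(&(*singleton)->refcnt, 1,
                               __ATOMIC_RELAXED) == 0) {
                mlx5_free(*singleton);
                *singleton = NULL;
        }
        rte_spinlock_unlock(sl);
}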
12862
12863 /**
12864  * Remove the flow from the NIC but keep it in memory.
12865  * Lock free (the mutex should be acquired by the caller).
12866  *
12867  * @param[in] dev
12868  *   Pointer to Ethernet device.
12869  * @param[in, out] flow
12870  *   Pointer to flow structure.
12871  */
12872 static void
12873 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12874 {
12875         struct mlx5_flow_handle *dh;
12876         uint32_t handle_idx;
12877         struct mlx5_priv *priv = dev->data->dev_private;
12878
12879         if (!flow)
12880                 return;
12881         handle_idx = flow->dev_handles;
12882         while (handle_idx) {
12883                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12884                                     handle_idx);
12885                 if (!dh)
12886                         return;
12887                 if (dh->drv_flow) {
12888                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
12889                         dh->drv_flow = NULL;
12890                 }
12891                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
12892                         flow_dv_fate_resource_release(dev, dh);
12893                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12894                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12895                 handle_idx = dh->next.next;
12896         }
12897 }
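
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * device handles hang off a flow as an index-linked list inside an ipool
 * rather than a pointer-linked list, so a traversal resolves each 32-bit
 * index to a handle exactly as flow_dv_remove() does above.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* Never defined; documentation only. */
static void
example_walk_flow_handles(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx = flow->dev_handles;
        struct mlx5_flow_handle *dh;

        while (handle_idx) {
                dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
                                    handle_idx);
                if (!dh)
                        break; /* Dangling index terminates the walk. */
                /* ... inspect *dh here ... */
                handle_idx = dh->next.next;
        }
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */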
12898
12899 /**
12900  * Remove the flow from the NIC and the memory.
12901  * Lock free (the mutex should be acquired by the caller).
12902  *
12903  * @param[in] dev
12904  *   Pointer to the Ethernet device structure.
12905  * @param[in, out] flow
12906  *   Pointer to flow structure.
12907  */
12908 static void
12909 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
12910 {
12911         struct mlx5_flow_handle *dev_handle;
12912         struct mlx5_priv *priv = dev->data->dev_private;
12913         struct mlx5_flow_meter_info *fm = NULL;
12914         uint32_t srss = 0;
12915
12916         if (!flow)
12917                 return;
12918         flow_dv_remove(dev, flow);
12919         if (flow->counter) {
12920                 flow_dv_counter_free(dev, flow->counter);
12921                 flow->counter = 0;
12922         }
12923         if (flow->meter) {
12924                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
12925                 if (fm)
12926                         mlx5_flow_meter_detach(priv, fm);
12927                 flow->meter = 0;
12928         }
12929         if (flow->age)
12930                 flow_dv_aso_age_release(dev, flow->age);
12931         if (flow->geneve_tlv_option) {
12932                 flow_dv_geneve_tlv_option_resource_release(dev);
12933                 flow->geneve_tlv_option = 0;
12934         }
12935         while (flow->dev_handles) {
12936                 uint32_t tmp_idx = flow->dev_handles;
12937
12938                 dev_handle = mlx5_ipool_get(priv->sh->ipool
12939                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
12940                 if (!dev_handle)
12941                         return;
12942                 flow->dev_handles = dev_handle->next.next;
12943                 if (dev_handle->dvh.matcher)
12944                         flow_dv_matcher_release(dev, dev_handle);
12945                 if (dev_handle->dvh.rix_sample)
12946                         flow_dv_sample_resource_release(dev, dev_handle);
12947                 if (dev_handle->dvh.rix_dest_array)
12948                         flow_dv_dest_array_resource_release(dev, dev_handle);
12949                 if (dev_handle->dvh.rix_encap_decap)
12950                         flow_dv_encap_decap_resource_release(dev,
12951                                 dev_handle->dvh.rix_encap_decap);
12952                 if (dev_handle->dvh.modify_hdr)
12953                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
12954                 if (dev_handle->dvh.rix_push_vlan)
12955                         flow_dv_push_vlan_action_resource_release(dev,
12956                                                                   dev_handle);
12957                 if (dev_handle->dvh.rix_tag)
12958                         flow_dv_tag_release(dev,
12959                                             dev_handle->dvh.rix_tag);
12960                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
12961                         flow_dv_fate_resource_release(dev, dev_handle);
12962                 else if (!srss)
12963                         srss = dev_handle->rix_srss;
12964                 if (fm && dev_handle->is_meter_flow_id &&
12965                     dev_handle->split_flow_id)
12966                         mlx5_ipool_free(fm->flow_ipool,
12967                                         dev_handle->split_flow_id);
12968                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12969                            tmp_idx);
12970         }
12971         if (srss)
12972                 flow_dv_shared_rss_action_release(dev, srss);
12973 }
12974
12975 /**
12976  * Release array of hash RX queue objects.
12977  * Helper function.
12978  *
12979  * @param[in] dev
12980  *   Pointer to the Ethernet device structure.
12981  * @param[in, out] hrxqs
12982  *   Array of hash RX queue objects.
12983  *
12984  * @return
12985  *   Total number of references to hash RX queue objects in *hrxqs* array
12986  *   after this operation.
12987  */
12988 static int
12989 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
12990                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
12991 {
12992         size_t i;
12993         int remaining = 0;
12994
12995         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
12996                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
12997
12998                 if (!ret)
12999                         (*hrxqs)[i] = 0;
13000                 remaining += ret;
13001         }
13002         return remaining;
13003 }
13004
13005 /**
13006  * Release all hash RX queue objects representing shared RSS action.
13007  *
13008  * @param[in] dev
13009  *   Pointer to the Ethernet device structure.
13010  * @param[in, out] action
13011  *   Shared RSS action to remove hash RX queue objects from.
13012  *
13013  * @return
13014  *   Total number of references to hash RX queue objects stored in *action*
13015  *   after this operation.
13016  *   Expected to be 0 if no external references are held.
13017  */
13018 static int
13019 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
13020                                  struct mlx5_shared_action_rss *shared_rss)
13021 {
13022         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
13023 }
13024
13025 /**
13026  * Adjust the L3/L4 hash value of a pre-created shared RSS hrxq
13027  * according to user input.
13028  *
13029  * Only one hash value is available for one L3+L4 combination.
13030  * For example:
13031  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
13032  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
13033  * the same slot in mlx5_rss_hash_fields.
13034  *
13035  * @param[in] rss
13036  *   Pointer to the shared action RSS conf.
13037  * @param[in, out] hash_field
13038  *   Hash field variable to be adjusted.
13039  *
13040  * @return
13041  *   void
13042  */
13043 static void
13044 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
13045                                      uint64_t *hash_field)
13046 {
13047         uint64_t rss_types = rss->origin.types;
13048
13049         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13050         case MLX5_RSS_HASH_IPV4:
13051                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13052                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13053                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13054                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13055                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13056                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13057                         else
13058                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13059                 }
13060                 return;
13061         case MLX5_RSS_HASH_IPV6:
13062                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13063                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13064                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13065                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13066                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13067                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13068                         else
13069                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13070                 }
13071                 return;
13072         case MLX5_RSS_HASH_IPV4_UDP:
13073                 /* fall-through. */
13074         case MLX5_RSS_HASH_IPV6_UDP:
13075                 if (rss_types & ETH_RSS_UDP) {
13076                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13077                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13078                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13079                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13080                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13081                         else
13082                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
13083                 }
13084                 return;
13085         case MLX5_RSS_HASH_IPV4_TCP:
13086                 /* fall-through. */
13087         case MLX5_RSS_HASH_IPV6_TCP:
13088                 if (rss_types & ETH_RSS_TCP) {
13089                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
13090                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13091                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
13092                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13093                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
13094                         else
13095                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
13096                 }
13097                 return;
13098         default:
13099                 return;
13100         }
13101 }
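
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * the effect of the adjustment above for a user requesting source-only
 * IPv4 hashing. The pre-created slot value MLX5_RSS_HASH_IPV4 is narrowed
 * to IBV_RX_HASH_SRC_IPV4 because ETH_RSS_L3_SRC_ONLY is set in the
 * shared action types.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* Never defined; documentation only. */
static void
example_rss_hash_adjust(void)
{
        struct mlx5_shared_action_rss rss;
        uint64_t hash_fields = MLX5_RSS_HASH_IPV4;

        memset(&rss, 0, sizeof(rss));
        rss.origin.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY;
        __flow_dv_action_rss_l34_hash_adjust(&rss, &hash_fields);
        /* hash_fields now holds IBV_RX_HASH_SRC_IPV4 only. */
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */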
13102
13103 /**
13104  * Set up the shared RSS action.
13105  * Prepare a set of hash RX queue objects sufficient to handle all valid
13106  * hash_fields combinations (see enum ibv_rx_hash_fields).
13107  *
13108  * @param[in] dev
13109  *   Pointer to the Ethernet device structure.
13110  * @param[in] action_idx
13111  *   Shared RSS action ipool index.
13112  * @param[in, out] action
13113  *   Partially initialized shared RSS action.
13114  * @param[out] error
13115  *   Perform verbose error reporting if not NULL. Initialized in case of
13116  *   error only.
13117  *
13118  * @return
13119  *   0 on success, otherwise negative errno value.
13120  */
13121 static int
13122 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
13123                            uint32_t action_idx,
13124                            struct mlx5_shared_action_rss *shared_rss,
13125                            struct rte_flow_error *error)
13126 {
13127         struct mlx5_flow_rss_desc rss_desc = { 0 };
13128         size_t i;
13129         int err;
13130
13131         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
13132                 return rte_flow_error_set(error, rte_errno,
13133                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13134                                           "cannot setup indirection table");
13135         }
13136         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
13137         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
13138         rss_desc.const_q = shared_rss->origin.queue;
13139         rss_desc.queue_num = shared_rss->origin.queue_num;
13140         /* Set non-zero value to indicate a shared RSS. */
13141         rss_desc.shared_rss = action_idx;
13142         rss_desc.ind_tbl = shared_rss->ind_tbl;
13143         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
13144                 uint32_t hrxq_idx;
13145                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
13146                 int tunnel = 0;
13147
13148                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
13149                 if (shared_rss->origin.level > 1) {
13150                         hash_fields |= IBV_RX_HASH_INNER;
13151                         tunnel = 1;
13152                 }
13153                 rss_desc.tunnel = tunnel;
13154                 rss_desc.hash_fields = hash_fields;
13155                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
13156                 if (!hrxq_idx) {
13157                         rte_flow_error_set
13158                                 (error, rte_errno,
13159                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13160                                  "cannot get hash queue");
13161                         goto error_hrxq_new;
13162                 }
13163                 err = __flow_dv_action_rss_hrxq_set
13164                         (shared_rss, hash_fields, hrxq_idx);
13165                 MLX5_ASSERT(!err);
13166         }
13167         return 0;
13168 error_hrxq_new:
13169         err = rte_errno;
13170         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
13171         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
13172                 shared_rss->ind_tbl = NULL;
13173         rte_errno = err;
13174         return -rte_errno;
13175 }
13176
13177 /**
13178  * Create shared RSS action.
13179  *
13180  * @param[in] dev
13181  *   Pointer to the Ethernet device structure.
13182  * @param[in] conf
13183  *   Shared action configuration.
13184  * @param[in] rss
13185  *   RSS action specification used to create shared action.
13186  * @param[out] error
13187  *   Perform verbose error reporting if not NULL. Initialized in case of
13188  *   error only.
13189  *
13190  * @return
13191  *   A valid shared action ID in case of success, 0 otherwise and
13192  *   rte_errno is set.
13193  */
13194 static uint32_t
13195 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
13196                             const struct rte_flow_indir_action_conf *conf,
13197                             const struct rte_flow_action_rss *rss,
13198                             struct rte_flow_error *error)
13199 {
13200         struct mlx5_priv *priv = dev->data->dev_private;
13201         struct mlx5_shared_action_rss *shared_rss = NULL;
13202         void *queue = NULL;
13203         struct rte_flow_action_rss *origin;
13204         const uint8_t *rss_key;
13205         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
13206         uint32_t idx;
13207
13208         RTE_SET_USED(conf);
13209         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
13210                             0, SOCKET_ID_ANY);
13211         shared_rss = mlx5_ipool_zmalloc
13212                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
13213         if (!shared_rss || !queue) {
13214                 rte_flow_error_set(error, ENOMEM,
13215                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13216                                    "cannot allocate resource memory");
13217                 goto error_rss_init;
13218         }
13219         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
13220                 rte_flow_error_set(error, E2BIG,
13221                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13222                                    "rss action number out of range");
13223                 goto error_rss_init;
13224         }
13225         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
13226                                           sizeof(*shared_rss->ind_tbl),
13227                                           0, SOCKET_ID_ANY);
13228         if (!shared_rss->ind_tbl) {
13229                 rte_flow_error_set(error, ENOMEM,
13230                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13231                                    "cannot allocate resource memory");
13232                 goto error_rss_init;
13233         }
13234         memcpy(queue, rss->queue, queue_size);
13235         shared_rss->ind_tbl->queues = queue;
13236         shared_rss->ind_tbl->queues_n = rss->queue_num;
13237         origin = &shared_rss->origin;
13238         origin->func = rss->func;
13239         origin->level = rss->level;
13240         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
13241         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
13242         /* NULL RSS key indicates default RSS key. */
13243         rss_key = !rss->key ? rss_hash_default_key : rss->key;
13244         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
13245         origin->key = &shared_rss->key[0];
13246         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
13247         origin->queue = queue;
13248         origin->queue_num = rss->queue_num;
13249         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
13250                 goto error_rss_init;
13251         rte_spinlock_init(&shared_rss->action_rss_sl);
13252         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13253         rte_spinlock_lock(&priv->shared_act_sl);
13254         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13255                      &priv->rss_shared_actions, idx, shared_rss, next);
13256         rte_spinlock_unlock(&priv->shared_act_sl);
13257         return idx;
13258 error_rss_init:
13259         if (shared_rss) {
13260                 if (shared_rss->ind_tbl)
13261                         mlx5_free(shared_rss->ind_tbl);
13262                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13263                                 idx);
13264         }
13265         if (queue)
13266                 mlx5_free(queue);
13267         return 0;
13268 }
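
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * how an application reaches the creation path above through the public
 * rte_flow API. The port number and queue IDs are hypothetical; a NULL
 * key and zero types select the defaults handled in
 * __flow_dv_action_rss_create().
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* Never defined; documentation only. */
static struct rte_flow_action_handle *
example_indirect_rss_create(uint16_t port_id, struct rte_flow_error *error)
{
        static const uint16_t queues[] = { 0, 1, 2, 3 };
        const struct rte_flow_action_rss rss = {
                .level = 1,     /* Outermost encapsulation level. */
                .types = 0,     /* 0 selects the default ETH_RSS_IP. */
                .key = NULL,    /* NULL selects the default RSS key. */
                .queue = queues,
                .queue_num = RTE_DIM(queues),
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };
        const struct rte_flow_indir_action_conf conf = {
                .ingress = 1,
        };

        return rte_flow_action_handle_create(port_id, &conf, &action, error);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */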
13269
13270 /**
13271  * Destroy the shared RSS action.
13272  * Release related hash RX queue objects.
13273  *
13274  * @param[in] dev
13275  *   Pointer to the Ethernet device structure.
13276  * @param[in] idx
13277  *   The shared RSS action object ID to be removed.
13278  * @param[out] error
13279  *   Perform verbose error reporting if not NULL. Initialized in case of
13280  *   error only.
13281  *
13282  * @return
13283  *   0 on success, otherwise negative errno value.
13284  */
13285 static int
13286 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
13287                              struct rte_flow_error *error)
13288 {
13289         struct mlx5_priv *priv = dev->data->dev_private;
13290         struct mlx5_shared_action_rss *shared_rss =
13291             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13292         uint32_t old_refcnt = 1;
13293         int remaining;
13294         uint16_t *queue = NULL;
13295
13296         if (!shared_rss)
13297                 return rte_flow_error_set(error, EINVAL,
13298                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13299                                           "invalid shared action");
13300         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
13301         if (remaining)
13302                 return rte_flow_error_set(error, EBUSY,
13303                                           RTE_FLOW_ERROR_TYPE_ACTION,
13304                                           NULL,
13305                                           "shared rss hrxq has references");
13306         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
13307                                          0, 0, __ATOMIC_ACQUIRE,
13308                                          __ATOMIC_RELAXED))
13309                 return rte_flow_error_set(error, EBUSY,
13310                                           RTE_FLOW_ERROR_TYPE_ACTION,
13311                                           NULL,
13312                                           "shared rss has references");
13313         queue = shared_rss->ind_tbl->queues;
13314         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
13315         if (remaining)
13316                 return rte_flow_error_set(error, EBUSY,
13317                                           RTE_FLOW_ERROR_TYPE_ACTION,
13318                                           NULL,
13319                                           "shared rss indirection table has"
13320                                           " references");
13321         mlx5_free(queue);
13322         rte_spinlock_lock(&priv->shared_act_sl);
13323         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13324                      &priv->rss_shared_actions, idx, shared_rss, next);
13325         rte_spinlock_unlock(&priv->shared_act_sl);
13326         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13327                         idx);
13328         return 0;
13329 }
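
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * the compare-and-swap used above refuses destruction unless the caller
 * holds the only remaining reference, atomically moving the counter from
 * 1 to 0 so that concurrent users cannot revive a dying action.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* Never defined; documentation only. */
static int
example_try_drop_last_ref(uint32_t *refcnt)
{
        uint32_t expected = 1;

        /* Succeeds only if *refcnt was exactly 1, i.e. the last reference. */
        if (!__atomic_compare_exchange_n(refcnt, &expected, 0, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                return -EBUSY;
        return 0;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */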
13330
13331 /**
13332  * Create an indirect action, lock free
13333  * (the mutex should be acquired by the caller).
13334  * Dispatcher for the action-type-specific call.
13335  *
13336  * @param[in] dev
13337  *   Pointer to the Ethernet device structure.
13338  * @param[in] conf
13339  *   Shared action configuration.
13340  * @param[in] action
13341  *   Action specification used to create indirect action.
13342  * @param[out] error
13343  *   Perform verbose error reporting if not NULL. Initialized in case of
13344  *   error only.
13345  *
13346  * @return
13347  *   A valid shared action handle in case of success, NULL otherwise and
13348  *   rte_errno is set.
13349  */
13350 static struct rte_flow_action_handle *
13351 flow_dv_action_create(struct rte_eth_dev *dev,
13352                       const struct rte_flow_indir_action_conf *conf,
13353                       const struct rte_flow_action *action,
13354                       struct rte_flow_error *err)
13355 {
13356         uint32_t idx = 0;
13357         uint32_t ret = 0;
13358
13359         switch (action->type) {
13360         case RTE_FLOW_ACTION_TYPE_RSS:
13361                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
13362                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
13363                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13364                 break;
13365         case RTE_FLOW_ACTION_TYPE_AGE:
13366                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
13367                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
13368                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13369                 if (ret) {
13370                         struct mlx5_aso_age_action *aso_age =
13371                                               flow_aso_age_get_by_idx(dev, ret);
13372
13373                         if (!aso_age->age_params.context)
13374                                 aso_age->age_params.context =
13375                                                          (void *)(uintptr_t)idx;
13376                 }
13377                 break;
13378         case RTE_FLOW_ACTION_TYPE_COUNT:
13379                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
13380                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
13381                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13382                 break;
13383         default:
13384                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
13385                                    NULL, "action type not supported");
13386                 break;
13387         }
13388         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
13389 }
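
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * the returned handle is not a pointer but a 32-bit value packing the
 * indirect action type above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the
 * per-type pool index below it, exactly as composed above and decomposed
 * in flow_dv_action_destroy().
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* Never defined; documentation only. */
static uint32_t
example_indirect_handle_type(const struct rte_flow_action_handle *handle)
{
        /* High bits: MLX5_INDIRECT_ACTION_TYPE_* discriminator. */
        return (uint32_t)(uintptr_t)handle >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
}

static uint32_t
example_indirect_handle_index(const struct rte_flow_action_handle *handle)
{
        /* Low bits: index into the per-type object pool. */
        return (uint32_t)(uintptr_t)handle &
               ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */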
13390
13391 /**
13392  * Destroy the indirect action.
13393  * Release action-related resources on the NIC and in memory.
13394  * Lock free (the mutex should be acquired by the caller).
13395  * Dispatcher for the action-type-specific call.
13396  *
13397  * @param[in] dev
13398  *   Pointer to the Ethernet device structure.
13399  * @param[in] handle
13400  *   The indirect action object handle to be removed.
13401  * @param[out] error
13402  *   Perform verbose error reporting if not NULL. Initialized in case of
13403  *   error only.
13404  *
13405  * @return
13406  *   0 on success, otherwise negative errno value.
13407  */
13408 static int
13409 flow_dv_action_destroy(struct rte_eth_dev *dev,
13410                        struct rte_flow_action_handle *handle,
13411                        struct rte_flow_error *error)
13412 {
13413         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13414         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13415         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13416         struct mlx5_flow_counter *cnt;
13417         uint32_t no_flow_refcnt = 1;
13418         int ret;
13419
13420         switch (type) {
13421         case MLX5_INDIRECT_ACTION_TYPE_RSS:
13422                 return __flow_dv_action_rss_release(dev, idx, error);
13423         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
13424                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
13425                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
13426                                                  &no_flow_refcnt, 1, false,
13427                                                  __ATOMIC_ACQUIRE,
13428                                                  __ATOMIC_RELAXED))
13429                         return rte_flow_error_set(error, EBUSY,
13430                                                   RTE_FLOW_ERROR_TYPE_ACTION,
13431                                                   NULL,
13432                                                   "Indirect count action has references");
13433                 flow_dv_counter_free(dev, idx);
13434                 return 0;
13435         case MLX5_INDIRECT_ACTION_TYPE_AGE:
13436                 ret = flow_dv_aso_age_release(dev, idx);
13437                 if (ret)
13438                         /*
13439                          * In this case, the last flow holding a reference
13440                          * will actually release the age action.
13441                          */
13442                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
13443                                 " released with references %d.", idx, ret);
13444                 return 0;
13445         default:
13446                 return rte_flow_error_set(error, ENOTSUP,
13447                                           RTE_FLOW_ERROR_TYPE_ACTION,
13448                                           NULL,
13449                                           "action type not supported");
13450         }
13451 }
13452
13453 /**
13454  * Update the shared RSS action configuration in place.
13455  *
13456  * @param[in] dev
13457  *   Pointer to the Ethernet device structure.
13458  * @param[in] idx
13459  *   The shared RSS action object ID to be updated.
13460  * @param[in] action_conf
13461  *   RSS action specification used to modify *shared_rss*.
13462  * @param[out] error
13463  *   Perform verbose error reporting if not NULL. Initialized in case of
13464  *   error only.
13465  *
13466  * @return
13467  *   0 on success, otherwise negative errno value.
13468  * @note Currently only the update of RSS queues is supported.
13469  */
13470 static int
13471 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
13472                             const struct rte_flow_action_rss *action_conf,
13473                             struct rte_flow_error *error)
13474 {
13475         struct mlx5_priv *priv = dev->data->dev_private;
13476         struct mlx5_shared_action_rss *shared_rss =
13477             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13478         int ret = 0;
13479         void *queue = NULL;
13480         uint16_t *queue_old = NULL;
13481         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
13482
13483         if (!shared_rss)
13484                 return rte_flow_error_set(error, EINVAL,
13485                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13486                                           "invalid shared action to update");
13487         if (priv->obj_ops.ind_table_modify == NULL)
13488                 return rte_flow_error_set(error, ENOTSUP,
13489                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13490                                           "cannot modify indirection table");
13491         queue = mlx5_malloc(MLX5_MEM_ZERO,
13492                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
13493                             0, SOCKET_ID_ANY);
13494         if (!queue)
13495                 return rte_flow_error_set(error, ENOMEM,
13496                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13497                                           NULL,
13498                                           "cannot allocate resource memory");
13499         memcpy(queue, action_conf->queue, queue_size);
13500         MLX5_ASSERT(shared_rss->ind_tbl);
13501         rte_spinlock_lock(&shared_rss->action_rss_sl);
13502         queue_old = shared_rss->ind_tbl->queues;
13503         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
13504                                         queue, action_conf->queue_num, true);
13505         if (ret) {
13506                 mlx5_free(queue);
13507                 ret = rte_flow_error_set(error, rte_errno,
13508                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13509                                           "cannot update indirection table");
13510         } else {
13511                 mlx5_free(queue_old);
13512                 shared_rss->origin.queue = queue;
13513                 shared_rss->origin.queue_num = action_conf->queue_num;
13514         }
13515         rte_spinlock_unlock(&shared_rss->action_rss_sl);
13516         return ret;
13517 }
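
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * updating the queue set of an existing shared RSS action from the
 * application side. As dispatched in flow_dv_action_update() below, the
 * update payload is an rte_flow_action whose conf carries the new RSS
 * configuration; only the queue array is honored here. Port and queue
 * IDs are hypothetical.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* Never defined; documentation only. */
static int
example_indirect_rss_update(uint16_t port_id,
                            struct rte_flow_action_handle *handle,
                            struct rte_flow_error *error)
{
        static const uint16_t new_queues[] = { 4, 5, 6, 7 };
        const struct rte_flow_action_rss rss = {
                .queue = new_queues,
                .queue_num = RTE_DIM(new_queues),
        };
        const struct rte_flow_action update = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };

        return rte_flow_action_handle_update(port_id, handle, &update, error);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */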
13518
13519 /**
13520  * Update the shared action configuration in place, lock free
13521  * (the mutex should be acquired by the caller).
13522  *
13523  * @param[in] dev
13524  *   Pointer to the Ethernet device structure.
13525  * @param[in] handle
13526  *   The indirect action object handle to be updated.
13527  * @param[in] update
13528  *   Action specification used to modify the action pointed to by *handle*.
13529  *   *update* can be of the same type as the action pointed to by the
13530  *   *handle* argument, or some other structure such as a wrapper, depending
13531  *   on the indirect action type.
13532  * @param[out] error
13533  *   Perform verbose error reporting if not NULL. Initialized in case of
13534  *   error only.
13535  *
13536  * @return
13537  *   0 on success, otherwise negative errno value.
13538  */
13539 static int
13540 flow_dv_action_update(struct rte_eth_dev *dev,
13541                         struct rte_flow_action_handle *handle,
13542                         const void *update,
13543                         struct rte_flow_error *err)
13544 {
13545         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13546         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13547         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13548         const void *action_conf;
13549
13550         switch (type) {
13551         case MLX5_INDIRECT_ACTION_TYPE_RSS:
13552                 action_conf = ((const struct rte_flow_action *)update)->conf;
13553                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
13554         default:
13555                 return rte_flow_error_set(err, ENOTSUP,
13556                                           RTE_FLOW_ERROR_TYPE_ACTION,
13557                                           NULL,
13558                                           "action type update not supported");
13559         }
13560 }
13561
13562 /**
13563  * Destroy the meter sub-policy table rules.
13564  * Lock free (the mutex should be acquired by the caller).
13565  *
13566  * @param[in] dev
13567  *   Pointer to Ethernet device.
13568  * @param[in] sub_policy
13569  *   Pointer to the meter sub-policy table.
13570  */
13571 static void
13572 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
13573                              struct mlx5_flow_meter_sub_policy *sub_policy)
13574 {
13575         struct mlx5_flow_tbl_data_entry *tbl;
13576         int i;
13577
13578         for (i = 0; i < RTE_COLORS; i++) {
13579                 if (sub_policy->color_rule[i]) {
13580                         claim_zero(mlx5_flow_os_destroy_flow
13581                                 (sub_policy->color_rule[i]));
13582                         sub_policy->color_rule[i] = NULL;
13583                 }
13584                 if (sub_policy->color_matcher[i]) {
13585                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
13586                                 typeof(*tbl), tbl);
13587                         mlx5_cache_unregister(&tbl->matchers,
13588                                       &sub_policy->color_matcher[i]->entry);
13589                         sub_policy->color_matcher[i] = NULL;
13590                 }
13591         }
13592         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
13593                 if (sub_policy->rix_hrxq[i]) {
13594                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
13595                         sub_policy->rix_hrxq[i] = 0;
13596                 }
13597                 if (sub_policy->jump_tbl[i]) {
13598                         flow_dv_tbl_resource_release(MLX5_SH(dev),
13599                         sub_policy->jump_tbl[i]);
13600                         sub_policy->jump_tbl[i] = NULL;
13601                 }
13602         }
13603         if (sub_policy->tbl_rsc) {
13604                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13605                         sub_policy->tbl_rsc);
13606                 sub_policy->tbl_rsc = NULL;
13607         }
13608 }
13609
13610 /**
13611  * Destroy policy rules, lock free
13612  * (the mutex should be acquired by the caller).
13613  * Iterates over the sub-policies of every meter domain.
13614  *
13615  * @param[in] dev
13616  *   Pointer to the Ethernet device structure.
13617  * @param[in] mtr_policy
13618  *   Meter policy struct.
13619  */
13620 static void
13621 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
13622                       struct mlx5_flow_meter_policy *mtr_policy)
13623 {
13624         uint32_t i, j;
13625         struct mlx5_flow_meter_sub_policy *sub_policy;
13626         uint16_t sub_policy_num;
13627
13628         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
13629                 sub_policy_num = (mtr_policy->sub_policy_num >>
13630                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
13631                         MLX5_MTR_SUB_POLICY_NUM_MASK;
13632                 for (j = 0; j < sub_policy_num; j++) {
13633                         sub_policy = mtr_policy->sub_policys[i][j];
13634                         if (sub_policy)
13635                                 __flow_dv_destroy_sub_policy_rules
13636                                                 (dev, sub_policy);
13637                 }
13638         }
13639 }
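
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * mtr_policy->sub_policy_num packs one per-domain sub-policy count into
 * each MLX5_MTR_SUB_POLICY_NUM_SHIFT-bit field; the loop above extracts
 * it with a shift and MLX5_MTR_SUB_POLICY_NUM_MASK, as isolated here.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* Never defined; documentation only. */
static uint16_t
example_sub_policy_num(const struct mlx5_flow_meter_policy *mtr_policy,
                       enum mlx5_meter_domain domain)
{
        /* Shift the packed word to the domain's field and mask it out. */
        return (mtr_policy->sub_policy_num >>
                (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
               MLX5_MTR_SUB_POLICY_NUM_MASK;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */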
13640
13641 /**
13642  * Destroy policy actions, lock free
13643  * (the mutex should be acquired by the caller).
13644  * Dispatcher for the fate-action-specific release.
13645  *
13646  * @param[in] dev
13647  *   Pointer to the Ethernet device structure.
13648  * @param[in] mtr_policy
13649  *   Meter policy struct.
13650  */
13651 static void
13652 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
13653                       struct mlx5_flow_meter_policy *mtr_policy)
13654 {
13655         struct rte_flow_action *rss_action;
13656         struct mlx5_flow_handle dev_handle;
13657         uint32_t i, j;
13658
13659         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
13660                 if (mtr_policy->act_cnt[i].rix_mark) {
13661                         flow_dv_tag_release(dev,
13662                                 mtr_policy->act_cnt[i].rix_mark);
13663                         mtr_policy->act_cnt[i].rix_mark = 0;
13664                 }
13665                 if (mtr_policy->act_cnt[i].modify_hdr) {
13666                         dev_handle.dvh.modify_hdr =
13667                                 mtr_policy->act_cnt[i].modify_hdr;
13668                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
13669                 }
13670                 switch (mtr_policy->act_cnt[i].fate_action) {
13671                 case MLX5_FLOW_FATE_SHARED_RSS:
13672                         rss_action = mtr_policy->act_cnt[i].rss;
13673                         mlx5_free(rss_action);
13674                         break;
13675                 case MLX5_FLOW_FATE_PORT_ID:
13676                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
13677                                 flow_dv_port_id_action_resource_release(dev,
13678                                 mtr_policy->act_cnt[i].rix_port_id_action);
13679                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
13680                         }
13681                         break;
13682                 case MLX5_FLOW_FATE_DROP:
13683                 case MLX5_FLOW_FATE_JUMP:
13684                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
13685                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
13686                                                 NULL;
13687                         break;
13688                 default:
13689                         /* Queue action: nothing to do. */
13690                         break;
13691                 }
13692         }
13693         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
13694                 mtr_policy->dr_drop_action[j] = NULL;
13695 }
13696
13697 /**
13698  * Create policy actions per domain, lock free
13699  * (the mutex should be acquired by the caller).
13700  * Dispatcher for the action-type-specific call.
13701  *
13702  * @param[in] dev
13703  *   Pointer to the Ethernet device structure.
13704  * @param[in] mtr_policy
13705  *   Meter policy struct.
13706  * @param[in] actions
13707  *   Per-color array of action specifications used to create meter actions.
13708  * @param[out] error
13709  *   Perform verbose error reporting if not NULL. Initialized in case of
13710  *   error only.
13711  *
13712  * @return
13713  *   0 on success, otherwise negative errno value.
13714  */
13715 static int
13716 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
13717                         struct mlx5_flow_meter_policy *mtr_policy,
13718                         const struct rte_flow_action *actions[RTE_COLORS],
13719                         enum mlx5_meter_domain domain,
13720                         struct rte_mtr_error *error)
13721 {
13722         struct mlx5_priv *priv = dev->data->dev_private;
13723         struct rte_flow_error flow_err;
13724         const struct rte_flow_action *act;
13725         uint64_t action_flags = 0;
13726         struct mlx5_flow_handle dh;
13727         struct mlx5_flow dev_flow;
13728         struct mlx5_flow_dv_port_id_action_resource port_id_action;
13729         int i, ret;
13730         uint8_t egress, transfer;
13731         struct mlx5_meter_policy_action_container *act_cnt = NULL;
13732         union {
13733                 struct mlx5_flow_dv_modify_hdr_resource res;
13734                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13735                             sizeof(struct mlx5_modification_cmd) *
13736                             (MLX5_MAX_MODIFY_NUM + 1)];
13737         } mhdr_dummy;
13738
13739         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
13740         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
13741         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
13742         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
13743         memset(&port_id_action, 0,
13744                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
13745         dev_flow.handle = &dh;
13746         dev_flow.dv.port_id_action = &port_id_action;
13747         dev_flow.external = true;
13748         for (i = 0; i < RTE_COLORS; i++) {
13749                 if (i < MLX5_MTR_RTE_COLORS)
13750                         act_cnt = &mtr_policy->act_cnt[i];
13751                 for (act = actions[i];
13752                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
13753                         act++) {
13754                         switch (act->type) {
13755                         case RTE_FLOW_ACTION_TYPE_MARK:
13756                         {
13757                                 uint32_t tag_be = mlx5_flow_mark_set
13758                                         (((const struct rte_flow_action_mark *)
13759                                         (act->conf))->id);
13760
13761                                 if (i >= MLX5_MTR_RTE_COLORS)
13762                                         return -rte_mtr_error_set(error,
13763                                           ENOTSUP,
13764                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13765                                           NULL,
13766                                           "cannot create policy "
13767                                           "mark action for this color");
13768                                 dev_flow.handle->mark = 1;
13769                                 if (flow_dv_tag_resource_register(dev, tag_be,
13770                                                   &dev_flow, &flow_err))
13771                                         return -rte_mtr_error_set(error,
13772                                         ENOTSUP,
13773                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13774                                         NULL,
13775                                         "cannot setup policy mark action");
13776                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
13777                                 act_cnt->rix_mark =
13778                                         dev_flow.handle->dvh.rix_tag;
13779                                 if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
13780                                         dev_flow.handle->rix_hrxq =
13781                         mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
13782                                         flow_drv_rxq_flags_set(dev,
13783                                                 dev_flow.handle);
13784                                 }
13785                                 action_flags |= MLX5_FLOW_ACTION_MARK;
13786                                 break;
13787                         }
13788                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
13789                         {
13790                                 struct mlx5_flow_dv_modify_hdr_resource
13791                                         *mhdr_res = &mhdr_dummy.res;
13792
13793                                 if (i >= MLX5_MTR_RTE_COLORS)
13794                                         return -rte_mtr_error_set(error,
13795                                           ENOTSUP,
13796                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13797                                           NULL,
13798                                           "cannot create policy "
13799                                           "set tag action for this color");
13800                                 memset(mhdr_res, 0, sizeof(*mhdr_res));
13801                                 mhdr_res->ft_type = transfer ?
13802                                         MLX5DV_FLOW_TABLE_TYPE_FDB :
13803                                         egress ?
13804                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
13805                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
13806                                 if (flow_dv_convert_action_set_tag
13807                                 (dev, mhdr_res,
13808                                 (const struct rte_flow_action_set_tag *)
13809                                 act->conf,  &flow_err))
13810                                         return -rte_mtr_error_set(error,
13811                                         ENOTSUP,
13812                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13813                                         NULL, "cannot convert policy "
13814                                         "set tag action");
13815                                 if (!mhdr_res->actions_num)
13816                                         return -rte_mtr_error_set(error,
13817                                         ENOTSUP,
13818                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13819                                         NULL, "cannot find policy "
13820                                         "set tag action");
13821                                 /* Create the modify header action if needed. */
13822                                 dev_flow.dv.group = 1;
13823                                 if (flow_dv_modify_hdr_resource_register
13824                                         (dev, mhdr_res, &dev_flow, &flow_err))
13825                                         return -rte_mtr_error_set(error,
13826                                         ENOTSUP,
13827                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13828                                         NULL, "cannot register policy "
13829                                         "set tag action");
13830                                 act_cnt->modify_hdr =
13831                                 dev_flow.handle->dvh.modify_hdr;
13832                                 if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
13833                                         dev_flow.handle->rix_hrxq =
13834                                 mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
13835                                         flow_drv_rxq_flags_set(dev,
13836                                                 dev_flow.handle);
13837                                 }
13838                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13839                                 break;
13840                         }
13841                         case RTE_FLOW_ACTION_TYPE_DROP:
13842                         {
13843                                 struct mlx5_flow_mtr_mng *mtrmng =
13844                                                 priv->sh->mtrmng;
13845                                 struct mlx5_flow_tbl_data_entry *tbl_data;
13846
13847                                 /*
13848                                  * Create the drop table with
13849                                  * METER DROP level.
13850                                  */
13851                                 if (!mtrmng->drop_tbl[domain]) {
13852                                         mtrmng->drop_tbl[domain] =
13853                                         flow_dv_tbl_resource_get(dev,
13854                                         MLX5_FLOW_TABLE_LEVEL_METER,
13855                                         egress, transfer, false, NULL, 0,
13856                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
13857                                         if (!mtrmng->drop_tbl[domain])
13858                                                 return -rte_mtr_error_set
13859                                         (error, ENOTSUP,
13860                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13861                                         NULL,
13862                                         "Failed to create meter drop table");
13863                                 }
13864                                 tbl_data = container_of
13865                                 (mtrmng->drop_tbl[domain],
13866                                 struct mlx5_flow_tbl_data_entry, tbl);
13867                                 if (i < MLX5_MTR_RTE_COLORS) {
13868                                         act_cnt->dr_jump_action[domain] =
13869                                                 tbl_data->jump.action;
13870                                         act_cnt->fate_action =
13871                                                 MLX5_FLOW_FATE_DROP;
13872                                 }
13873                                 if (i == RTE_COLOR_RED)
13874                                         mtr_policy->dr_drop_action[domain] =
13875                                                 tbl_data->jump.action;
13876                                 action_flags |= MLX5_FLOW_ACTION_DROP;
13877                                 break;
13878                         }
13879                         case RTE_FLOW_ACTION_TYPE_QUEUE:
13880                         {
13881                                 struct mlx5_hrxq *hrxq;
13882                                 uint32_t hrxq_idx;
13883                                 struct mlx5_flow_rss_desc rss_desc;
13884                                 struct mlx5_flow_meter_sub_policy *sub_policy =
13885                                 mtr_policy->sub_policys[domain][0];
13886
13887                                 if (i >= MLX5_MTR_RTE_COLORS)
13888                                         return -rte_mtr_error_set(error,
13889                                         ENOTSUP,
13890                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13891                                         NULL, "cannot create policy "
13892                                         "fate queue for this color");
13893                                 memset(&rss_desc, 0,
13894                                         sizeof(struct mlx5_flow_rss_desc));
13895                                 rss_desc.queue_num = 1;
13896                                 rss_desc.const_q = act->conf;
13897                                 hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
13898                                                     &rss_desc, &hrxq_idx);
13899                                 if (!hrxq)
13900                                         return -rte_mtr_error_set(error,
13901                                         ENOTSUP,
13902                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13903                                         NULL,
13904                                         "cannot create policy fate queue");
13905                                 sub_policy->rix_hrxq[i] = hrxq_idx;
13906                                 act_cnt->fate_action =
13907                                         MLX5_FLOW_FATE_QUEUE;
13908                                 dev_flow.handle->fate_action =
13909                                         MLX5_FLOW_FATE_QUEUE;
13910                                 if (action_flags & MLX5_FLOW_ACTION_MARK ||
13911                                     action_flags & MLX5_FLOW_ACTION_SET_TAG) {
13912                                         dev_flow.handle->rix_hrxq = hrxq_idx;
13913                                         flow_drv_rxq_flags_set(dev,
13914                                                 dev_flow.handle);
13915                                 }
13916                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
13917                                 break;
13918                         }
13919                         case RTE_FLOW_ACTION_TYPE_RSS:
13920                         {
13921                                 int rss_size;
13922
13923                                 if (i >= MLX5_MTR_RTE_COLORS)
13924                                         return -rte_mtr_error_set(error,
13925                                           ENOTSUP,
13926                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13927                                           NULL,
13928                                           "cannot create policy "
13929                                           "rss action for this color");
13930                                 /*
13931                                  * Save the RSS conf into the policy struct
13932                                  * for the translation stage.
13933                                  */
13934                                 rss_size = (int)rte_flow_conv
13935                                         (RTE_FLOW_CONV_OP_ACTION,
13936                                         NULL, 0, act, &flow_err);
13937                                 if (rss_size <= 0)
13938                                         return -rte_mtr_error_set(error,
13939                                           ENOTSUP,
13940                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13941                                           NULL, "failed to get the "
13942                                           "RSS action struct size");
13943                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
13944                                                 rss_size, 0, SOCKET_ID_ANY);
13945                                 if (!act_cnt->rss)
13946                                         return -rte_mtr_error_set(error,
13947                                           ENOTSUP,
13948                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13949                                           NULL,
13950                                           "failed to allocate RSS action memory");
13951                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
13952                                         act_cnt->rss, rss_size,
13953                                         act, &flow_err);
13954                                 if (ret < 0)
13955                                         return -rte_mtr_error_set(error,
13956                                           ENOTSUP,
13957                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13958                                           NULL, "failed to save the "
13959                                           "RSS action into the policy struct");
13960                                 act_cnt->fate_action =
13961                                         MLX5_FLOW_FATE_SHARED_RSS;
13962                                 action_flags |= MLX5_FLOW_ACTION_RSS;
13963                                 break;
13964                         }
13965                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
13966                         {
13967                                 struct mlx5_flow_dv_port_id_action_resource
13968                                         port_id_resource;
13969                                 uint32_t port_id = 0;
13970
13971                                 if (i >= MLX5_MTR_RTE_COLORS)
13972                                         return -rte_mtr_error_set(error,
13973                                         ENOTSUP,
13974                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13975                                         NULL, "cannot create policy "
13976                                         "port action for this color");
13977                                 memset(&port_id_resource, 0,
13978                                         sizeof(port_id_resource));
13979                                 if (flow_dv_translate_action_port_id(dev, act,
13980                                                 &port_id, &flow_err))
13981                                         return -rte_mtr_error_set(error,
13982                                         ENOTSUP,
13983                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13984                                         NULL, "cannot translate "
13985                                         "policy port action");
13986                                 port_id_resource.port_id = port_id;
13987                                 if (flow_dv_port_id_action_resource_register
13988                                         (dev, &port_id_resource,
13989                                         &dev_flow, &flow_err))
13990                                         return -rte_mtr_error_set(error,
13991                                         ENOTSUP,
13992                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13993                                         NULL, "cannot setup "
13994                                         "policy port action");
13995                                 act_cnt->rix_port_id_action =
13996                                         dev_flow.handle->rix_port_id_action;
13997                                 act_cnt->fate_action =
13998                                         MLX5_FLOW_FATE_PORT_ID;
13999                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
14000                                 break;
14001                         }
14002                         case RTE_FLOW_ACTION_TYPE_JUMP:
14003                         {
14004                                 uint32_t jump_group = 0;
14005                                 uint32_t table = 0;
14006                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14007                                 struct flow_grp_info grp_info = {
14008                                         .external = !!dev_flow.external,
14009                                         .transfer = !!transfer,
14010                                         .fdb_def_rule = !!priv->fdb_def_rule,
14011                                         .std_tbl_fix = 0,
14012                                         .skip_scale = dev_flow.skip_scale &
14013                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
14014                                 };
14015                                 struct mlx5_flow_meter_sub_policy *sub_policy =
14016                                 mtr_policy->sub_policys[domain][0];
14017
14018                                 if (i >= MLX5_MTR_RTE_COLORS)
14019                                         return -rte_mtr_error_set(error,
14020                                           ENOTSUP,
14021                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14022                                           NULL,
14023                                           "cannot create policy "
14024                                           "jump action for this color");
14025                                 jump_group =
14026                                 ((const struct rte_flow_action_jump *)
14027                                                         act->conf)->group;
14028                                 if (mlx5_flow_group_to_table(dev, NULL,
14029                                                        jump_group,
14030                                                        &table,
14031                                                        &grp_info, &flow_err))
14032                                         return -rte_mtr_error_set(error,
14033                                         ENOTSUP,
14034                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14035                                         NULL, "cannot setup "
14036                                         "policy jump action");
14037                                 sub_policy->jump_tbl[i] =
14038                                 flow_dv_tbl_resource_get(dev,
14039                                         table, egress,
14040                                         transfer,
14041                                         !!dev_flow.external,
14042                                         NULL, jump_group, 0,
14043                                         0, &flow_err);
14044                                 if (!sub_policy->jump_tbl[i])
14045                                         return -rte_mtr_error_set(error,
14047                                         ENOTSUP,
14048                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14049                                         NULL, "cannot create jump action");
14050                                 tbl_data = container_of
14051                                 (sub_policy->jump_tbl[i],
14052                                 struct mlx5_flow_tbl_data_entry, tbl);
14053                                 act_cnt->dr_jump_action[domain] =
14054                                         tbl_data->jump.action;
14055                                 act_cnt->fate_action =
14056                                         MLX5_FLOW_FATE_JUMP;
14057                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
14058                                 break;
14059                         }
14060                         default:
14061                                 return -rte_mtr_error_set(error, ENOTSUP,
14062                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14063                                           NULL, "action type not supported");
14064                         }
14065                 }
14066         }
14067         return 0;
14068 }
14069
14070 /**
14071  * Create policy action per domain, lock free
14072  * (mutex should be acquired by caller).
14073  * Dispatcher for the action-type-specific calls.
14074  *
14075  * @param[in] dev
14076  *   Pointer to the Ethernet device structure.
14077  * @param[in] mtr_policy
14078  *   Meter policy struct.
14079  * @param[in] action
14080  *   Action specification used to create meter actions.
14081  * @param[out] error
14082  *   Perform verbose error reporting if not NULL. Initialized in case of
14083  *   error only.
14084  *
14085  * @return
14086  *   0 on success, otherwise negative errno value.
14087  */
14088 static int
14089 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
14090                       struct mlx5_flow_meter_policy *mtr_policy,
14091                       const struct rte_flow_action *actions[RTE_COLORS],
14092                       struct rte_mtr_error *error)
14093 {
14094         int ret, i;
14095         uint16_t sub_policy_num;
14096
14097         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14098                 sub_policy_num = (mtr_policy->sub_policy_num >>
14099                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14100                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14101                 if (sub_policy_num) {
14102                         ret = __flow_dv_create_domain_policy_acts(dev,
14103                                 mtr_policy, actions,
14104                                 (enum mlx5_meter_domain)i, error);
14105                         if (ret)
14106                                 return ret;
14107                 }
14108         }
14109         return 0;
14110 }
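
/*
 * Editor's note: mtr_policy->sub_policy_num packs one small per-domain
 * counter into a single word, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per
 * domain, which the loop above decodes. A minimal extraction sketch
 * (the helper name is illustrative, not a driver API):
 */
static inline uint16_t
mtr_sub_policy_num_sketch(const struct mlx5_flow_meter_policy *policy,
			  enum mlx5_meter_domain domain)
{
	/* Shift the packed field down to the requested domain and mask it. */
	return (policy->sub_policy_num >>
		(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
		MLX5_MTR_SUB_POLICY_NUM_MASK;
}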
14111
14112 /**
14113  * Query a DV flow rule for its statistics via DevX.
14114  *
14115  * @param[in] dev
14116  *   Pointer to Ethernet device.
14117  * @param[in] cnt_idx
14118  *   Index to the flow counter.
14119  * @param[out] data
14120  *   Data retrieved by the query.
14121  * @param[out] error
14122  *   Perform verbose error reporting if not NULL.
14123  *
14124  * @return
14125  *   0 on success, a negative errno value otherwise and rte_errno is set.
14126  */
14127 static int
14128 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
14129                     struct rte_flow_error *error)
14130 {
14131         struct mlx5_priv *priv = dev->data->dev_private;
14132         struct rte_flow_query_count *qc = data;
14133
14134         if (!priv->config.devx)
14135                 return rte_flow_error_set(error, ENOTSUP,
14136                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14137                                           NULL,
14138                                           "counters are not supported");
14139         if (cnt_idx) {
14140                 uint64_t pkts, bytes;
14141                 struct mlx5_flow_counter *cnt;
14142                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
14143
14144                 if (err)
14145                         return rte_flow_error_set(error, -err,
14146                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14147                                         NULL, "cannot read counters");
14148                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
14149                 qc->hits_set = 1;
14150                 qc->bytes_set = 1;
14151                 qc->hits = pkts - cnt->hits;
14152                 qc->bytes = bytes - cnt->bytes;
14153                 if (qc->reset) {
14154                         cnt->hits = pkts;
14155                         cnt->bytes = bytes;
14156                 }
14157                 return 0;
14158         }
14159         return rte_flow_error_set(error, EINVAL,
14160                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14161                                   NULL,
14162                                   "counters are not available");
14163 }
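
/*
 * Editor's note: the query above reports deltas against the software
 * baseline cached in the counter at the last reset; the hardware counter
 * itself is never cleared. A hypothetical application-side sketch using
 * the public rte_flow API (port_id and flow are assumed to exist):
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error qerr;
 *
 *	if (!rte_flow_query(port_id, flow, &count_action, &qc, &qerr) &&
 *	    qc.hits_set)
 *		printf("hits since last reset: %" PRIu64 "\n", qc.hits);
 */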
14164
14165 static int
14166 flow_dv_action_query(struct rte_eth_dev *dev,
14167                      const struct rte_flow_action_handle *handle, void *data,
14168                      struct rte_flow_error *error)
14169 {
14170         struct mlx5_age_param *age_param;
14171         struct rte_flow_query_age *resp;
14172         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14173         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14174         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14175
14176         switch (type) {
14177         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14178                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
14179                 resp = data;
14180                 resp->aged = __atomic_load_n(&age_param->state,
14181                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
14182                                                                           1 : 0;
14183                 resp->sec_since_last_hit_valid = !resp->aged;
14184                 if (resp->sec_since_last_hit_valid)
14185                         resp->sec_since_last_hit = __atomic_load_n
14186                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
14187                 return 0;
14188         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14189                 return flow_dv_query_count(dev, idx, data, error);
14190         default:
14191                 return rte_flow_error_set(error, ENOTSUP,
14192                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14193                                           "action type query not supported");
14194         }
14195 }
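
/*
 * Editor's note: an indirect action handle is a plain integer carrying the
 * action type in the bits at MLX5_INDIRECT_ACTION_TYPE_OFFSET and above and
 * the pool index below them, mirroring the decode at the top of the
 * function above. A hypothetical encoding sketch (not a driver API):
 */
static inline struct rte_flow_action_handle *
mlx5_indirect_handle_encode_sketch(uint32_t type, uint32_t idx)
{
	/* Type in the high bits, pool index in the low bits. */
	uint32_t act_idx = (type << MLX5_INDIRECT_ACTION_TYPE_OFFSET) |
			(idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1));

	return (struct rte_flow_action_handle *)(uintptr_t)act_idx;
}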
14196
14197 /**
14198  * Query a flow rule AGE action for aging information.
14199  *
14200  * @param[in] dev
14201  *   Pointer to Ethernet device.
14202  * @param[in] flow
14203  *   Pointer to the sub flow.
14204  * @param[out] data
14205  *   Data retrieved by the query.
14206  * @param[out] error
14207  *   Perform verbose error reporting if not NULL.
14208  *
14209  * @return
14210  *   0 on success, a negative errno value otherwise and rte_errno is set.
14211  */
14212 static int
14213 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
14214                   void *data, struct rte_flow_error *error)
14215 {
14216         struct rte_flow_query_age *resp = data;
14217         struct mlx5_age_param *age_param;
14218
14219         if (flow->age) {
14220                 struct mlx5_aso_age_action *act =
14221                                      flow_aso_age_get_by_idx(dev, flow->age);
14222
14223                 age_param = &act->age_params;
14224         } else if (flow->counter) {
14225                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
14226
14227                 if (!age_param || !age_param->timeout)
14228                         return rte_flow_error_set
14229                                         (error, EINVAL,
14230                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14231                                          NULL, "cannot read age data");
14232         } else {
14233                 return rte_flow_error_set(error, EINVAL,
14234                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14235                                           NULL, "age data not available");
14236         }
14237         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
14238                                      AGE_TMOUT ? 1 : 0;
14239         resp->sec_since_last_hit_valid = !resp->aged;
14240         if (resp->sec_since_last_hit_valid)
14241                 resp->sec_since_last_hit = __atomic_load_n
14242                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
14243         return 0;
14244 }
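
/*
 * Editor's note: resp->aged and resp->sec_since_last_hit_valid are mutually
 * exclusive above; once a flow is reported aged, its last-hit time is no
 * longer maintained. A hypothetical caller sketch via the public API
 * (port_id and flow are assumed to exist):
 *
 *	struct rte_flow_query_age age = { 0 };
 *	struct rte_flow_action age_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_AGE,
 *	};
 *	struct rte_flow_error aerr;
 *
 *	if (!rte_flow_query(port_id, flow, &age_action, &age, &aerr) &&
 *	    !age.aged && age.sec_since_last_hit_valid)
 *		printf("idle for %u seconds\n", age.sec_since_last_hit);
 */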
14245
14246 /**
14247  * Query a flow.
14248  *
14249  * @see rte_flow_query()
14250  * @see rte_flow_ops
14251  */
14252 static int
14253 flow_dv_query(struct rte_eth_dev *dev,
14254               struct rte_flow *flow,
14255               const struct rte_flow_action *actions,
14256               void *data,
14257               struct rte_flow_error *error)
14258 {
14259         int ret = -EINVAL;
14260
14261         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
14262                 switch (actions->type) {
14263                 case RTE_FLOW_ACTION_TYPE_VOID:
14264                         break;
14265                 case RTE_FLOW_ACTION_TYPE_COUNT:
14266                         ret = flow_dv_query_count(dev, flow->counter, data,
14267                                                   error);
14268                         break;
14269                 case RTE_FLOW_ACTION_TYPE_AGE:
14270                         ret = flow_dv_query_age(dev, flow, data, error);
14271                         break;
14272                 default:
14273                         return rte_flow_error_set(error, ENOTSUP,
14274                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14275                                                   actions,
14276                                                   "action not supported");
14277                 }
14278         }
14279         return ret;
14280 }
14281
14282 /**
14283  * Destroy the meter table set.
14284  * Lock free (mutex should be acquired by caller).
14285  *
14286  * @param[in] dev
14287  *   Pointer to Ethernet device.
14288  * @param[in] fm
14289  *   Meter information table.
14290  */
14291 static void
14292 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
14293                         struct mlx5_flow_meter_info *fm)
14294 {
14295         struct mlx5_priv *priv = dev->data->dev_private;
14296         int i;
14297
14298         if (!fm || !priv->config.dv_flow_en)
14299                 return;
14300         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14301                 if (fm->drop_rule[i]) {
14302                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
14303                         fm->drop_rule[i] = NULL;
14304                 }
14305         }
14306 }
14307
14308 static void
14309 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
14310 {
14311         struct mlx5_priv *priv = dev->data->dev_private;
14312         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14313         struct mlx5_flow_tbl_data_entry *tbl;
14314         int i, j;
14315
14316         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14317                 if (mtrmng->def_rule[i]) {
14318                         claim_zero(mlx5_flow_os_destroy_flow
14319                                         (mtrmng->def_rule[i]));
14320                         mtrmng->def_rule[i] = NULL;
14321                 }
14322                 if (mtrmng->def_matcher[i]) {
14323                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
14324                                 struct mlx5_flow_tbl_data_entry, tbl);
14325                         mlx5_cache_unregister(&tbl->matchers,
14326                                       &mtrmng->def_matcher[i]->entry);
14327                         mtrmng->def_matcher[i] = NULL;
14328                 }
14329                 for (j = 0; j < MLX5_REG_BITS; j++) {
14330                         if (mtrmng->drop_matcher[i][j]) {
14331                                 tbl =
14332                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
14333                                              struct mlx5_flow_tbl_data_entry,
14334                                              tbl);
14335                                 mlx5_cache_unregister(&tbl->matchers,
14336                                         &mtrmng->drop_matcher[i][j]->entry);
14337                                 mtrmng->drop_matcher[i][j] = NULL;
14338                         }
14339                 }
14340                 if (mtrmng->drop_tbl[i]) {
14341                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14342                                 mtrmng->drop_tbl[i]);
14343                         mtrmng->drop_tbl[i] = NULL;
14344                 }
14345         }
14346 }
14347
14348 /* Number of meter flow actions: count and jump, or count and drop. */
14349 #define METER_ACTIONS 2
14350
14351 static void
14352 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
14353                               enum mlx5_meter_domain domain)
14354 {
14355         struct mlx5_priv *priv = dev->data->dev_private;
14356         struct mlx5_flow_meter_def_policy *def_policy =
14357                         priv->sh->mtrmng->def_policy[domain];
14358
14359         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
14360         mlx5_free(def_policy);
14361         priv->sh->mtrmng->def_policy[domain] = NULL;
14362 }
14363
14364 /**
14365  * Destroy the default policy table set.
14366  *
14367  * @param[in] dev
14368  *   Pointer to Ethernet device.
14369  */
14370 static void
14371 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
14372 {
14373         struct mlx5_priv *priv = dev->data->dev_private;
14374         int i;
14375
14376         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
14377                 if (priv->sh->mtrmng->def_policy[i])
14378                         __flow_dv_destroy_domain_def_policy(dev,
14379                                         (enum mlx5_meter_domain)i);
14380         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
14381 }
14382
14383 static int
14384 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
14385                         uint32_t color_reg_c_idx,
14386                         enum rte_color color, void *matcher_object,
14387                         int actions_n, void *actions,
14388                         bool is_default_policy, void **rule,
14389                         const struct rte_flow_attr *attr)
14390 {
14391         int ret;
14392         struct mlx5_flow_dv_match_params value = {
14393                 .size = sizeof(value.buf) -
14394                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14395         };
14396         struct mlx5_flow_dv_match_params matcher = {
14397                 .size = sizeof(matcher.buf) -
14398                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14399         };
14400         struct mlx5_priv *priv = dev->data->dev_private;
14401
14402         if (!is_default_policy && (priv->representor || priv->master)) {
14403                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
14404                                                    value.buf, NULL, attr)) {
14405                         DRV_LOG(ERR,
14406                         "Failed to create meter policy flow with port.");
14407                         return -1;
14408                 }
14409         }
14410         flow_dv_match_meta_reg(matcher.buf, value.buf,
14411                                 (enum modify_reg)color_reg_c_idx,
14412                                 rte_col_2_mlx5_col(color),
14413                                 UINT32_MAX);
14414         ret = mlx5_flow_os_create_flow(matcher_object,
14415                         (void *)&value, actions_n, actions, rule);
14416         if (ret) {
14417                 DRV_LOG(ERR, "Failed to create meter policy flow.");
14418                 return -1;
14419         }
14420         return 0;
14421 }
14422
14423 static int
14424 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
14425                         uint32_t color_reg_c_idx,
14426                         uint16_t priority,
14427                         struct mlx5_flow_meter_sub_policy *sub_policy,
14428                         const struct rte_flow_attr *attr,
14429                         bool is_default_policy,
14430                         struct rte_flow_error *error)
14431 {
14432         struct mlx5_cache_entry *entry;
14433         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
14434         struct mlx5_flow_dv_matcher matcher = {
14435                 .mask = {
14436                         .size = sizeof(matcher.mask.buf) -
14437                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14438                 },
14439                 .tbl = tbl_rsc,
14440         };
14441         struct mlx5_flow_dv_match_params value = {
14442                 .size = sizeof(value.buf) -
14443                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14444         };
14445         struct mlx5_flow_cb_ctx ctx = {
14446                 .error = error,
14447                 .data = &matcher,
14448         };
14449         struct mlx5_flow_tbl_data_entry *tbl_data;
14450         struct mlx5_priv *priv = dev->data->dev_private;
14451         uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
14452
14453         if (!is_default_policy && (priv->representor || priv->master)) {
14454                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
14455                                                    value.buf, NULL, attr)) {
14456                         DRV_LOG(ERR,
14457                         "Failed to register meter policy matcher with port.");
14458                         return -1;
14459                 }
14460         }
14461         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
14462         if (priority < RTE_COLOR_RED)
14463                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14464                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
14465         matcher.priority = priority;
14466         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
14467                                         matcher.mask.size);
14468         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14469         if (!entry) {
14470                 DRV_LOG(ERR, "Failed to register meter policy matcher.");
14471                 return -1;
14472         }
14473         sub_policy->color_matcher[priority] =
14474                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
14475         return 0;
14476 }
14477
14478 /**
14479  * Create the policy rules per domain.
14480  *
14481  * @param[in] dev
14482  *   Pointer to Ethernet device.
14483  * @param[in] sub_policy
14484  *   Pointer to sub policy table.
14485  * @param[in] egress
14486  *   Direction of the table.
14487  * @param[in] transfer
14488  *   E-Switch or NIC flow.
14489  * @param[in] acts
14490  *   Pointer to policy action list per color.
14491  *
14492  * @return
14493  *   0 on success, -1 otherwise.
14494  */
14495 static int
14496 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
14497                 struct mlx5_flow_meter_sub_policy *sub_policy,
14498                 uint8_t egress, uint8_t transfer, bool is_default_policy,
14499                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
14500 {
14501         struct rte_flow_error flow_err;
14502         uint32_t color_reg_c_idx;
14503         struct rte_flow_attr attr = {
14504                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
14505                 .priority = 0,
14506                 .ingress = 0,
14507                 .egress = !!egress,
14508                 .transfer = !!transfer,
14509                 .reserved = 0,
14510         };
14511         int i;
14512         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
14513
14514         if (ret < 0)
14515                 return -1;
14516         /* Create policy table with POLICY level. */
14517         if (!sub_policy->tbl_rsc)
14518                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
14519                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
14520                                 egress, transfer, false, NULL, 0, 0,
14521                                 sub_policy->idx, &flow_err);
14522         if (!sub_policy->tbl_rsc) {
14523                 DRV_LOG(ERR,
14524                         "Failed to create meter sub policy table.");
14525                 return -1;
14526         }
14527         /* Prepare matchers. */
14528         color_reg_c_idx = ret;
14529         for (i = 0; i < RTE_COLORS; i++) {
14530                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
14531                         continue;
14532                 attr.priority = i;
14533                 if (!sub_policy->color_matcher[i]) {
14534                         /* Create matchers for Color. */
14535                         if (__flow_dv_create_policy_matcher(dev,
14536                                 color_reg_c_idx, i, sub_policy,
14537                                 &attr, is_default_policy, &flow_err))
14538                                 return -1;
14539                 }
14540                 /* Create flow, matching color. */
14541                 if (acts[i].actions_n)
14542                         if (__flow_dv_create_policy_flow(dev,
14543                                 color_reg_c_idx, (enum rte_color)i,
14544                                 sub_policy->color_matcher[i]->matcher_object,
14545                                 acts[i].actions_n,
14546                                 acts[i].dv_actions,
14547                                 is_default_policy,
14548                                 &sub_policy->color_rule[i],
14549                                 &attr))
14550                                 return -1;
14551         }
14552         return 0;
14553 }
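
/*
 * Editor's note: the loop above derives the matcher priority from the color
 * index, and __flow_dv_create_policy_matcher() adds the color register match
 * only for priorities below RTE_COLOR_RED, making the red rule a
 * lower-priority catch-all:
 *
 *	color (i)	attr.priority	matches color reg_c?
 *	GREEN  (0)	0		yes
 *	YELLOW (1)	-		skipped (unsupported here)
 *	RED    (2)	2		no, catch-all
 */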
14554
14555 static int
14556 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
14557                         struct mlx5_flow_meter_policy *mtr_policy,
14558                         struct mlx5_flow_meter_sub_policy *sub_policy,
14559                         uint32_t domain)
14560 {
14561         struct mlx5_priv *priv = dev->data->dev_private;
14562         struct mlx5_meter_policy_acts acts[RTE_COLORS];
14563         struct mlx5_flow_dv_tag_resource *tag;
14564         struct mlx5_flow_dv_port_id_action_resource *port_action;
14565         struct mlx5_hrxq *hrxq;
14566         uint8_t egress, transfer;
14567         int i;
14568
14569         for (i = 0; i < RTE_COLORS; i++) {
14570                 acts[i].actions_n = 0;
14571                 if (i == RTE_COLOR_YELLOW)
14572                         continue;
14573                 if (i == RTE_COLOR_RED) {
14574                         /* Only drop is supported on red. */
14575                         acts[i].dv_actions[0] =
14576                         mtr_policy->dr_drop_action[domain];
14577                         acts[i].actions_n = 1;
14578                         continue;
14579                 }
14580                 if (mtr_policy->act_cnt[i].rix_mark) {
14581                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
14582                                         mtr_policy->act_cnt[i].rix_mark);
14583                         if (!tag) {
14584                                 DRV_LOG(ERR, "Failed to find "
14585                                 "mark action for policy.");
14586                                 return -1;
14587                         }
14588                         acts[i].dv_actions[acts[i].actions_n] =
14589                                                 tag->action;
14590                         acts[i].actions_n++;
14591                 }
14592                 if (mtr_policy->act_cnt[i].modify_hdr) {
14593                         acts[i].dv_actions[acts[i].actions_n] =
14594                         mtr_policy->act_cnt[i].modify_hdr->action;
14595                         acts[i].actions_n++;
14596                 }
14597                 if (mtr_policy->act_cnt[i].fate_action) {
14598                         switch (mtr_policy->act_cnt[i].fate_action) {
14599                         case MLX5_FLOW_FATE_PORT_ID:
14600                                 port_action = mlx5_ipool_get
14601                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
14602                                 mtr_policy->act_cnt[i].rix_port_id_action);
14603                                 if (!port_action) {
14604                                         DRV_LOG(ERR, "Failed to find "
14605                                                 "port action for policy.");
14606                                         return -1;
14607                                 }
14608                                 acts[i].dv_actions[acts[i].actions_n] =
14609                                 port_action->action;
14610                                 acts[i].actions_n++;
14611                                 break;
14612                         case MLX5_FLOW_FATE_DROP:
14613                         case MLX5_FLOW_FATE_JUMP:
14614                                 acts[i].dv_actions[acts[i].actions_n] =
14615                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
14616                                 acts[i].actions_n++;
14617                                 break;
14618                         case MLX5_FLOW_FATE_SHARED_RSS:
14619                         case MLX5_FLOW_FATE_QUEUE:
14620                                 hrxq = mlx5_ipool_get
14621                                 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
14622                                 sub_policy->rix_hrxq[i]);
14623                                 if (!hrxq) {
14624                                         DRV_LOG(ERR, "Failed to find "
14625                                                 "queue action for policy.");
14626                                         return -1;
14627                                 }
14628                                 acts[i].dv_actions[acts[i].actions_n] =
14629                                 hrxq->action;
14630                                 acts[i].actions_n++;
14631                                 break;
14632                         default:
14633                                 /* Queue action: nothing to do. */
14634                                 break;
14635                         }
14636                 }
14637         }
14638         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14639         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14640         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
14641                                 egress, transfer, false, acts)) {
14642                 DRV_LOG(ERR,
14643                 "Failed to create policy rules per domain.");
14644                 return -1;
14645         }
14646         return 0;
14647 }
14648
14649 /**
14650  * Create the policy rules.
14651  *
14652  * @param[in] dev
14653  *   Pointer to Ethernet device.
14654  * @param[in,out] mtr_policy
14655  *   Pointer to meter policy table.
14656  *
14657  * @return
14658  *   0 on success, -1 otherwise.
14659  */
14660 static int
14661 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
14662                              struct mlx5_flow_meter_policy *mtr_policy)
14663 {
14664         int i;
14665         uint16_t sub_policy_num;
14666
14667         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14668                 sub_policy_num = (mtr_policy->sub_policy_num >>
14669                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14670                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14671                 if (!sub_policy_num)
14672                         continue;
14673                 /* Prepare actions list and create policy rules. */
14674                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
14675                         mtr_policy->sub_policys[i][0], i)) {
14676                         DRV_LOG(ERR,
14677                         "Failed to create policy action list per domain.");
14678                         return -1;
14679                 }
14680         }
14681         return 0;
14682 }
14683
14684 static int
14685 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
14686 {
14687         struct mlx5_priv *priv = dev->data->dev_private;
14688         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14689         struct mlx5_flow_meter_def_policy *def_policy;
14690         struct mlx5_flow_tbl_resource *jump_tbl;
14691         struct mlx5_flow_tbl_data_entry *tbl_data;
14692         uint8_t egress, transfer;
14693         struct rte_flow_error error;
14694         struct mlx5_meter_policy_acts acts[RTE_COLORS];
14695         int ret;
14696
14697         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14698         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14699         def_policy = mtrmng->def_policy[domain];
14700         if (!def_policy) {
14701                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
14702                         sizeof(struct mlx5_flow_meter_def_policy),
14703                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
14704                 if (!def_policy) {
14705                         DRV_LOG(ERR, "Failed to alloc "
14706                                         "default policy table.");
14707                         goto def_policy_error;
14708                 }
14709                 mtrmng->def_policy[domain] = def_policy;
14710                 /* Create the meter suffix table with SUFFIX level. */
14711                 jump_tbl = flow_dv_tbl_resource_get(dev,
14712                                 MLX5_FLOW_TABLE_LEVEL_METER,
14713                                 egress, transfer, false, NULL, 0,
14714                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
14715                 if (!jump_tbl) {
14716                         DRV_LOG(ERR,
14717                                 "Failed to create meter suffix table.");
14718                         goto def_policy_error;
14719                 }
14720                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
14721                 tbl_data = container_of(jump_tbl,
14722                                 struct mlx5_flow_tbl_data_entry, tbl);
14723                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
14724                                                 tbl_data->jump.action;
14725                 acts[RTE_COLOR_GREEN].dv_actions[0] =
14726                                                 tbl_data->jump.action;
14727                 acts[RTE_COLOR_GREEN].actions_n = 1;
14728                 /* Create jump action to the drop table. */
14729                 if (!mtrmng->drop_tbl[domain]) {
14730                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
14731                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
14732                                 egress, transfer, false, NULL, 0,
14733                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
14734                         if (!mtrmng->drop_tbl[domain]) {
14735                                 DRV_LOG(ERR, "Failed to create "
14736                                 "meter drop table for default policy.");
14737                                 goto def_policy_error;
14738                         }
14739                 }
14740                 tbl_data = container_of(mtrmng->drop_tbl[domain],
14741                                 struct mlx5_flow_tbl_data_entry, tbl);
14742                 def_policy->dr_jump_action[RTE_COLOR_RED] =
14743                                                 tbl_data->jump.action;
14744                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
14745                 acts[RTE_COLOR_RED].actions_n = 1;
14746                 /* Create default policy rules. */
14747                 ret = __flow_dv_create_domain_policy_rules(dev,
14748                                         &def_policy->sub_policy,
14749                                         egress, transfer, true, acts);
14750                 if (ret) {
14751                         DRV_LOG(ERR, "Failed to create "
14752                                 "default policy rules.");
14753                         goto def_policy_error;
14754                 }
14755         }
14756         return 0;
14757 def_policy_error:
14758         __flow_dv_destroy_domain_def_policy(dev,
14759                         (enum mlx5_meter_domain)domain);
14760         return -1;
14761 }
14762
14763 /**
14764  * Create the default policy table set.
14765  *
14766  * @param[in] dev
14767  *   Pointer to Ethernet device.
14768  * @return
14769  *   0 on success, -1 otherwise.
14770  */
14771 static int
14772 flow_dv_create_def_policy(struct rte_eth_dev *dev)
14773 {
14774         struct mlx5_priv *priv = dev->data->dev_private;
14775         int i;
14776
14777         /* Non-termination policy table. */
14778         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14779                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
14780                         continue;
14781                 if (__flow_dv_create_domain_def_policy(dev, i)) {
14782                         DRV_LOG(ERR,
14783                         "Failed to create default policy");
14784                         return -1;
14785                 }
14786         }
14787         return 0;
14788 }
14789
14790 /**
14791  * Create the needed meter tables.
14792  * Lock free (mutex should be acquired by caller).
14793  *
14794  * @param[in] dev
14795  *   Pointer to Ethernet device.
14796  * @param[in] fm
14797  *   Meter information table.
14798  * @param[in] mtr_idx
14799  *   Meter index.
14800  * @param[in] domain_bitmap
14801  *   Domain bitmap.
14802  * @return
14803  *   0 on success, -1 otherwise.
14804  */
14805 static int
14806 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
14807                         struct mlx5_flow_meter_info *fm,
14808                         uint32_t mtr_idx,
14809                         uint8_t domain_bitmap)
14810 {
14811         struct mlx5_priv *priv = dev->data->dev_private;
14812         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14813         struct rte_flow_error error;
14814         struct mlx5_flow_tbl_data_entry *tbl_data;
14815         uint8_t egress, transfer;
14816         void *actions[METER_ACTIONS];
14817         int domain, ret, i;
14818         struct mlx5_flow_counter *cnt;
14819         struct mlx5_flow_dv_match_params value = {
14820                 .size = sizeof(value.buf) -
14821                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14822         };
14823         struct mlx5_flow_dv_match_params matcher_para = {
14824                 .size = sizeof(matcher_para.buf) -
14825                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14826         };
14827         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
14828                                                      0, &error);
14829         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
14830         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
14831         struct mlx5_cache_entry *entry;
14832         struct mlx5_flow_dv_matcher matcher = {
14833                 .mask = {
14834                         .size = sizeof(matcher.mask.buf) -
14835                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14836                 },
14837         };
14838         struct mlx5_flow_dv_matcher *drop_matcher;
14839         struct mlx5_flow_cb_ctx ctx = {
14840                 .error = &error,
14841                 .data = &matcher,
14842         };
14843
14844         if (!priv->mtr_en || mtr_id_reg_c < 0) {
14845                 rte_errno = ENOTSUP;
14846                 return -1;
14847         }
14848         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
14849                 if (!(domain_bitmap & (1 << domain)) ||
14850                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
14851                         continue;
14852                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14853                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14854                 /* Create the drop table with METER DROP level. */
14855                 if (!mtrmng->drop_tbl[domain]) {
14856                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
14857                                         MLX5_FLOW_TABLE_LEVEL_METER,
14858                                         egress, transfer, false, NULL, 0,
14859                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
14860                         if (!mtrmng->drop_tbl[domain]) {
14861                                 DRV_LOG(ERR, "Failed to create meter drop table.");
14862                                 goto policy_error;
14863                         }
14864                 }
14865                 /* Create default matcher in drop table. */
14866                 matcher.tbl = mtrmng->drop_tbl[domain];
14867                 tbl_data = container_of(mtrmng->drop_tbl[domain],
14868                                 struct mlx5_flow_tbl_data_entry, tbl);
14869                 if (!mtrmng->def_matcher[domain]) {
14870                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14871                                        (enum modify_reg)mtr_id_reg_c,
14872                                        0, 0);
14873                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
14874                         matcher.crc = rte_raw_cksum
14875                                         ((const void *)matcher.mask.buf,
14876                                         matcher.mask.size);
14877                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14878                         if (!entry) {
14879                                 DRV_LOG(ERR, "Failed to register meter "
14880                                 "drop default matcher.");
14881                                 goto policy_error;
14882                         }
14883                         mtrmng->def_matcher[domain] = container_of(entry,
14884                         struct mlx5_flow_dv_matcher, entry);
14885                 }
14886                 /* Create default rule in drop table. */
14887                 if (!mtrmng->def_rule[domain]) {
14888                         i = 0;
14889                         actions[i++] = priv->sh->dr_drop_action;
14890                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
14891                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
14892                         ret = mlx5_flow_os_create_flow
14893                                 (mtrmng->def_matcher[domain]->matcher_object,
14894                                 (void *)&value, i, actions,
14895                                 &mtrmng->def_rule[domain]);
14896                         if (ret) {
14897                                 DRV_LOG(ERR, "Failed to create meter "
14898                                 "default drop rule for drop table.");
14899                                 goto policy_error;
14900                         }
14901                 }
14902                 if (!fm->drop_cnt)
14903                         continue;
14904                 MLX5_ASSERT(mtrmng->max_mtr_bits);
14905                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
14906                         /* Create matchers for Drop. */
14907                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14908                                         (enum modify_reg)mtr_id_reg_c, 0,
14909                                         (mtr_id_mask << mtr_id_offset));
14910                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
14911                         matcher.crc = rte_raw_cksum
14912                                         ((const void *)matcher.mask.buf,
14913                                         matcher.mask.size);
14914                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14915                         if (!entry) {
14916                                 DRV_LOG(ERR,
14917                                 "Failed to register meter drop matcher.");
14918                                 goto policy_error;
14919                         }
14920                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
14921                                 container_of(entry, struct mlx5_flow_dv_matcher,
14922                                              entry);
14923                 }
14924                 drop_matcher =
14925                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
14926                 /* Create drop rule, matching meter_id only. */
14927                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
14928                                 (enum modify_reg)mtr_id_reg_c,
14929                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
14930                 i = 0;
14931                 cnt = flow_dv_counter_get_by_idx(dev,
14932                                         fm->drop_cnt, NULL);
14933                 actions[i++] = cnt->action;
14934                 actions[i++] = priv->sh->dr_drop_action;
14935                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
14936                                                (void *)&value, i, actions,
14937                                                &fm->drop_rule[domain]);
14938                 if (ret) {
14939                         DRV_LOG(ERR, "Failed to create meter "
14940                                 "drop rule for drop table.");
14941                         goto policy_error;
14942                 }
14943         }
14944         return 0;
14945 policy_error:
14946         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14947                 if (fm->drop_rule[i]) {
14948                         claim_zero(mlx5_flow_os_destroy_flow
14949                                 (fm->drop_rule[i]));
14950                         fm->drop_rule[i] = NULL;
14951                 }
14952         }
14953         return -1;
14954 }
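
/*
 * Editor's note: with priv->mtr_reg_share set, a single reg_c register
 * carries both the packet color (the low MLX5_MTR_COLOR_BITS bits) and the
 * meter index above it, which is why the drop matcher masks
 * mtr_id_mask << mtr_id_offset. Layout sketch (bit widths illustrative):
 *
 *	 31                               n  n-1       0
 *	+----------------------------------+----------+
 *	|            meter index           |  color   |
 *	+----------------------------------+----------+
 *	n = mtr_id_offset (MLX5_MTR_COLOR_BITS when the register is shared)
 */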
14955
14956 /**
14957  * Find the policy table for prefix table with RSS.
14958  *
14959  * @param[in] dev
14960  *   Pointer to Ethernet device.
14961  * @param[in] mtr_policy
14962  *   Pointer to meter policy table.
14963  * @param[in] rss_desc
14964  *   Pointer to rss_desc
14965  * @return
14966  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
14967  */
14968 static struct mlx5_flow_meter_sub_policy *
14969 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
14970                 struct mlx5_flow_meter_policy *mtr_policy,
14971                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
14972 {
14973         struct mlx5_priv *priv = dev->data->dev_private;
14974         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
14975         uint32_t sub_policy_idx = 0;
14976         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
14977         uint32_t i, j;
14978         struct mlx5_hrxq *hrxq;
14979         struct mlx5_flow_handle dh;
14980         struct mlx5_meter_policy_action_container *act_cnt;
14981         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
14982         uint16_t sub_policy_num;
14983
14984         rte_spinlock_lock(&mtr_policy->sl);
14985         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14986                 if (!rss_desc[i])
14987                         continue;
14988                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
14989                 if (!hrxq_idx[i]) {
14990                         rte_spinlock_unlock(&mtr_policy->sl);
14991                         return NULL;
14992                 }
14993         }
14994         sub_policy_num = (mtr_policy->sub_policy_num >>
14995                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
14996                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14997         for (i = 0; i < sub_policy_num; i++) {
14999                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
15000                         if (rss_desc[j] &&
15001                             hrxq_idx[j] !=
15002                             mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
15003                                 break;
15004                 }
15005                 if (j >= MLX5_MTR_RTE_COLORS) {
15006                         /*
15007                          * Found the sub policy table with
15008                          * the same queue per color.
15009                          */
15010                         rte_spinlock_unlock(&mtr_policy->sl);
15011                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
15012                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
15013                         return mtr_policy->sub_policys[domain][i];
15014                 }
15015         }
15016         /* Create sub policy. */
15017         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
15018                 /* Reuse the first dummy sub_policy. */
15019                 sub_policy = mtr_policy->sub_policys[domain][0];
15020                 sub_policy_idx = sub_policy->idx;
15021         } else {
15022                 sub_policy = mlx5_ipool_zmalloc
15023                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15024                                 &sub_policy_idx);
15025                 if (!sub_policy ||
15026                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
15027                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
15028                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
15029                         goto rss_sub_policy_error;
15030                 }
15031                 sub_policy->idx = sub_policy_idx;
15032                 sub_policy->main_policy = mtr_policy;
15033         }
15034         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15035                 if (!rss_desc[i])
15036                         continue;
15037                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
15038                 /*
15039                  * Overwrite the last action from
15040                  * RSS action to Queue action.
15041                  */
15042                 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
15043                               hrxq_idx[i]);
15044                 if (!hrxq) {
15045                         DRV_LOG(ERR, "Failed to find the policy hrxq.");
15046                         goto rss_sub_policy_error;
15047                 }
15048                 act_cnt = &mtr_policy->act_cnt[i];
15049                 if (act_cnt->rix_mark || act_cnt->modify_hdr) {
15050                         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15051                         if (act_cnt->rix_mark)
15052                                 dh.mark = 1;
15053                         dh.fate_action = MLX5_FLOW_FATE_QUEUE;
15054                         dh.rix_hrxq = hrxq_idx[i];
15055                         flow_drv_rxq_flags_set(dev, &dh);
15056                 }
15057         }
15058         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
15059                 sub_policy, domain)) {
15060                 DRV_LOG(ERR, "Failed to create policy "
15061                         "rules per domain.");
15062                 goto rss_sub_policy_error;
15063         }
15064         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
15065                 i = (mtr_policy->sub_policy_num >>
15066                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15067                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15068                 mtr_policy->sub_policys[domain][i] = sub_policy;
15069                 i++;
15070                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
15071                         goto rss_sub_policy_error;
15072                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
15073                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
15074                 mtr_policy->sub_policy_num |=
15075                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
15076                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
15077         }
15078         rte_spinlock_unlock(&mtr_policy->sl);
15079         return sub_policy;
15080 rss_sub_policy_error:
15081         if (sub_policy) {
15082                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
15083                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
15084                         i = (mtr_policy->sub_policy_num >>
15085                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15086                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15087                         mtr_policy->sub_policys[domain][i] = NULL;
15088                         mlx5_ipool_free
15089                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15090                                         sub_policy->idx);
15091                 }
15092         }
15093         if (sub_policy_idx)
15094                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15095                         sub_policy_idx);
15096         rte_spinlock_unlock(&mtr_policy->sl);
15097         return NULL;
15098 }
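
/*
 * Editor's note: a sub-policy is reused only when every color that has an
 * RSS descriptor resolves to the same Rx hash queue object, as the lookup
 * loop above checks. A minimal comparison sketch (the helper name is
 * illustrative, not a driver API):
 */
static inline bool
mtr_sub_policy_hrxq_match_sketch(const struct mlx5_flow_meter_sub_policy *sp,
				 const uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS],
				 struct mlx5_flow_rss_desc *const rss[MLX5_MTR_RTE_COLORS])
{
	int j;

	for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
		if (rss[j] && hrxq_idx[j] != sp->rix_hrxq[j])
			return false;
	return true;
}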
15099
15100 /**
15101  * Validate the batch counter support in root table.
15102  *
15103  * Create a simple flow with an invalid counter and a drop action on the root
15104  * table to check whether a batch counter with offset is supported there.
15105  *
15106  * @param[in] dev
15107  *   Pointer to rte_eth_dev structure.
15108  *
15109  * @return
15110  *   0 on success, a negative errno value otherwise and rte_errno is set.
15111  */
15112 int
15113 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
15114 {
15115         struct mlx5_priv *priv = dev->data->dev_private;
15116         struct mlx5_dev_ctx_shared *sh = priv->sh;
15117         struct mlx5_flow_dv_match_params mask = {
15118                 .size = sizeof(mask.buf),
15119         };
15120         struct mlx5_flow_dv_match_params value = {
15121                 .size = sizeof(value.buf),
15122         };
15123         struct mlx5dv_flow_matcher_attr dv_attr = {
15124                 .type = IBV_FLOW_ATTR_NORMAL,
15125                 .priority = 0,
15126                 .match_criteria_enable = 0,
15127                 .match_mask = (void *)&mask,
15128         };
15129         void *actions[2] = { 0 };
15130         struct mlx5_flow_tbl_resource *tbl = NULL;
15131         struct mlx5_devx_obj *dcs = NULL;
15132         void *matcher = NULL;
15133         void *flow = NULL;
15134         int ret = -1;
15135
15136         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
15137                                         0, 0, 0, NULL);
15138         if (!tbl)
15139                 goto err;
15140         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
15141         if (!dcs)
15142                 goto err;
15143         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
15144                                                     &actions[0]);
15145         if (ret)
15146                 goto err;
15147         actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
15148                                           priv->drop_queue.hrxq->action;
15149         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
15150         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
15151                                                &matcher);
15152         if (ret)
15153                 goto err;
15154         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
15155                                        actions, &flow);
15156 err:
15157         /*
15158          * If batch counter with offset is not supported, the driver does not
15159          * validate the invalid offset value and flow creation succeeds. In
15160          * that case, batch counters are not supported in the root table.
15161          *
15162          * Otherwise, if flow creation fails, the counter offset is supported.
15163          */
15164         if (flow) {
15165                 DRV_LOG(INFO, "Batch counter is not supported in root "
15166                               "table. Switch to fallback mode.");
15167                 rte_errno = ENOTSUP;
15168                 ret = -rte_errno;
15169                 claim_zero(mlx5_flow_os_destroy_flow(flow));
15170         } else {
15171                 /* Check matcher to make sure validate fail at flow create. */
15172                 if (!matcher || errno != EINVAL)
15173                         DRV_LOG(ERR, "Unexpected error in counter offset "
15174                                      "support detection");
15175                 ret = 0;
15176         }
15177         if (actions[0])
15178                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
15179         if (matcher)
15180                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
15181         if (tbl)
15182                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
15183         if (dcs)
15184                 claim_zero(mlx5_devx_cmd_destroy(dcs));
15185         return ret;
15186 }
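
/*
 * A minimal sketch (not part of the upstream driver) of how a caller may
 * consume the probe above when configuring counter management: any non-zero
 * return means batch counters cannot be used in the root table, so the
 * counter manager should switch to fallback mode. The counter_fallback flag
 * is assumed to be the right knob here; in practice the decision is made
 * together with other capability checks.
 */
static __rte_unused void
flow_dv_counter_mode_sketch(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Zero means counter offsets are supported in the root table. */
	if (mlx5_flow_dv_discover_counter_offset_support(dev) != 0)
		priv->sh->cmng.counter_fallback = 1;
}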
15187
15188 /**
15189  * Query a devx counter.
15190  *
15191  * @param[in] dev
15192  *   Pointer to the Ethernet device structure.
15193  * @param[in] counter
15194  *   Index to the flow counter.
15195  * @param[in] clear
15196  *   Set to clear the counter statistics.
15197  * @param[out] pkts
15198  *   The statistics value of packets.
15199  * @param[out] bytes
15200  *   The statistics value of bytes.
15201  *
15202  * @return
15203  *   0 on success, -1 otherwise.
15204  */
15205 static int
15206 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
15207                       uint64_t *pkts, uint64_t *bytes)
15208 {
15209         struct mlx5_priv *priv = dev->data->dev_private;
15210         struct mlx5_flow_counter *cnt;
15211         uint64_t inn_pkts, inn_bytes;
15212         int ret;
15213
15214         if (!priv->config.devx)
15215                 return -1;
15216
15217         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
15218         if (ret)
15219                 return -1;
15220         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
15221         *pkts = inn_pkts - cnt->hits;
15222         *bytes = inn_bytes - cnt->bytes;
15223         if (clear) {
15224                 cnt->hits = inn_pkts;
15225                 cnt->bytes = inn_bytes;
15226         }
15227         return 0;
15228 }
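
/*
 * Application-side sketch of the query path above through the public
 * rte_flow API: querying a COUNT action fills struct rte_flow_query_count,
 * and its "reset" bit maps to the "clear" argument handled here. The flow
 * is assumed to have been created with a COUNT action.
 */
static __rte_unused int
flow_counter_query_sketch(uint16_t port_id, struct rte_flow *flow,
			  uint64_t *pkts, uint64_t *bytes)
{
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count query = { .reset = 1 }; /* clear on read */
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, &action, &query, &error))
		return -1;
	*pkts = query.hits_set ? query.hits : 0;
	*bytes = query.bytes_set ? query.bytes : 0;
	return 0;
}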
15229
15230 /**
15231  * Get aged-out flows.
15232  *
15233  * @param[in] dev
15234  *   Pointer to the Ethernet device structure.
15235  * @param[in] context
15236  *   The address of an array of pointers to the aged-out flows contexts.
15237  * @param[in] nb_contexts
15238  *   The length of the context array.
15239  * @param[out] error
15240  *   Perform verbose error reporting if not NULL. Initialized in case of
15241  *   error only.
15242  *
15243  * @return
15244  *   The number of aged contexts reported on success, otherwise a negative
15245  *   errno value. If nb_contexts is 0, the total number of aged contexts
15246  *   is returned. If nb_contexts is not 0, the number of aged flows
15247  *   reported in the context array is returned.
15249  */
15250 static int
15251 flow_get_aged_flows(struct rte_eth_dev *dev,
15252                     void **context,
15253                     uint32_t nb_contexts,
15254                     struct rte_flow_error *error)
15255 {
15256         struct mlx5_priv *priv = dev->data->dev_private;
15257         struct mlx5_age_info *age_info;
15258         struct mlx5_age_param *age_param;
15259         struct mlx5_flow_counter *counter;
15260         struct mlx5_aso_age_action *act;
15261         int nb_flows = 0;
15262
15263         if (nb_contexts && !context)
15264                 return rte_flow_error_set(error, EINVAL,
15265                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15266                                           NULL, "empty context");
15267         age_info = GET_PORT_AGE_INFO(priv);
15268         rte_spinlock_lock(&age_info->aged_sl);
15269         LIST_FOREACH(act, &age_info->aged_aso, next) {
15270                 nb_flows++;
15271                 if (nb_contexts) {
15272                         context[nb_flows - 1] =
15273                                                 act->age_params.context;
15274                         if (!(--nb_contexts))
15275                                 break;
15276                 }
15277         }
15278         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
15279                 nb_flows++;
15280                 if (nb_contexts) {
15281                         age_param = MLX5_CNT_TO_AGE(counter);
15282                         context[nb_flows - 1] = age_param->context;
15283                         if (!(--nb_contexts))
15284                                 break;
15285                 }
15286         }
15287         rte_spinlock_unlock(&age_info->aged_sl);
15288         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
15289         return nb_flows;
15290 }
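
/*
 * Application-side sketch of draining aged flows through the public
 * rte_flow_get_aged_flows() API, which ends up in the handler above. The
 * usual pattern is: wait for the RTE_ETH_EVENT_FLOW_AGED event, query the
 * total with nb_contexts == 0, then fetch the contexts in a second call.
 */
static __rte_unused void
flow_aged_drain_sketch(uint16_t port_id)
{
	struct rte_flow_error error;
	void **contexts;
	int total, n, i;

	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	if (total <= 0)
		return;
	contexts = rte_malloc(NULL, sizeof(void *) * total, 0);
	if (contexts == NULL)
		return;
	n = rte_flow_get_aged_flows(port_id, contexts, total, &error);
	for (i = 0; i < n; i++) {
		/* contexts[i] is the rte_flow_action_age.context pointer. */
		(void)contexts[i];
	}
	rte_free(contexts);
}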
15291
15292 /*
15293  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
15294  */
15295 static uint32_t
15296 flow_dv_counter_allocate(struct rte_eth_dev *dev)
15297 {
15298         return flow_dv_counter_alloc(dev, 0);
15299 }
15300
15301 /**
15302  * Validate indirect action.
15303  * Dispatcher for action type specific validation.
15304  *
15305  * @param[in] dev
15306  *   Pointer to the Ethernet device structure.
15307  * @param[in] conf
15308  *   Indirect action configuration.
15309  * @param[in] action
15310  *   The indirect action object to validate.
15311  * @param[out] err
15312  *   Perform verbose error reporting if not NULL. Initialized in case of
15313  *   error only.
15314  *
15315  * @return
15316  *   0 on success, otherwise negative errno value.
15317  */
15318 static int
15319 flow_dv_action_validate(struct rte_eth_dev *dev,
15320                         const struct rte_flow_indir_action_conf *conf,
15321                         const struct rte_flow_action *action,
15322                         struct rte_flow_error *err)
15323 {
15324         struct mlx5_priv *priv = dev->data->dev_private;
15325
15326         RTE_SET_USED(conf);
15327         switch (action->type) {
15328         case RTE_FLOW_ACTION_TYPE_RSS:
15329                 /*
15330                  * priv->obj_ops is set according to driver capabilities.
15331                  * When DevX capabilities are sufficient, it is set to
15332                  * devx_obj_ops. Otherwise, it is set to ibv_obj_ops.
15333                  * ibv_obj_ops doesn't support the ind_table_modify
15334                  * operation, so in this case the indirect RSS action
15335                  * cannot be used.
15336                  */
15337                 if (priv->obj_ops.ind_table_modify == NULL)
15338                         return rte_flow_error_set
15339                                         (err, ENOTSUP,
15340                                          RTE_FLOW_ERROR_TYPE_ACTION,
15341                                          NULL,
15342                                          "Indirect RSS action not supported");
15343                 return mlx5_validate_action_rss(dev, action, err);
15344         case RTE_FLOW_ACTION_TYPE_AGE:
15345                 if (!priv->sh->aso_age_mng)
15346                         return rte_flow_error_set(err, ENOTSUP,
15347                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15348                                                 NULL,
15349                                                 "Indirect age action not supported");
15350                 return flow_dv_validate_action_age(0, action, dev, err);
15351         case RTE_FLOW_ACTION_TYPE_COUNT:
15352                 /*
15353                  * There are two mechanisms to share the action count.
15354                  * The old mechanism uses the shared field to share, while the
15355                  * new mechanism uses the indirect action API.
15356                  * This validation comes to make sure that the two mechanisms
15357                  * are not combined.
15358                  */
15359                 if (is_shared_action_count(action))
15360                         return rte_flow_error_set(err, ENOTSUP,
15361                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15362                                                   NULL,
15363                                                   "Mix shared and indirect counter is not supported");
15364                 return flow_dv_validate_action_count(dev, true, 0, err);
15365         default:
15366                 return rte_flow_error_set(err, ENOTSUP,
15367                                           RTE_FLOW_ERROR_TYPE_ACTION,
15368                                           NULL,
15369                                           "action type not supported");
15370         }
15371 }
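
/*
 * Application-side sketch of the indirect action flow validated above: a
 * COUNT action is created once via rte_flow_action_handle_create() and can
 * then be referenced from several flow rules. A zeroed count conf keeps the
 * legacy "shared" mechanism out of the picture, as the COUNT branch above
 * requires.
 */
static __rte_unused struct rte_flow_action_handle *
indirect_count_create_sketch(uint16_t port_id)
{
	const struct rte_flow_indir_action_conf conf = {
		.ingress = 1, /* validate and use in the ingress domain */
	};
	const struct rte_flow_action_count count_conf = { 0 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
		.conf = &count_conf,
	};
	struct rte_flow_error error;

	return rte_flow_action_handle_create(port_id, &conf, &action, &error);
}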
15372
15373 /**
15374  * Validate meter policy actions.
15375  * Dispatcher for action type specific validation.
15376  *
15377  * @param[in] dev
15378  *   Pointer to the Ethernet device structure.
15379  * @param[in] actions
15380  *   Array of meter policy actions to validate, one action list per color.
15381  * @param[in] attr
15382  *   Attributes of flow to determine steering domain.
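 * @param[out] is_rss
 *   Set to true if an RSS action is used in the policy.
 * @param[out] domain_bitmap
 *   Bitmap of the steering domains the policy is valid for.
 * @param[out] is_def_policy
 *   Set to true if the actions match the default policy.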
15383  * @param[out] error
15384  *   Perform verbose error reporting if not NULL. Initialized in case of
15385  *   error only.
15386  *
15387  * @return
15388  *   0 on success, otherwise negative errno value.
15389  */
15390 static int
15391 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
15392                         const struct rte_flow_action *actions[RTE_COLORS],
15393                         struct rte_flow_attr *attr,
15394                         bool *is_rss,
15395                         uint8_t *domain_bitmap,
15396                         bool *is_def_policy,
15397                         struct rte_mtr_error *error)
15398 {
15399         struct mlx5_priv *priv = dev->data->dev_private;
15400         struct mlx5_dev_config *dev_conf = &priv->config;
15401         const struct rte_flow_action *act;
15402         uint64_t action_flags = 0;
15403         int actions_n;
15404         int i, ret;
15405         struct rte_flow_error flow_err;
15406         uint8_t domain_color[RTE_COLORS] = {0};
15407         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
15408
15409         if (!priv->config.dv_esw_en)
15410                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
15411         *domain_bitmap = def_domain;
15412         if (actions[RTE_COLOR_YELLOW] &&
15413                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
15414                 return -rte_mtr_error_set(error, ENOTSUP,
15415                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15416                                 NULL,
15417                                 "Yellow color does not support any action.");
15418         if (actions[RTE_COLOR_RED] &&
15419                 actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
15420                 return -rte_mtr_error_set(error, ENOTSUP,
15421                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15422                                 NULL, "Red color only supports drop action.");
15423         /*
15424          * Check default policy actions:
15425          * Green/Yellow: no action, Red: drop action
15426          */
15427         if ((!actions[RTE_COLOR_GREEN] ||
15428                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
15429                 *is_def_policy = true;
15430                 return 0;
15431         }
15432         flow_err.message = NULL;
15433         for (i = 0; i < RTE_COLORS; i++) {
15434                 act = actions[i];
15435                 for (action_flags = 0, actions_n = 0;
15436                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
15437                         act++) {
15438                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
15439                                 return -rte_mtr_error_set(error, ENOTSUP,
15440                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15441                                           NULL, "too many actions");
15442                         switch (act->type) {
15443                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15444                                 if (!priv->config.dv_esw_en)
15445                                         return -rte_mtr_error_set(error,
15446                                         ENOTSUP,
15447                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15448                                         NULL, "PORT action is not"
15449                                         " supported when E-Switch is disabled");
15450                                 ret = flow_dv_validate_action_port_id(dev,
15451                                                 action_flags,
15452                                                 act, attr, &flow_err);
15453                                 if (ret)
15454                                         return -rte_mtr_error_set(error,
15455                                         ENOTSUP,
15456                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15457                                         NULL, flow_err.message ?
15458                                         flow_err.message :
15459                                         "PORT action validate check fail");
15460                                 ++actions_n;
15461                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15462                                 break;
15463                         case RTE_FLOW_ACTION_TYPE_MARK:
15464                                 ret = flow_dv_validate_action_mark(dev, act,
15465                                                            action_flags,
15466                                                            attr, &flow_err);
15467                                 if (ret < 0)
15468                                         return -rte_mtr_error_set(error,
15469                                         ENOTSUP,
15470                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15471                                         NULL, flow_err.message ?
15472                                         flow_err.message :
15473                                         "Mark action validate check fail");
15474                                 if (dev_conf->dv_xmeta_en !=
15475                                         MLX5_XMETA_MODE_LEGACY)
15476                                         return -rte_mtr_error_set(error,
15477                                         ENOTSUP,
15478                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15479                                         NULL, "Extended MARK action is "
15480                                         "not supported. Please try to use "
15481                                         "the default policy for meter.");
15482                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15483                                 ++actions_n;
15484                                 break;
15485                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15486                                 ret = flow_dv_validate_action_set_tag(dev,
15487                                                         act, action_flags,
15488                                                         attr, &flow_err);
15489                                 if (ret)
15490                                         return -rte_mtr_error_set(error,
15491                                         ENOTSUP,
15492                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15493                                         NULL, flow_err.message ?
15494                                         flow_err.message :
15495                                         "Set tag action validate check fail");
15496                                 /*
15497                                  * Count all modify-header actions
15498                                  * as one action.
15499                                  */
15500                                 if (!(action_flags &
15501                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
15502                                         ++actions_n;
15503                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15504                                 break;
15505                         case RTE_FLOW_ACTION_TYPE_DROP:
15506                                 ret = mlx5_flow_validate_action_drop
15507                                         (action_flags,
15508                                         attr, &flow_err);
15509                                 if (ret < 0)
15510                                         return -rte_mtr_error_set(error,
15511                                         ENOTSUP,
15512                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15513                                         NULL, flow_err.message ?
15514                                         flow_err.message :
15515                                         "Drop action validate check fail");
15516                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15517                                 ++actions_n;
15518                                 break;
15519                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15520                                 /*
15521                                  * Check whether extensive
15522                                  * metadata feature is engaged.
15523                                  */
15524                                 if (dev_conf->dv_flow_en &&
15525                                         (dev_conf->dv_xmeta_en !=
15526                                         MLX5_XMETA_MODE_LEGACY) &&
15527                                         mlx5_flow_ext_mreg_supported(dev))
15528                                         return -rte_mtr_error_set(error,
15529                                           ENOTSUP,
15530                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15531                                           NULL, "Queue action with meta "
15532                                           "is not supported. Please try to use "
15533                                           "the default policy for meter.");
15534                                 ret = mlx5_flow_validate_action_queue(act,
15535                                                         action_flags, dev,
15536                                                         attr, &flow_err);
15537                                 if (ret < 0)
15538                                         return -rte_mtr_error_set(error,
15539                                           ENOTSUP,
15540                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15541                                           NULL, flow_err.message ?
15542                                           flow_err.message :
15543                                           "Queue action validate check fail");
15544                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15545                                 ++actions_n;
15546                                 break;
15547                         case RTE_FLOW_ACTION_TYPE_RSS:
15548                                 if (dev_conf->dv_flow_en &&
15549                                         (dev_conf->dv_xmeta_en !=
15550                                         MLX5_XMETA_MODE_LEGACY) &&
15551                                         mlx5_flow_ext_mreg_supported(dev))
15552                                         return -rte_mtr_error_set(error,
15553                                           ENOTSUP,
15554                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15555                                           NULL, "RSS action with meta "
15556                                           "is not supported. Please try to use "
15557                                           "the default policy for meter.");
15558                                 ret = mlx5_validate_action_rss(dev, act,
15559                                                 &flow_err);
15560                                 if (ret < 0)
15561                                         return -rte_mtr_error_set(error,
15562                                           ENOTSUP,
15563                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15564                                           NULL, flow_err.message ?
15565                                           flow_err.message :
15566                                           "RSS action validate check fail");
15567                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15568                                 ++actions_n;
15569                                 *is_rss = true;
15570                                 break;
15571                         case RTE_FLOW_ACTION_TYPE_JUMP:
15572                                 ret = flow_dv_validate_action_jump(dev,
15573                                         NULL, act, action_flags,
15574                                         attr, true, &flow_err);
15575                                 if (ret)
15576                                         return -rte_mtr_error_set(error,
15577                                           ENOTSUP,
15578                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15579                                           NULL, flow_err.message ?
15580                                           flow_err.message :
15581                                           "Jump action validate check fail");
15582                                 ++actions_n;
15583                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15584                                 break;
15585                         default:
15586                                 return -rte_mtr_error_set(error, ENOTSUP,
15587                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15588                                         NULL,
15589                                         "Unsupported optional action");
15590                         }
15591                 }
15592                 /* Yellow is not supported, just skip. */
15593                 if (i == RTE_COLOR_YELLOW)
15594                         continue;
15595                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
15596                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
15597                 else if ((action_flags &
15598                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
15599                         (action_flags & MLX5_FLOW_ACTION_MARK))
15600                         /*
15601                          * Only MLX5_XMETA_MODE_LEGACY is supported, so the
15602                          * MARK action is allowed in the ingress domain only.
15603                          */
15604                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
15605                 else
15606                         domain_color[i] = def_domain;
15607                 /*
15608                  * Validate the drop action mutual exclusion
15609                  * with other actions. Drop action is mutually-exclusive
15610                  * with any other action, except for Count action.
15611                  */
15612                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
15613                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
15614                         return -rte_mtr_error_set(error, ENOTSUP,
15615                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15616                                 NULL, "Drop action is mutually-exclusive "
15617                                 "with any other action");
15618                 }
15619                 /* The E-Switch has a few restrictions on using items and actions. */
15620                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
15621                         if (!mlx5_flow_ext_mreg_supported(dev) &&
15622                                 action_flags & MLX5_FLOW_ACTION_MARK)
15623                                 return -rte_mtr_error_set(error, ENOTSUP,
15624                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15625                                         NULL, "unsupported action MARK");
15626                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
15627                                 return -rte_mtr_error_set(error, ENOTSUP,
15628                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15629                                         NULL, "unsupported action QUEUE");
15630                         if (action_flags & MLX5_FLOW_ACTION_RSS)
15631                                 return -rte_mtr_error_set(error, ENOTSUP,
15632                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15633                                         NULL, "unsupported action RSS");
15634                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
15635                                 return -rte_mtr_error_set(error, ENOTSUP,
15636                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15637                                         NULL, "no fate action is found");
15638                 } else {
15639                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
15640                                 (domain_color[i] &
15641                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
15642                                 if ((domain_color[i] &
15643                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
15644                                         domain_color[i] =
15645                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
15646                                 else
15647                                         return -rte_mtr_error_set(error,
15648                                         ENOTSUP,
15649                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15650                                         NULL, "no fate action is found");
15651                         }
15652                 }
15653                 if (domain_color[i] != def_domain)
15654                         *domain_bitmap = domain_color[i];
15655         }
15656         return 0;
15657 }
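
/*
 * Sketch of a policy that exercises the validation above through the public
 * rte_mtr API: green traffic goes to queue 0, red traffic is dropped, and
 * yellow is left empty as the checks above require.
 */
static __rte_unused int
mtr_policy_add_sketch(uint16_t port_id, uint32_t policy_id)
{
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action green_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action red_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_mtr_meter_policy_params params = {
		.actions = {
			[RTE_COLOR_GREEN] = green_actions,
			[RTE_COLOR_YELLOW] = NULL,
			[RTE_COLOR_RED] = red_actions,
		},
	};
	struct rte_mtr_error error;

	return rte_mtr_meter_policy_add(port_id, policy_id, &params, &error);
}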
15658
15659 static int
15660 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
15661 {
15662         struct mlx5_priv *priv = dev->data->dev_private;
15663         int ret = 0;
15664
15665         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
15666                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
15667                                                 flags);
15668                 if (ret != 0)
15669                         return ret;
15670         }
15671         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
15672                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
15673                 if (ret != 0)
15674                         return ret;
15675         }
15676         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
15677                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
15678                 if (ret != 0)
15679                         return ret;
15680         }
15681         return 0;
15682 }
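
/*
 * Sketch of the PMD-private entry point that reaches flow_dv_sync_domain():
 * an application can flush pending steering writes for selected domains.
 * Assumes the rte_pmd_mlx5_sync_flow() wrapper and the MLX5_DOMAIN_BIT_*
 * definitions from rte_pmd_mlx5.h.
 */
static __rte_unused int
flow_sync_domains_sketch(uint16_t port_id)
{
	return rte_pmd_mlx5_sync_flow(port_id,
				      MLX5_DOMAIN_BIT_NIC_RX |
				      MLX5_DOMAIN_BIT_NIC_TX);
}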
15683
15684 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
15685         .validate = flow_dv_validate,
15686         .prepare = flow_dv_prepare,
15687         .translate = flow_dv_translate,
15688         .apply = flow_dv_apply,
15689         .remove = flow_dv_remove,
15690         .destroy = flow_dv_destroy,
15691         .query = flow_dv_query,
15692         .create_mtr_tbls = flow_dv_create_mtr_tbls,
15693         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
15694         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
15695         .create_meter = flow_dv_mtr_alloc,
15696         .free_meter = flow_dv_aso_mtr_release_to_pool,
15697         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
15698         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
15699         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
15700         .create_policy_rules = flow_dv_create_policy_rules,
15701         .destroy_policy_rules = flow_dv_destroy_policy_rules,
15702         .create_def_policy = flow_dv_create_def_policy,
15703         .destroy_def_policy = flow_dv_destroy_def_policy,
15704         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
15705         .counter_alloc = flow_dv_counter_allocate,
15706         .counter_free = flow_dv_counter_free,
15707         .counter_query = flow_dv_counter_query,
15708         .get_aged_flows = flow_get_aged_flows,
15709         .action_validate = flow_dv_action_validate,
15710         .action_create = flow_dv_action_create,
15711         .action_destroy = flow_dv_action_destroy,
15712         .action_update = flow_dv_action_update,
15713         .action_query = flow_dv_action_query,
15714         .sync_domain = flow_dv_sync_domain,
15715 };
15716
15717 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
15718