/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
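
/*
 * VLAN TCI layout reminder: PCP occupies bits 15-13, DEI bit 12 and
 * VID bits 11-0. E.g. PCP = 5 with VID = 100 gives TCI 0xa064.
 */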

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};
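
/*
 * The anonymous bit-field struct above overlays the 'attr' word, so
 * writing attr = 0 clears all of the flags at once (this is used on
 * tunnel decapsulation in flow_dv_attr_init()).
 */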

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() rejects flows with multiple L3/L4 layers except in
 * tunnel mode. For tunnel mode, the items to be modified are the
 * outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. Use the
         * prefix flow's layer flags, since the suffix flow may not carry
         * the user-defined items after the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits, unlike the byte sizes used in the other tables. */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
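        /* 'item' is referenced only by the assertion below, hence it is
         * tagged __rte_unused for builds where MLX5_ASSERT() compiles out.
         */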
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of the field to extract.
 *
 * @return
 *   Converted field, in host-endian format.
326  */
327 static inline uint32_t
328 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
329 {
330         uint32_t ret;
331
332         switch (size) {
333         case 1:
334                 ret = *data;
335                 break;
336         case 2:
337                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
338                 break;
339         case 3:
340                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
341                 ret = (ret << 8) | *(data + sizeof(uint16_t));
342                 break;
343         case 4:
344                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
345                 break;
346         default:
347                 MLX5_ASSERT(false);
348                 ret = 0;
349                 break;
350         }
351         return ret;
352 }
353
354 /**
355  * Convert modify-header action to DV specification.
356  *
357  * Data length of each action is determined by provided field description
358  * and the item mask. Data bit offset and width of each action is determined
359  * by provided item mask.
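 *
 * For example, a 32-bit mask of 0x00ffff00 yields bit offset 8 and bit
 * width 16 (computed below with rte_bsf32() and __builtin_clz()).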
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored, the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present, as it defines the actual
         * field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
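        /*
         * The hardware cannot decrement directly; MLX5_MODIFICATION_TYPE_ADD
         * of 0xFF is used below instead: adding 255 to the 8-bit TTL or
         * hop-limit field decrements it by one (255 == -1 modulo 256).
         */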
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using the increment
                 * operation, add UINT32_MAX to Y X times; each addition
                 * of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
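        /*
         * E.g. to decrement by 2: value = 2 * UINT32_MAX = 0x1fffffffe;
         * the 32-bit truncation below keeps 0xfffffffe, i.e. -2 mod 2^32.
         */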
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using the increment
                 * operation, add UINT32_MAX to Y X times; each addition
                 * of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
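        /* The same wrap-around arithmetic as for the TCP sequence number
         * conversion above applies here.
         */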
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

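                /*
                 * Byte-swap to CPU order, shift the value into the
                 * reg_c[0] sub-field, then swap back. The 32-bit byte
                 * swap is its own inverse, so rte_cpu_to_be_32() serves
                 * both directions.
                 */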
1150                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1151                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1152                 mask = rte_cpu_to_be_32(mask << shl_c0);
1153         }
1154         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1155         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1156                                              MLX5_MODIFICATION_TYPE_SET, error);
1157 }
1158
1159 /**
1160  * Get metadata register index for specified steering domain.
1161  *
1162  * @param[in] dev
1163  *   Pointer to the rte_eth_dev structure.
1164  * @param[in] attr
1165  *   Attributes of flow to determine steering domain.
1166  * @param[out] error
1167  *   Pointer to the error structure.
1168  *
1169  * @return
1170  *   positive index on success, a negative errno value otherwise
1171  *   and rte_errno is set.
1172  */
1173 static enum modify_reg
1174 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1175                          const struct rte_flow_attr *attr,
1176                          struct rte_flow_error *error)
1177 {
1178         int reg =
1179                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1180                                           MLX5_METADATA_FDB :
1181                                             attr->egress ?
1182                                             MLX5_METADATA_TX :
1183                                             MLX5_METADATA_RX, 0, error);
1184         if (reg < 0)
1185                 return rte_flow_error_set(error,
1186                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1187                                           NULL, "unavailable "
1188                                           "metadata register");
1189         return reg;
1190 }
1191
1192 /**
1193  * Convert SET_META action to DV specification.
1194  *
1195  * @param[in] dev
1196  *   Pointer to the rte_eth_dev structure.
1197  * @param[in,out] resource
1198  *   Pointer to the modify-header resource.
1199  * @param[in] attr
1200  *   Attributes of flow that includes this item.
1201  * @param[in] conf
1202  *   Pointer to action specification.
1203  * @param[out] error
1204  *   Pointer to the error structure.
1205  *
1206  * @return
1207  *   0 on success, a negative errno value otherwise and rte_errno is set.
1208  */
1209 static int
1210 flow_dv_convert_action_set_meta
1211                         (struct rte_eth_dev *dev,
1212                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1213                          const struct rte_flow_attr *attr,
1214                          const struct rte_flow_action_set_meta *conf,
1215                          struct rte_flow_error *error)
1216 {
1217         uint32_t data = conf->data;
1218         uint32_t mask = conf->mask;
1219         struct rte_flow_item item = {
1220                 .spec = &data,
1221                 .mask = &mask,
1222         };
1223         struct field_modify_info reg_c_x[] = {
1224                 [1] = {0, 0, 0},
1225         };
1226         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1227
1228         if (reg < 0)
1229                 return reg;
1230         MLX5_ASSERT(reg != REG_NON);
1231         /*
1232          * In datapath code there is no endianness
1233          * coversions for perfromance reasons, all
1234          * pattern conversions are done in rte_flow.
1235          */
1236         if (reg == REG_C_0) {
1237                 struct mlx5_priv *priv = dev->data->dev_private;
1238                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1239                 uint32_t shl_c0;
1240
1241                 MLX5_ASSERT(msk_c0);
1242 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1243                 shl_c0 = rte_bsf32(msk_c0);
1244 #else
1245                 shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
1246 #endif
1247                 mask <<= shl_c0;
1248                 data <<= shl_c0;
1249                 MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
1250         }
1251         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1252         /* The routine expects parameters in memory as big-endian ones. */
1253         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1254                                              MLX5_MODIFICATION_TYPE_SET, error);
1255 }
1256
1257 /**
1258  * Convert modify-header set IPv4 DSCP action to DV specification.
1259  *
1260  * @param[in,out] resource
1261  *   Pointer to the modify-header resource.
1262  * @param[in] action
1263  *   Pointer to action specification.
1264  * @param[out] error
1265  *   Pointer to the error structure.
1266  *
1267  * @return
1268  *   0 on success, a negative errno value otherwise and rte_errno is set.
1269  */
1270 static int
1271 flow_dv_convert_action_modify_ipv4_dscp
1272                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1273                          const struct rte_flow_action *action,
1274                          struct rte_flow_error *error)
1275 {
1276         const struct rte_flow_action_set_dscp *conf =
1277                 (const struct rte_flow_action_set_dscp *)(action->conf);
1278         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1279         struct rte_flow_item_ipv4 ipv4;
1280         struct rte_flow_item_ipv4 ipv4_mask;
1281
1282         memset(&ipv4, 0, sizeof(ipv4));
1283         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1284         ipv4.hdr.type_of_service = conf->dscp;
1285         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1286         item.spec = &ipv4;
1287         item.mask = &ipv4_mask;
1288         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1289                                              MLX5_MODIFICATION_TYPE_SET, error);
1290 }
1291
1292 /**
1293  * Convert modify-header set IPv6 DSCP action to DV specification.
1294  *
1295  * @param[in,out] resource
1296  *   Pointer to the modify-header resource.
1297  * @param[in] action
1298  *   Pointer to action specification.
1299  * @param[out] error
1300  *   Pointer to the error structure.
1301  *
1302  * @return
1303  *   0 on success, a negative errno value otherwise and rte_errno is set.
1304  */
1305 static int
1306 flow_dv_convert_action_modify_ipv6_dscp
1307                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1308                          const struct rte_flow_action *action,
1309                          struct rte_flow_error *error)
1310 {
1311         const struct rte_flow_action_set_dscp *conf =
1312                 (const struct rte_flow_action_set_dscp *)(action->conf);
1313         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1314         struct rte_flow_item_ipv6 ipv6;
1315         struct rte_flow_item_ipv6 ipv6_mask;
1316
1317         memset(&ipv6, 0, sizeof(ipv6));
1318         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1319         /*
1320          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1321          * rdma-core only accept the DSCP bits byte aligned start from
1322          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1323          * bits in IPv6 case as rdma-core requires byte aligned value.
1324          */
1325         ipv6.hdr.vtc_flow = conf->dscp;
1326         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1327         item.spec = &ipv6;
1328         item.mask = &ipv6_mask;
1329         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1330                                              MLX5_MODIFICATION_TYPE_SET, error);
1331 }
1332
1333 static int
1334 mlx5_flow_item_field_width(enum rte_flow_field_id field)
1335 {
1336         switch (field) {
1337         case RTE_FLOW_FIELD_START:
1338                 return 32;
1339         case RTE_FLOW_FIELD_MAC_DST:
1340         case RTE_FLOW_FIELD_MAC_SRC:
1341                 return 48;
1342         case RTE_FLOW_FIELD_VLAN_TYPE:
1343                 return 16;
1344         case RTE_FLOW_FIELD_VLAN_ID:
1345                 return 12;
1346         case RTE_FLOW_FIELD_MAC_TYPE:
1347                 return 16;
1348         case RTE_FLOW_FIELD_IPV4_DSCP:
1349                 return 6;
1350         case RTE_FLOW_FIELD_IPV4_TTL:
1351                 return 8;
1352         case RTE_FLOW_FIELD_IPV4_SRC:
1353         case RTE_FLOW_FIELD_IPV4_DST:
1354                 return 32;
1355         case RTE_FLOW_FIELD_IPV6_DSCP:
1356                 return 6;
1357         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1358                 return 8;
1359         case RTE_FLOW_FIELD_IPV6_SRC:
1360         case RTE_FLOW_FIELD_IPV6_DST:
1361                 return 128;
1362         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1363         case RTE_FLOW_FIELD_TCP_PORT_DST:
1364                 return 16;
1365         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1366         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1367                 return 32;
1368         case RTE_FLOW_FIELD_TCP_FLAGS:
1369                 return 6;
1370         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1371         case RTE_FLOW_FIELD_UDP_PORT_DST:
1372                 return 16;
1373         case RTE_FLOW_FIELD_VXLAN_VNI:
1374         case RTE_FLOW_FIELD_GENEVE_VNI:
1375                 return 24;
1376         case RTE_FLOW_FIELD_GTP_TEID:
1377         case RTE_FLOW_FIELD_TAG:
1378                 return 32;
1379         case RTE_FLOW_FIELD_MARK:
1380                 return 24;
1381         case RTE_FLOW_FIELD_META:
1382                 return 32;
1383         case RTE_FLOW_FIELD_POINTER:
1384         case RTE_FLOW_FIELD_VALUE:
1385                 return 64;
1386         default:
1387                 MLX5_ASSERT(false);
1388         }
1389         return 0;
1390 }
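
/*
 * Usage sketch (a hypothetical helper, not used by the driver): the
 * width returned by mlx5_flow_item_field_width() is in bits, so a
 * caller can derive how many 32-bit modify-header segments a field
 * spans, e.g. a 128-bit IPv6 address needs four segments.
 */
static __rte_unused int
flow_dv_example_field_segments(enum rte_flow_field_id field)
{
	int width = mlx5_flow_item_field_width(field);

	/* Round the bit width up to whole 32-bit words. */
	return (width + 31) / 32;
}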
1391
1392 static void
1393 mlx5_flow_field_id_to_modify_info
1394                 (const struct rte_flow_action_modify_data *data,
1395                  struct field_modify_info *info,
1396                  uint32_t *mask, uint32_t *value,
1397                  uint32_t width, uint32_t dst_width,
1398                  struct rte_eth_dev *dev,
1399                  const struct rte_flow_attr *attr,
1400                  struct rte_flow_error *error)
1401 {
1402         uint32_t idx = 0;
1403         uint64_t val = 0;
1404         switch (data->field) {
1405         case RTE_FLOW_FIELD_START:
1406                 /* not supported yet */
1407                 MLX5_ASSERT(false);
1408                 break;
1409         case RTE_FLOW_FIELD_MAC_DST:
1410                 if (mask) {
1411                         if (data->offset < 32) {
1412                                 info[idx] = (struct field_modify_info){4, 0,
1413                                                 MLX5_MODI_OUT_DMAC_47_16};
1414                                 if (width < 32) {
1415                                         mask[idx] =
1416                                                 rte_cpu_to_be_32(0xffffffff >>
1417                                                                  (32 - width));
1418                                         width = 0;
1419                                 } else {
1420                                         mask[idx] = RTE_BE32(0xffffffff);
1421                                         width -= 32;
1422                                 }
1423                                 if (!width)
1424                                         break;
1425                                 ++idx;
1426                         }
1427                         info[idx] = (struct field_modify_info){2, 4 * idx,
1428                                                 MLX5_MODI_OUT_DMAC_15_0};
1429                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1430                 } else {
1431                         if (data->offset < 32)
1432                                 info[idx++] = (struct field_modify_info){4, 0,
1433                                                 MLX5_MODI_OUT_DMAC_47_16};
1434                         info[idx] = (struct field_modify_info){2, 0,
1435                                                 MLX5_MODI_OUT_DMAC_15_0};
1436                 }
1437                 break;
1438         case RTE_FLOW_FIELD_MAC_SRC:
1439                 if (mask) {
1440                         if (data->offset < 32) {
1441                                 info[idx] = (struct field_modify_info){4, 0,
1442                                                 MLX5_MODI_OUT_SMAC_47_16};
1443                                 if (width < 32) {
1444                                         mask[idx] =
1445                                                 rte_cpu_to_be_32(0xffffffff >>
1446                                                                 (32 - width));
1447                                         width = 0;
1448                                 } else {
1449                                         mask[idx] = RTE_BE32(0xffffffff);
1450                                         width -= 32;
1451                                 }
1452                                 if (!width)
1453                                         break;
1454                                 ++idx;
1455                         }
1456                         info[idx] = (struct field_modify_info){2, 4 * idx,
1457                                                 MLX5_MODI_OUT_SMAC_15_0};
1458                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1459                 } else {
1460                         if (data->offset < 32)
1461                                 info[idx++] = (struct field_modify_info){4, 0,
1462                                                 MLX5_MODI_OUT_SMAC_47_16};
1463                         info[idx] = (struct field_modify_info){2, 0,
1464                                                 MLX5_MODI_OUT_SMAC_15_0};
1465                 }
1466                 break;
1467         case RTE_FLOW_FIELD_VLAN_TYPE:
1468                 /* not supported yet */
1469                 break;
1470         case RTE_FLOW_FIELD_VLAN_ID:
1471                 info[idx] = (struct field_modify_info){2, 0,
1472                                         MLX5_MODI_OUT_FIRST_VID};
1473                 if (mask)
1474                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1475                 break;
1476         case RTE_FLOW_FIELD_MAC_TYPE:
1477                 info[idx] = (struct field_modify_info){2, 0,
1478                                         MLX5_MODI_OUT_ETHERTYPE};
1479                 if (mask)
1480                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1481                 break;
1482         case RTE_FLOW_FIELD_IPV4_DSCP:
1483                 info[idx] = (struct field_modify_info){1, 0,
1484                                         MLX5_MODI_OUT_IP_DSCP};
1485                 if (mask)
1486                         mask[idx] = 0x3f >> (6 - width);
1487                 break;
1488         case RTE_FLOW_FIELD_IPV4_TTL:
1489                 info[idx] = (struct field_modify_info){1, 0,
1490                                         MLX5_MODI_OUT_IPV4_TTL};
1491                 if (mask)
1492                         mask[idx] = 0xff >> (8 - width);
1493                 break;
1494         case RTE_FLOW_FIELD_IPV4_SRC:
1495                 info[idx] = (struct field_modify_info){4, 0,
1496                                         MLX5_MODI_OUT_SIPV4};
1497                 if (mask)
1498                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1499                                                      (32 - width));
1500                 break;
1501         case RTE_FLOW_FIELD_IPV4_DST:
1502                 info[idx] = (struct field_modify_info){4, 0,
1503                                         MLX5_MODI_OUT_DIPV4};
1504                 if (mask)
1505                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1506                                                      (32 - width));
1507                 break;
1508         case RTE_FLOW_FIELD_IPV6_DSCP:
1509                 info[idx] = (struct field_modify_info){1, 0,
1510                                         MLX5_MODI_OUT_IP_DSCP};
1511                 if (mask)
1512                         mask[idx] = 0x3f >> (6 - width);
1513                 break;
1514         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1515                 info[idx] = (struct field_modify_info){1, 0,
1516                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1517                 if (mask)
1518                         mask[idx] = 0xff >> (8 - width);
1519                 break;
1520         case RTE_FLOW_FIELD_IPV6_SRC:
1521                 if (mask) {
1522                         if (data->offset < 32) {
1523                                 info[idx] = (struct field_modify_info){4,
1524                                                 4 * idx,
1525                                                 MLX5_MODI_OUT_SIPV6_31_0};
1526                                 if (width < 32) {
1527                                         mask[idx] =
1528                                                 rte_cpu_to_be_32(0xffffffff >>
1529                                                                  (32 - width));
1530                                         width = 0;
1531                                 } else {
1532                                         mask[idx] = RTE_BE32(0xffffffff);
1533                                         width -= 32;
1534                                 }
1535                                 if (!width)
1536                                         break;
1537                                 ++idx;
1538                         }
1539                         if (data->offset < 64) {
1540                                 info[idx] = (struct field_modify_info){4,
1541                                                 4 * idx,
1542                                                 MLX5_MODI_OUT_SIPV6_63_32};
1543                                 if (width < 32) {
1544                                         mask[idx] =
1545                                                 rte_cpu_to_be_32(0xffffffff >>
1546                                                                  (32 - width));
1547                                         width = 0;
1548                                 } else {
1549                                         mask[idx] = RTE_BE32(0xffffffff);
1550                                         width -= 32;
1551                                 }
1552                                 if (!width)
1553                                         break;
1554                                 ++idx;
1555                         }
1556                         if (data->offset < 96) {
1557                                 info[idx] = (struct field_modify_info){4,
1558                                                 4 * idx,
1559                                                 MLX5_MODI_OUT_SIPV6_95_64};
1560                                 if (width < 32) {
1561                                         mask[idx] =
1562                                                 rte_cpu_to_be_32(0xffffffff >>
1563                                                                  (32 - width));
1564                                         width = 0;
1565                                 } else {
1566                                         mask[idx] = RTE_BE32(0xffffffff);
1567                                         width -= 32;
1568                                 }
1569                                 if (!width)
1570                                         break;
1571                                 ++idx;
1572                         }
1573                         info[idx] = (struct field_modify_info){4, 4 * idx,
1574                                                 MLX5_MODI_OUT_SIPV6_127_96};
1575                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1576                                                      (32 - width));
1577                 } else {
1578                         if (data->offset < 32)
1579                                 info[idx++] = (struct field_modify_info){4, 0,
1580                                                 MLX5_MODI_OUT_SIPV6_31_0};
1581                         if (data->offset < 64)
1582                                 info[idx++] = (struct field_modify_info){4, 0,
1583                                                 MLX5_MODI_OUT_SIPV6_63_32};
1584                         if (data->offset < 96)
1585                                 info[idx++] = (struct field_modify_info){4, 0,
1586                                                 MLX5_MODI_OUT_SIPV6_95_64};
1587                         if (data->offset < 128)
1588                                 info[idx++] = (struct field_modify_info){4, 0,
1589                                                 MLX5_MODI_OUT_SIPV6_127_96};
1590                 }
1591                 break;
1592         case RTE_FLOW_FIELD_IPV6_DST:
1593                 if (mask) {
1594                         if (data->offset < 32) {
1595                                 info[idx] = (struct field_modify_info){4,
1596                                                 4 * idx,
1597                                                 MLX5_MODI_OUT_DIPV6_31_0};
1598                                 if (width < 32) {
1599                                         mask[idx] =
1600                                                 rte_cpu_to_be_32(0xffffffff >>
1601                                                                  (32 - width));
1602                                         width = 0;
1603                                 } else {
1604                                         mask[idx] = RTE_BE32(0xffffffff);
1605                                         width -= 32;
1606                                 }
1607                                 if (!width)
1608                                         break;
1609                                 ++idx;
1610                         }
1611                         if (data->offset < 64) {
1612                                 info[idx] = (struct field_modify_info){4,
1613                                                 4 * idx,
1614                                                 MLX5_MODI_OUT_DIPV6_63_32};
1615                                 if (width < 32) {
1616                                         mask[idx] =
1617                                                 rte_cpu_to_be_32(0xffffffff >>
1618                                                                  (32 - width));
1619                                         width = 0;
1620                                 } else {
1621                                         mask[idx] = RTE_BE32(0xffffffff);
1622                                         width -= 32;
1623                                 }
1624                                 if (!width)
1625                                         break;
1626                                 ++idx;
1627                         }
1628                         if (data->offset < 96) {
1629                                 info[idx] = (struct field_modify_info){4,
1630                                                 4 * idx,
1631                                                 MLX5_MODI_OUT_DIPV6_95_64};
1632                                 if (width < 32) {
1633                                         mask[idx] =
1634                                                 rte_cpu_to_be_32(0xffffffff >>
1635                                                                  (32 - width));
1636                                         width = 0;
1637                                 } else {
1638                                         mask[idx] = RTE_BE32(0xffffffff);
1639                                         width -= 32;
1640                                 }
1641                                 if (!width)
1642                                         break;
1643                                 ++idx;
1644                         }
1645                         info[idx] = (struct field_modify_info){4, 4 * idx,
1646                                                 MLX5_MODI_OUT_DIPV6_127_96};
1647                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1648                                                      (32 - width));
1649                 } else {
1650                         if (data->offset < 32)
1651                                 info[idx++] = (struct field_modify_info){4, 0,
1652                                                 MLX5_MODI_OUT_DIPV6_31_0};
1653                         if (data->offset < 64)
1654                                 info[idx++] = (struct field_modify_info){4, 0,
1655                                                 MLX5_MODI_OUT_DIPV6_63_32};
1656                         if (data->offset < 96)
1657                                 info[idx++] = (struct field_modify_info){4, 0,
1658                                                 MLX5_MODI_OUT_DIPV6_95_64};
1659                         if (data->offset < 128)
1660                                 info[idx++] = (struct field_modify_info){4, 0,
1661                                                 MLX5_MODI_OUT_DIPV6_127_96};
1662                 }
1663                 break;
1664         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1665                 info[idx] = (struct field_modify_info){2, 0,
1666                                         MLX5_MODI_OUT_TCP_SPORT};
1667                 if (mask)
1668                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1669                 break;
1670         case RTE_FLOW_FIELD_TCP_PORT_DST:
1671                 info[idx] = (struct field_modify_info){2, 0,
1672                                         MLX5_MODI_OUT_TCP_DPORT};
1673                 if (mask)
1674                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1675                 break;
1676         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1677                 info[idx] = (struct field_modify_info){4, 0,
1678                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1679                 if (mask)
1680                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1681                                                      (32 - width));
1682                 break;
1683         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1684                 info[idx] = (struct field_modify_info){4, 0,
1685                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1686                 if (mask)
1687                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1688                                                      (32 - width));
1689                 break;
1690         case RTE_FLOW_FIELD_TCP_FLAGS:
1691                 info[idx] = (struct field_modify_info){1, 0,
1692                                         MLX5_MODI_OUT_TCP_FLAGS};
1693                 if (mask)
1694                         mask[idx] = 0x3f >> (6 - width);
1695                 break;
1696         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1697                 info[idx] = (struct field_modify_info){2, 0,
1698                                         MLX5_MODI_OUT_UDP_SPORT};
1699                 if (mask)
1700                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1701                 break;
1702         case RTE_FLOW_FIELD_UDP_PORT_DST:
1703                 info[idx] = (struct field_modify_info){2, 0,
1704                                         MLX5_MODI_OUT_UDP_DPORT};
1705                 if (mask)
1706                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1707                 break;
1708         case RTE_FLOW_FIELD_VXLAN_VNI:
1709                 /* not supported yet */
1710                 break;
1711         case RTE_FLOW_FIELD_GENEVE_VNI:
1710                 /* not supported yet */
1713                 break;
1714         case RTE_FLOW_FIELD_GTP_TEID:
1715                 info[idx] = (struct field_modify_info){4, 0,
1716                                         MLX5_MODI_GTP_TEID};
1717                 if (mask)
1718                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1719                                                      (32 - width));
1720                 break;
1721         case RTE_FLOW_FIELD_TAG:
1722                 {
1723                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1724                                                    data->level, error);
1725                         if (reg < 0)
1726                                 return;
1727                         MLX5_ASSERT(reg != REG_NON);
1728                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1729                         info[idx] = (struct field_modify_info){4, 0,
1730                                                 reg_to_field[reg]};
1731                         if (mask)
1732                                 mask[idx] =
1733                                         rte_cpu_to_be_32(0xffffffff >>
1734                                                          (32 - width));
1735                 }
1736                 break;
1737         case RTE_FLOW_FIELD_MARK:
1738                 {
1739                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1740                                                        0, error);
1741                         if (reg < 0)
1742                                 return;
1743                         MLX5_ASSERT(reg != REG_NON);
1744                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1745                         info[idx] = (struct field_modify_info){4, 0,
1746                                                 reg_to_field[reg]};
1747                         if (mask)
1748                                 mask[idx] =
1749                                         rte_cpu_to_be_32(0xffffffff >>
1750                                                          (32 - width));
1751                 }
1752                 break;
1753         case RTE_FLOW_FIELD_META:
1754                 {
1755                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1756                         if (reg < 0)
1757                                 return;
1758                         MLX5_ASSERT(reg != REG_NON);
1759                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1760                         info[idx] = (struct field_modify_info){4, 0,
1761                                                 reg_to_field[reg]};
1762                         if (mask)
1763                                 mask[idx] =
1764                                         rte_cpu_to_be_32(0xffffffff >>
1765                                                          (32 - width));
1766                 }
1767                 break;
1768         case RTE_FLOW_FIELD_POINTER:
1769         case RTE_FLOW_FIELD_VALUE:
1770                 if (data->field == RTE_FLOW_FIELD_POINTER)
1771                         memcpy(&val, (void *)(uintptr_t)data->value,
1772                                sizeof(uint64_t));
1773                 else
1774                         val = data->value;
1775                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1776                         if (mask[idx]) {
1777                                 if (dst_width > 16) {
1778                                         value[idx] = rte_cpu_to_be_32(val);
1779                                         val >>= 32;
1780                                 } else if (dst_width > 8) {
1781                                         value[idx] = rte_cpu_to_be_16(val);
1782                                         val >>= 16;
1783                                 } else {
1784                                         value[idx] = (uint8_t)val;
1785                                         val >>= 8;
1786                                 }
1787                                 if (!val)
1788                                         break;
1789                         }
1790                 }
1791                 break;
1792         default:
1793                 MLX5_ASSERT(false);
1794                 break;
1795         }
1796 }
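
/*
 * Worked example (informative only): for RTE_FLOW_FIELD_MAC_DST with
 * offset 0 and width 48 the function above emits two entries:
 *   info[0] = {4, 0, MLX5_MODI_OUT_DMAC_47_16}, mask[0] = 0xffffffff
 *   info[1] = {2, 4, MLX5_MODI_OUT_DMAC_15_0},  mask[1] = 0xffff
 * i.e. the upper 32 bits and the lower 16 bits of the MAC address are
 * programmed as separate modify-header segments (masks in big-endian).
 * A width below 32 instead truncates mask[0] to the requested number
 * of most-significant bits and stops there.
 */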
1797
1798 /**
1799  * Convert modify_field action to DV specification.
1800  *
1801  * @param[in] dev
1802  *   Pointer to the rte_eth_dev structure.
1803  * @param[in,out] resource
1804  *   Pointer to the modify-header resource.
1805  * @param[in] action
1806  *   Pointer to action specification.
1807  * @param[in] attr
1808  *   Attributes of flow that includes this item.
1809  * @param[out] error
1810  *   Pointer to the error structure.
1811  *
1812  * @return
1813  *   0 on success, a negative errno value otherwise and rte_errno is set.
1814  */
1815 static int
1816 flow_dv_convert_action_modify_field
1817                         (struct rte_eth_dev *dev,
1818                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1819                          const struct rte_flow_action *action,
1820                          const struct rte_flow_attr *attr,
1821                          struct rte_flow_error *error)
1822 {
1823         const struct rte_flow_action_modify_field *conf =
1824                 (const struct rte_flow_action_modify_field *)(action->conf);
1825         struct rte_flow_item item;
1826         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1827                                                                 {0, 0, 0} };
1828         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1829                                                                 {0, 0, 0} };
1830         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1831         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1832         uint32_t type;
1833         uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
1834
1835         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1836                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1837                 type = MLX5_MODIFICATION_TYPE_SET;
1838                 /* For SET fill the destination field (field) first. */
1839                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1840                         value, conf->width, dst_width, dev, attr, error);
1841                 /* Then copy immediate value from source as per mask. */
1842                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1843                         value, conf->width, dst_width, dev, attr, error);
1844                 item.spec = &value;
1845         } else {
1846                 type = MLX5_MODIFICATION_TYPE_COPY;
1847                 /* For COPY fill the destination field (dcopy) without mask. */
1848                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1849                         value, conf->width, dst_width, dev, attr, error);
1850                 /* Then construct the source field (field) with mask. */
1851                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1852                         value, conf->width, dst_width, dev, attr, error);
1853         }
1854         item.mask = &mask;
1855         return flow_dv_convert_modify_action(&item,
1856                         field, dcopy, resource, type, error);
1857 }
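
/*
 * Usage sketch (application-side code under stated assumptions, not
 * used by the driver): a MODIFY_FIELD action that SETs the IPv4 TTL to
 * the immediate value 64; the conversion above would turn it into a
 * single 8-bit MLX5_MODIFICATION_TYPE_SET entry.
 */
static __rte_unused struct rte_flow_action
flow_dv_example_modify_field_action(void)
{
	static const struct rte_flow_action_modify_field conf = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = { .field = RTE_FLOW_FIELD_IPV4_TTL },
		.src = { .field = RTE_FLOW_FIELD_VALUE, .value = 64 },
		.width = 8, /* the full 8-bit TTL */
	};

	return (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
		.conf = &conf,
	};
}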
1858
1859 /**
1860  * Validate MARK item.
1861  *
1862  * @param[in] dev
1863  *   Pointer to the rte_eth_dev structure.
1864  * @param[in] item
1865  *   Item specification.
1866  * @param[in] attr
1867  *   Attributes of flow that includes this item.
1868  * @param[out] error
1869  *   Pointer to error structure.
1870  *
1871  * @return
1872  *   0 on success, a negative errno value otherwise and rte_errno is set.
1873  */
1874 static int
1875 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1876                            const struct rte_flow_item *item,
1877                            const struct rte_flow_attr *attr __rte_unused,
1878                            struct rte_flow_error *error)
1879 {
1880         struct mlx5_priv *priv = dev->data->dev_private;
1881         struct mlx5_dev_config *config = &priv->config;
1882         const struct rte_flow_item_mark *spec = item->spec;
1883         const struct rte_flow_item_mark *mask = item->mask;
1884         const struct rte_flow_item_mark nic_mask = {
1885                 .id = priv->sh->dv_mark_mask,
1886         };
1887         int ret;
1888
1889         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1890                 return rte_flow_error_set(error, ENOTSUP,
1891                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1892                                           "extended metadata feature"
1893                                           " isn't enabled");
1894         if (!mlx5_flow_ext_mreg_supported(dev))
1895                 return rte_flow_error_set(error, ENOTSUP,
1896                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1897                                           "extended metadata register"
1898                                           " isn't supported");
1899         if (!nic_mask.id)
1900                 return rte_flow_error_set(error, ENOTSUP,
1901                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1902                                           "extended metadata register"
1903                                           " isn't available");
1904         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1905         if (ret < 0)
1906                 return ret;
1907         if (!spec)
1908                 return rte_flow_error_set(error, EINVAL,
1909                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1910                                           item->spec,
1911                                           "data cannot be empty");
1912         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1913                 return rte_flow_error_set(error, EINVAL,
1914                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1915                                           &spec->id,
1916                                           "mark id exceeds the limit");
1917         if (!mask)
1918                 mask = &nic_mask;
1919         if (!mask->id)
1920                 return rte_flow_error_set(error, EINVAL,
1921                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1922                                         "mask cannot be zero");
1923
1924         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1925                                         (const uint8_t *)&nic_mask,
1926                                         sizeof(struct rte_flow_item_mark),
1927                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1928         if (ret < 0)
1929                 return ret;
1930         return 0;
1931 }
1932
1933 /**
1934  * Validate META item.
1935  *
1936  * @param[in] dev
1937  *   Pointer to the rte_eth_dev structure.
1938  * @param[in] item
1939  *   Item specification.
1940  * @param[in] attr
1941  *   Attributes of flow that includes this item.
1942  * @param[out] error
1943  *   Pointer to error structure.
1944  *
1945  * @return
1946  *   0 on success, a negative errno value otherwise and rte_errno is set.
1947  */
1948 static int
1949 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1950                            const struct rte_flow_item *item,
1951                            const struct rte_flow_attr *attr,
1952                            struct rte_flow_error *error)
1953 {
1954         struct mlx5_priv *priv = dev->data->dev_private;
1955         struct mlx5_dev_config *config = &priv->config;
1956         const struct rte_flow_item_meta *spec = item->spec;
1957         const struct rte_flow_item_meta *mask = item->mask;
1958         struct rte_flow_item_meta nic_mask = {
1959                 .data = UINT32_MAX
1960         };
1961         int reg;
1962         int ret;
1963
1964         if (!spec)
1965                 return rte_flow_error_set(error, EINVAL,
1966                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1967                                           item->spec,
1968                                           "data cannot be empty");
1969         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1970                 if (!mlx5_flow_ext_mreg_supported(dev))
1971                         return rte_flow_error_set(error, ENOTSUP,
1972                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1973                                           "extended metadata register"
1974                                           " isn't supported");
1975                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1976                 if (reg < 0)
1977                         return reg;
1978                 if (reg == REG_NON)
1979                         return rte_flow_error_set(error, ENOTSUP,
1980                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1981                                         "unavailable extended metadata register");
1982                 if (reg == REG_B)
1983                         return rte_flow_error_set(error, ENOTSUP,
1984                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1985                                           "match on reg_b "
1986                                           "isn't supported");
1987                 if (reg != REG_A)
1988                         nic_mask.data = priv->sh->dv_meta_mask;
1989         } else {
1990                 if (attr->transfer)
1991                         return rte_flow_error_set(error, ENOTSUP,
1992                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1993                                         "extended metadata feature "
1994                                         "should be enabled when "
1995                                         "meta item is requested "
1996                                         "with e-switch mode");
1997                 if (attr->ingress)
1998                         return rte_flow_error_set(error, ENOTSUP,
1999                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2000                                         "match on metadata for ingress "
2001                                         "is not supported in legacy "
2002                                         "metadata mode");
2003         }
2004         if (!mask)
2005                 mask = &rte_flow_item_meta_mask;
2006         if (!mask->data)
2007                 return rte_flow_error_set(error, EINVAL,
2008                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2009                                         "mask cannot be zero");
2010
2011         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2012                                         (const uint8_t *)&nic_mask,
2013                                         sizeof(struct rte_flow_item_meta),
2014                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2015         return ret;
2016 }
2017
2018 /**
2019  * Validate TAG item.
2020  *
2021  * @param[in] dev
2022  *   Pointer to the rte_eth_dev structure.
2023  * @param[in] item
2024  *   Item specification.
2025  * @param[in] attr
2026  *   Attributes of flow that includes this item.
2027  * @param[out] error
2028  *   Pointer to error structure.
2029  *
2030  * @return
2031  *   0 on success, a negative errno value otherwise and rte_errno is set.
2032  */
2033 static int
2034 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2035                           const struct rte_flow_item *item,
2036                           const struct rte_flow_attr *attr __rte_unused,
2037                           struct rte_flow_error *error)
2038 {
2039         const struct rte_flow_item_tag *spec = item->spec;
2040         const struct rte_flow_item_tag *mask = item->mask;
2041         const struct rte_flow_item_tag nic_mask = {
2042                 .data = RTE_BE32(UINT32_MAX),
2043                 .index = 0xff,
2044         };
2045         int ret;
2046
2047         if (!mlx5_flow_ext_mreg_supported(dev))
2048                 return rte_flow_error_set(error, ENOTSUP,
2049                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2050                                           "extended metadata register"
2051                                           " isn't supported");
2052         if (!spec)
2053                 return rte_flow_error_set(error, EINVAL,
2054                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2055                                           item->spec,
2056                                           "data cannot be empty");
2057         if (!mask)
2058                 mask = &rte_flow_item_tag_mask;
2059         if (!mask->data)
2060                 return rte_flow_error_set(error, EINVAL,
2061                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2062                                         "mask cannot be zero");
2063
2064         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2065                                         (const uint8_t *)&nic_mask,
2066                                         sizeof(struct rte_flow_item_tag),
2067                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2068         if (ret < 0)
2069                 return ret;
2070         if (mask->index != 0xff)
2071                 return rte_flow_error_set(error, EINVAL,
2072                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2073                                           "partial mask for tag index"
2074                                           " is not supported");
2075         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2076         if (ret < 0)
2077                 return ret;
2078         MLX5_ASSERT(ret != REG_NON);
2079         return 0;
2080 }
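
/*
 * Matching sketch for the three metadata items validated above
 * (a hypothetical helper, not used by the driver): a MARK pattern item
 * matching mark id 42 exactly; META and TAG items follow the same
 * spec/mask scheme with their respective structures.
 */
static __rte_unused void
flow_dv_example_mark_pattern(struct rte_flow_item *item)
{
	static const struct rte_flow_item_mark spec = { .id = 42 };
	static const struct rte_flow_item_mark mask = { .id = UINT32_MAX };

	item->type = RTE_FLOW_ITEM_TYPE_MARK;
	item->spec = &spec;
	item->last = NULL;
	item->mask = &mask;
}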
2081
2082 /**
2083  * Validate port_id item.
2084  *
2085  * @param[in] dev
2086  *   Pointer to the rte_eth_dev structure.
2087  * @param[in] item
2088  *   Item specification.
2089  * @param[in] attr
2090  *   Attributes of flow that includes this item.
2091  * @param[in] item_flags
2092  *   Bit-fields that hold the items detected until now.
2093  * @param[out] error
2094  *   Pointer to error structure.
2095  *
2096  * @return
2097  *   0 on success, a negative errno value otherwise and rte_errno is set.
2098  */
2099 static int
2100 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2101                               const struct rte_flow_item *item,
2102                               const struct rte_flow_attr *attr,
2103                               uint64_t item_flags,
2104                               struct rte_flow_error *error)
2105 {
2106         const struct rte_flow_item_port_id *spec = item->spec;
2107         const struct rte_flow_item_port_id *mask = item->mask;
2108         const struct rte_flow_item_port_id switch_mask = {
2109                         .id = 0xffffffff,
2110         };
2111         struct mlx5_priv *esw_priv;
2112         struct mlx5_priv *dev_priv;
2113         int ret;
2114
2115         if (!attr->transfer)
2116                 return rte_flow_error_set(error, EINVAL,
2117                                           RTE_FLOW_ERROR_TYPE_ITEM,
2118                                           NULL,
2119                                           "match on port id is valid only"
2120                                           " when transfer flag is enabled");
2121         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2122                 return rte_flow_error_set(error, ENOTSUP,
2123                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2124                                           "multiple source ports are not"
2125                                           " supported");
2126         if (!mask)
2127                 mask = &switch_mask;
2128         if (mask->id != 0xffffffff)
2129                 return rte_flow_error_set(error, ENOTSUP,
2130                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2131                                            mask,
2132                                            "no support for partial mask on"
2133                                            " \"id\" field");
2134         ret = mlx5_flow_item_acceptable
2135                                 (item, (const uint8_t *)mask,
2136                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2137                                  sizeof(struct rte_flow_item_port_id),
2138                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2139         if (ret)
2140                 return ret;
2141         if (!spec)
2142                 return 0;
2143         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2144         if (!esw_priv)
2145                 return rte_flow_error_set(error, rte_errno,
2146                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2147                                           "failed to obtain E-Switch info for"
2148                                           " port");
2149         dev_priv = mlx5_dev_to_eswitch_info(dev);
2150         if (!dev_priv)
2151                 return rte_flow_error_set(error, rte_errno,
2152                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2153                                           NULL,
2154                                           "failed to obtain E-Switch info");
2155         if (esw_priv->domain_id != dev_priv->domain_id)
2156                 return rte_flow_error_set(error, EINVAL,
2157                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2158                                           "cannot match on a port from a"
2159                                           " different E-Switch");
2160         return 0;
2161 }
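
/*
 * Sketch (a hypothetical helper, not used by the driver): as enforced
 * above, matching on a source port is only valid in transfer rules and
 * only with a full mask on the "id" field.
 */
static __rte_unused void
flow_dv_example_port_id_pattern(struct rte_flow_item *item,
				struct rte_flow_attr *attr,
				const struct rte_flow_item_port_id *spec)
{
	attr->transfer = 1; /* PORT_ID matching requires transfer rules */
	item->type = RTE_FLOW_ITEM_TYPE_PORT_ID;
	item->spec = spec;
	item->last = NULL;
	item->mask = &rte_flow_item_port_id_mask; /* full "id" mask */
}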
2162
2163 /**
2164  * Validate VLAN item.
2165  *
2166  * @param[in] item
2167  *   Item specification.
2168  * @param[in] item_flags
2169  *   Bit-fields that hold the items detected until now.
2170  * @param[in] dev
2171  *   Ethernet device flow is being created on.
2172  * @param[out] error
2173  *   Pointer to error structure.
2174  *
2175  * @return
2176  *   0 on success, a negative errno value otherwise and rte_errno is set.
2177  */
2178 static int
2179 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2180                            uint64_t item_flags,
2181                            struct rte_eth_dev *dev,
2182                            struct rte_flow_error *error)
2183 {
2184         const struct rte_flow_item_vlan *mask = item->mask;
2185         const struct rte_flow_item_vlan nic_mask = {
2186                 .tci = RTE_BE16(UINT16_MAX),
2187                 .inner_type = RTE_BE16(UINT16_MAX),
2188                 .has_more_vlan = 1,
2189         };
2190         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2191         int ret;
2192         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2193                                         MLX5_FLOW_LAYER_INNER_L4) :
2194                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2195                                         MLX5_FLOW_LAYER_OUTER_L4);
2196         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2197                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2198
2199         if (item_flags & vlanm)
2200                 return rte_flow_error_set(error, EINVAL,
2201                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2202                                           "multiple VLAN layers not supported");
2203         else if ((item_flags & l34m) != 0)
2204                 return rte_flow_error_set(error, EINVAL,
2205                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2206                                           "VLAN cannot follow L3/L4 layer");
2207         if (!mask)
2208                 mask = &rte_flow_item_vlan_mask;
2209         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2210                                         (const uint8_t *)&nic_mask,
2211                                         sizeof(struct rte_flow_item_vlan),
2212                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2213         if (ret)
2214                 return ret;
2215         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2216                 struct mlx5_priv *priv = dev->data->dev_private;
2217
2218                 if (priv->vmwa_context) {
2219                         /*
2220                          * A non-NULL context means we have a virtual machine
2221                          * with SR-IOV enabled, so a VLAN interface must be
2222                          * created to make the hypervisor set up the E-Switch
2223                          * vport context correctly. We avoid creating multiple
2224                          * VLAN interfaces, so we cannot support a VLAN tag mask.
2225                          */
2226                         return rte_flow_error_set(error, EINVAL,
2227                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2228                                                   item,
2229                                                   "VLAN tag mask is not"
2230                                                   " supported in virtual"
2231                                                   " environment");
2232                 }
2233         }
2234         return 0;
2235 }
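
/*
 * Sketch (a hypothetical helper, not used by the driver): in a virtual
 * environment the code above only accepts the full 12-bit VID mask, so
 * a portable VLAN match masks exactly the VID bits of the TCI.
 */
static __rte_unused void
flow_dv_example_vlan_vid_pattern(struct rte_flow_item *item,
				 const struct rte_flow_item_vlan *spec)
{
	static const struct rte_flow_item_vlan mask = {
		.tci = RTE_BE16(0x0fff), /* VID bits only, PCP/DEI ignored */
	};

	item->type = RTE_FLOW_ITEM_TYPE_VLAN;
	item->spec = spec;
	item->last = NULL;
	item->mask = &mask;
}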
2236
2237 /*
2238  * GTP flags are contained in 1 byte of the format:
2239  * -------------------------------------------
2240  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2241  * |-----------------------------------------|
2242  * | value | Version | PT | Res | E | S | PN |
2243  * -------------------------------------------
2244  *
2245  * Matching is supported only for GTP flags E, S, PN.
2246  */
2247 #define MLX5_GTP_FLAGS_MASK     0x07
2248
2249 /**
2250  * Validate GTP item.
2251  *
2252  * @param[in] dev
2253  *   Pointer to the rte_eth_dev structure.
2254  * @param[in] item
2255  *   Item specification.
2256  * @param[in] item_flags
2257  *   Bit-fields that hold the items detected until now.
2258  * @param[out] error
2259  *   Pointer to error structure.
2260  *
2261  * @return
2262  *   0 on success, a negative errno value otherwise and rte_errno is set.
2263  */
2264 static int
2265 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2266                           const struct rte_flow_item *item,
2267                           uint64_t item_flags,
2268                           struct rte_flow_error *error)
2269 {
2270         struct mlx5_priv *priv = dev->data->dev_private;
2271         const struct rte_flow_item_gtp *spec = item->spec;
2272         const struct rte_flow_item_gtp *mask = item->mask;
2273         const struct rte_flow_item_gtp nic_mask = {
2274                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2275                 .msg_type = 0xff,
2276                 .teid = RTE_BE32(0xffffffff),
2277         };
2278
2279         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2280                 return rte_flow_error_set(error, ENOTSUP,
2281                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2282                                           "GTP support is not enabled");
2283         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2284                 return rte_flow_error_set(error, ENOTSUP,
2285                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2286                                           "multiple tunnel layers not"
2287                                           " supported");
2288         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2289                 return rte_flow_error_set(error, EINVAL,
2290                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2291                                           "no outer UDP layer found");
2292         if (!mask)
2293                 mask = &rte_flow_item_gtp_mask;
2294         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2295                 return rte_flow_error_set(error, ENOTSUP,
2296                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2297                                           "Match is supported for GTP"
2298                                           " flags only");
2299         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2300                                          (const uint8_t *)&nic_mask,
2301                                          sizeof(struct rte_flow_item_gtp),
2302                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2303 }
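
/*
 * Sketch (a hypothetical helper, not used by the driver): GTP matching
 * is limited to the E/S/PN flags (MLX5_GTP_FLAGS_MASK), the message
 * type and the TEID; this pattern matches GTP-U echo requests
 * (message type 1) with TEID 0.
 */
static __rte_unused void
flow_dv_example_gtp_pattern(struct rte_flow_item *item)
{
	static const struct rte_flow_item_gtp spec = {
		.msg_type = 1, /* echo request */
		.teid = RTE_BE32(0),
	};
	static const struct rte_flow_item_gtp mask = {
		.msg_type = 0xff,
		.teid = RTE_BE32(0xffffffff),
	};

	item->type = RTE_FLOW_ITEM_TYPE_GTP;
	item->spec = &spec;
	item->last = NULL;
	item->mask = &mask;
}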
2304
2305 /**
2306  * Validate GTP PSC item.
2307  *
2308  * @param[in] item
2309  *   Item specification.
2310  * @param[in] last_item
2311  *   Previously validated item in the pattern items.
2312  * @param[in] gtp_item
2313  *   Previous GTP item specification.
2314  * @param[in] attr
2315  *   Pointer to flow attributes.
2316  * @param[out] error
2317  *   Pointer to error structure.
2318  *
2319  * @return
2320  *   0 on success, a negative errno value otherwise and rte_errno is set.
2321  */
2322 static int
2323 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2324                               uint64_t last_item,
2325                               const struct rte_flow_item *gtp_item,
2326                               const struct rte_flow_attr *attr,
2327                               struct rte_flow_error *error)
2328 {
2329         const struct rte_flow_item_gtp *gtp_spec;
2330         const struct rte_flow_item_gtp *gtp_mask;
2331         const struct rte_flow_item_gtp_psc *spec;
2332         const struct rte_flow_item_gtp_psc *mask;
2333         const struct rte_flow_item_gtp_psc nic_mask = {
2334                 .pdu_type = 0xFF,
2335                 .qfi = 0xFF,
2336         };
2337
2338         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2339                 return rte_flow_error_set
2340                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2341                          "GTP PSC item must be preceded by a GTP item");
2342         gtp_spec = gtp_item->spec;
2343         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2344         /* Reject a GTP spec that requests the E flag to match zero. */
2345         if (gtp_spec &&
2346                 (gtp_mask->v_pt_rsv_flags &
2347                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2348                 return rte_flow_error_set
2349                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2350                          "GTP E flag must be 1 to match GTP PSC");
2351         /* Check the flow is not created in group zero. */
2352         if (!attr->transfer && !attr->group)
2353                 return rte_flow_error_set
2354                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2355                          "GTP PSC is not supported for group 0");
2356         /* The GTP PSC item spec is optional; nothing more to check without it. */
2357         if (!item->spec)
2358                 return 0;
2359         spec = item->spec;
2360         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2361         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2362                 return rte_flow_error_set
2363                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                          "PDU type should be smaller than 16");
2365         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2366                                          (const uint8_t *)&nic_mask,
2367                                          sizeof(struct rte_flow_item_gtp_psc),
2368                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2369 }
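
/*
 * Sketch (a hypothetical helper, not used by the driver): per the
 * checks above a GTP PSC item must follow a GTP item that does not
 * force the E flag to zero, and non-transfer flows must not live in
 * group 0.
 */
static __rte_unused void
flow_dv_example_gtp_psc_pattern(struct rte_flow_item items[2],
				struct rte_flow_attr *attr)
{
	static const struct rte_flow_item_gtp_psc psc_spec = {
		.qfi = 9, /* match a single QoS flow identifier */
	};

	attr->group = 1; /* GTP PSC is rejected in non-transfer group 0 */
	items[0].type = RTE_FLOW_ITEM_TYPE_GTP;
	items[0].spec = NULL; /* no constraint on the E flag */
	items[0].last = NULL;
	items[0].mask = NULL;
	items[1].type = RTE_FLOW_ITEM_TYPE_GTP_PSC;
	items[1].spec = &psc_spec;
	items[1].last = NULL;
	items[1].mask = &rte_flow_item_gtp_psc_mask;
}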
2370
2371 /**
2372  * Validate IPV4 item.
2373  * Use the existing validation function mlx5_flow_validate_item_ipv4()
2374  * and add specific validation of the fragment_offset field.
2375  *
2376  * @param[in] item
2377  *   Item specification.
2378  * @param[in] item_flags
2379  *   Bit-fields that hold the items detected until now.
2380  * @param[out] error
2381  *   Pointer to error structure.
2382  *
2383  * @return
2384  *   0 on success, a negative errno value otherwise and rte_errno is set.
2385  */
2386 static int
2387 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2388                            uint64_t item_flags,
2389                            uint64_t last_item,
2390                            uint16_t ether_type,
2391                            struct rte_flow_error *error)
2392 {
2393         int ret;
2394         const struct rte_flow_item_ipv4 *spec = item->spec;
2395         const struct rte_flow_item_ipv4 *last = item->last;
2396         const struct rte_flow_item_ipv4 *mask = item->mask;
2397         rte_be16_t fragment_offset_spec = 0;
2398         rte_be16_t fragment_offset_last = 0;
2399         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2400                 .hdr = {
2401                         .src_addr = RTE_BE32(0xffffffff),
2402                         .dst_addr = RTE_BE32(0xffffffff),
2403                         .type_of_service = 0xff,
2404                         .fragment_offset = RTE_BE16(0xffff),
2405                         .next_proto_id = 0xff,
2406                         .time_to_live = 0xff,
2407                 },
2408         };
2409
2410         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2411                                            ether_type, &nic_ipv4_mask,
2412                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2413         if (ret < 0)
2414                 return ret;
2415         if (spec && mask)
2416                 fragment_offset_spec = spec->hdr.fragment_offset &
2417                                        mask->hdr.fragment_offset;
2418         if (!fragment_offset_spec)
2419                 return 0;
2420         /*
2421          * spec and mask are valid, enforce using full mask to make sure the
2422          * complete value is used correctly.
2423          */
2424         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2425                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2426                 return rte_flow_error_set(error, EINVAL,
2427                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2428                                           item, "must use full mask for"
2429                                           " fragment_offset");
2430         /*
2431          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2432          * indicating this is the 1st fragment of a fragmented packet.
2433          * This is not yet supported in MLX5, return appropriate error message.
2434          */
2435         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2436                 return rte_flow_error_set(error, ENOTSUP,
2437                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2438                                           "match on first fragment not "
2439                                           "supported");
2440         if (fragment_offset_spec && !last)
2441                 return rte_flow_error_set(error, ENOTSUP,
2442                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2443                                           "specified value not supported");
2444         /* spec and last are valid, validate the specified range. */
2445         fragment_offset_last = last->hdr.fragment_offset &
2446                                mask->hdr.fragment_offset;
2447         /*
2448          * Match on fragment_offset spec 0x2001 and last 0x3fff
2449          * means MF is 1 and frag-offset is > 0.
2450          * Such a packet is the 2nd or a later fragment, excluding the last.
2451          * This is not yet supported in MLX5, return appropriate
2452          * error message.
2453          */
2454         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2455             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2456                 return rte_flow_error_set(error, ENOTSUP,
2457                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2458                                           last, "match on following "
2459                                           "fragments not supported");
2460         /*
2461          * Match on fragment_offset spec 0x0001 and last 0x1fff
2462          * means MF is 0 and frag-offset is > 0.
2463          * This packet is the last fragment of a fragmented packet.
2464          * This is not yet supported in MLX5, return appropriate
2465          * error message.
2466          */
2467         if (fragment_offset_spec == RTE_BE16(1) &&
2468             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2469                 return rte_flow_error_set(error, ENOTSUP,
2470                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2471                                           last, "match on last "
2472                                           "fragment not supported");
2473         /*
2474          * Match on fragment_offset spec 0x0001 and last 0x3fff
2475          * means MF and/or frag-offset is not 0.
2476          * This is a fragmented packet.
2477          * Other range values are invalid and rejected.
2478          */
2479         if (!(fragment_offset_spec == RTE_BE16(1) &&
2480               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2481                 return rte_flow_error_set(error, ENOTSUP,
2482                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2483                                           "specified range not supported");
2484         return 0;
2485 }
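/*
 * Illustrative sketch only, not part of the driver: the single
 * fragment_offset spec/last range accepted by the validator above is
 * spec 0x0001 with last 0x3fff under the full 0x3fff mask, i.e.
 * "MF and/or fragment offset is non-zero" -- any fragmented packet.
 * The helper name below is hypothetical.
 */
static __rte_unused void
example_ipv4_any_fragment_pattern(struct rte_flow_item_ipv4 *spec,
                                  struct rte_flow_item_ipv4 *last,
                                  struct rte_flow_item_ipv4 *mask)
{
        memset(spec, 0, sizeof(*spec));
        memset(last, 0, sizeof(*last));
        memset(mask, 0, sizeof(*mask));
        /* MF and/or fragment offset is non-zero: a fragmented packet. */
        spec->hdr.fragment_offset = RTE_BE16(1);
        last->hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK);
        /* A full mask is mandatory, partial masks are rejected above. */
        mask->hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK);
}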
2486
2487 /**
2488  * Validate IPv6 fragment extension item.
2489  *
2490  * @param[in] item
2491  *   Item specification.
2492  * @param[in] item_flags
2493  *   Bit-fields that holds the items detected until now.
2494  * @param[out] error
2495  *   Pointer to error structure.
2496  *
2497  * @return
2498  *   0 on success, a negative errno value otherwise and rte_errno is set.
2499  */
2500 static int
2501 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2502                                     uint64_t item_flags,
2503                                     struct rte_flow_error *error)
2504 {
2505         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2506         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2507         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2508         rte_be16_t frag_data_spec = 0;
2509         rte_be16_t frag_data_last = 0;
2510         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2511         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2512                                       MLX5_FLOW_LAYER_OUTER_L4;
2513         int ret = 0;
2514         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2515                 .hdr = {
2516                         .next_header = 0xff,
2517                         .frag_data = RTE_BE16(0xffff),
2518                 },
2519         };
2520
2521         if (item_flags & l4m)
2522                 return rte_flow_error_set(error, EINVAL,
2523                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2524                                           "ipv6 fragment extension item cannot "
2525                                           "follow L4 item.");
2526         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2527             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2528                 return rte_flow_error_set(error, EINVAL,
2529                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2530                                           "ipv6 fragment extension item must "
2531                                           "follow ipv6 item");
2532         if (spec && mask)
2533                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2534         if (!frag_data_spec)
2535                 return 0;
2536         /*
2537          * spec and mask are valid, enforce using full mask to make sure the
2538          * complete value is used correctly.
2539          */
2540         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2541                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2542                 return rte_flow_error_set(error, EINVAL,
2543                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2544                                           item, "must use full mask for"
2545                                           " frag_data");
2546         /*
2547          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2548          * This is the first fragment of a fragmented packet.
2549          */
2550         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2551                 return rte_flow_error_set(error, ENOTSUP,
2552                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2553                                           "match on first fragment not "
2554                                           "supported");
2555         if (frag_data_spec && !last)
2556                 return rte_flow_error_set(error, EINVAL,
2557                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2558                                           "specified value not supported");
2559         ret = mlx5_flow_item_acceptable
2560                                 (item, (const uint8_t *)mask,
2561                                  (const uint8_t *)&nic_mask,
2562                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2563                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2564         if (ret)
2565                 return ret;
2566         /* spec and last are valid, validate the specified range. */
2567         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2568         /*
2569          * Match on frag_data spec 0x0009 and last 0xfff9
2570          * means M is 1 and frag-offset is > 0.
2571          * This matches the second fragment onward, excluding the last one.
2572          * This is not yet supported in MLX5, return appropriate
2573          * error message.
2574          */
2575         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2576                                        RTE_IPV6_EHDR_MF_MASK) &&
2577             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2578                 return rte_flow_error_set(error, ENOTSUP,
2579                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2580                                           last, "match on following "
2581                                           "fragments not supported");
2582         /*
2583          * Match on frag_data spec 0x0008 and last 0xfff8
2584          * means M is 0 and frag-offset is > 0.
2585          * This matches the last fragment of a fragmented packet.
2586          * This is not yet supported in MLX5, return appropriate
2587          * error message.
2588          */
2589         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2590             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2591                 return rte_flow_error_set(error, ENOTSUP,
2592                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2593                                           last, "match on last "
2594                                           "fragment not supported");
2595         /*
2596          * Match on frag_data spec 0x0001 and last 0xfff9
2597          * means M and/or frag-offset is not 0.
2598          * This is a fragmented packet.
2599          * Other range values are invalid and rejected.
2600          */
2601         if (!(frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK) &&
2602               frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK)))
2603                 return rte_flow_error_set(error, EINVAL,
2604                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2605                                           "specified range not supported");
2606         return 0;
2607 }
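/*
 * Illustrative sketch only, not part of the driver: the matching IPv6
 * case -- spec 0x0001 (the M bit) with last 0xfff9 under the full
 * used-bits mask selects any fragmented packet. The helper name below
 * is hypothetical.
 */
static __rte_unused void
example_ipv6_any_fragment_pattern(struct rte_flow_item_ipv6_frag_ext *spec,
                                  struct rte_flow_item_ipv6_frag_ext *last,
                                  struct rte_flow_item_ipv6_frag_ext *mask)
{
        memset(spec, 0, sizeof(*spec));
        memset(last, 0, sizeof(*last));
        memset(mask, 0, sizeof(*mask));
        /* M and/or fragment offset is non-zero: a fragmented packet. */
        spec->hdr.frag_data = RTE_BE16(RTE_IPV6_EHDR_MF_MASK);
        last->hdr.frag_data = RTE_BE16(RTE_IPV6_FRAG_USED_MASK);
        /* Full coverage of the used bits is mandatory (checked above). */
        mask->hdr.frag_data = RTE_BE16(RTE_IPV6_FRAG_USED_MASK);
}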
2600
2601 /**
2602  * Validate the pop VLAN action.
2603  *
2604  * @param[in] dev
2605  *   Pointer to the rte_eth_dev structure.
2606  * @param[in] action_flags
2607  *   Holds the actions detected until now.
2608  * @param[in] action
2609  *   Pointer to the pop vlan action.
2610  * @param[in] item_flags
2611  *   The items found in this flow rule.
2612  * @param[in] attr
2613  *   Pointer to flow attributes.
2614  * @param[out] error
2615  *   Pointer to error structure.
2616  *
2617  * @return
2618  *   0 on success, a negative errno value otherwise and rte_errno is set.
2619  */
2620 static int
2621 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2622                                  uint64_t action_flags,
2623                                  const struct rte_flow_action *action,
2624                                  uint64_t item_flags,
2625                                  const struct rte_flow_attr *attr,
2626                                  struct rte_flow_error *error)
2627 {
2628         const struct mlx5_priv *priv = dev->data->dev_private;
2629
2630         (void)action;
2631         (void)attr;
2632         if (!priv->sh->pop_vlan_action)
2633                 return rte_flow_error_set(error, ENOTSUP,
2634                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2635                                           NULL,
2636                                           "pop vlan action is not supported");
2637         if (attr->egress)
2638                 return rte_flow_error_set(error, ENOTSUP,
2639                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2640                                           NULL,
2641                                           "pop vlan action not supported for "
2642                                           "egress");
2643         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2644                 return rte_flow_error_set(error, ENOTSUP,
2645                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2646                                           "no support for multiple VLAN "
2647                                           "actions");
2648         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2649         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2650             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2651                 return rte_flow_error_set(error, ENOTSUP,
2652                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2653                                           NULL,
2654                                           "cannot pop vlan after decap without "
2655                                           "match on inner vlan in the flow");
2656         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2657         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2658             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2659                 return rte_flow_error_set(error, ENOTSUP,
2660                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2661                                           NULL,
2662                                           "cannot pop vlan without a "
2663                                           "match on (outer) vlan in the flow");
2664         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2665                 return rte_flow_error_set(error, EINVAL,
2666                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2667                                           "wrong action order, port_id should "
2668                                           "be after pop VLAN action");
2669         if (!attr->transfer && priv->representor)
2670                 return rte_flow_error_set(error, ENOTSUP,
2671                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2672                                           "pop vlan action for VF representor "
2673                                           "not supported on NIC table");
2674         return 0;
2675 }
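/*
 * Illustrative sketch only, not part of the driver: an action order
 * that satisfies the checks above -- the rule matches an outer VLAN
 * and pops it before any port_id action. The helper name is
 * hypothetical.
 */
static __rte_unused void
example_pop_vlan_action_order(struct rte_flow_action actions[3],
                              const struct rte_flow_action_port_id *port)
{
        /* Pop VLAN first; a port_id action placed before it is rejected. */
        actions[0] = (struct rte_flow_action) {
                .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
        };
        actions[1] = (struct rte_flow_action) {
                .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
                .conf = port,
        };
        actions[2] = (struct rte_flow_action) {
                .type = RTE_FLOW_ACTION_TYPE_END,
        };
}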
2676
2677 /**
2678  * Get VLAN default info from vlan match info.
2679  *
2680  * @param[in] items
2681  *   The list of item specifications.
2682  * @param[out] vlan
2683  *   Pointer to the VLAN info to fill.
2684  *
2685  * @note
2686  *   The function returns nothing; the VLAN info is updated in place.
2687  */
2688 static void
2689 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2690                                   struct rte_vlan_hdr *vlan)
2691 {
2692         const struct rte_flow_item_vlan nic_mask = {
2693                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2694                                 MLX5DV_FLOW_VLAN_VID_MASK),
2695                 .inner_type = RTE_BE16(0xffff),
2696         };
2697
2698         if (items == NULL)
2699                 return;
2700         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2701                 int type = items->type;
2702
2703                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2704                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2705                         break;
2706         }
2707         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2708                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2709                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2710
2711                 /* If VLAN item in pattern doesn't contain data, return here. */
2712                 if (!vlan_v)
2713                         return;
2714                 if (!vlan_m)
2715                         vlan_m = &nic_mask;
2716                 /* Only full match values are accepted */
2717                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2718                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2719                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2720                         vlan->vlan_tci |=
2721                                 rte_be_to_cpu_16(vlan_v->tci &
2722                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2723                 }
2724                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2725                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2726                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2727                         vlan->vlan_tci |=
2728                                 rte_be_to_cpu_16(vlan_v->tci &
2729                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2730                 }
2731                 if (vlan_m->inner_type == nic_mask.inner_type)
2732                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2733                                                            vlan_m->inner_type);
2734         }
2735 }
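/*
 * Worked example (comment only): the extraction above keeps both TCI
 * fields in place. With a fully-masked TCI of 0xe07b, the PCP bits
 * are 0xe07b & 0xe000 = 0xe000 (PCP 7 after the 13-bit shift) and the
 * VID bits are 0xe07b & 0x0fff = 0x007b, so vlan->vlan_tci ends up
 * holding 0xe07b.
 */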
2736
2737 /**
2738  * Validate the push VLAN action.
2739  *
2740  * @param[in] dev
2741  *   Pointer to the rte_eth_dev structure.
2742  * @param[in] action_flags
2743  *   Holds the actions detected until now.
2744  * @param[in] vlan_m
2745  *   Pointer to the VLAN item mask, or NULL when the pattern has no VLAN item.
2746  * @param[in] action
2747  *   Pointer to the action structure.
2748  * @param[in] attr
2749  *   Pointer to flow attributes
2750  * @param[out] error
2751  *   Pointer to error structure.
2752  *
2753  * @return
2754  *   0 on success, a negative errno value otherwise and rte_errno is set.
2755  */
2756 static int
2757 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2758                                   uint64_t action_flags,
2759                                   const struct rte_flow_item_vlan *vlan_m,
2760                                   const struct rte_flow_action *action,
2761                                   const struct rte_flow_attr *attr,
2762                                   struct rte_flow_error *error)
2763 {
2764         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2765         const struct mlx5_priv *priv = dev->data->dev_private;
2766
2767         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2768             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2769                 return rte_flow_error_set(error, EINVAL,
2770                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2771                                           "invalid vlan ethertype");
2772         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2773                 return rte_flow_error_set(error, EINVAL,
2774                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2775                                           "wrong action order, port_id should "
2776                                           "be after push VLAN");
2777         if (!attr->transfer && priv->representor)
2778                 return rte_flow_error_set(error, ENOTSUP,
2779                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2780                                           "push vlan action for VF representor "
2781                                           "not supported on NIC table");
2782         if (vlan_m &&
2783             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2784             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2785                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2786             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2787             !(mlx5_flow_find_action
2788                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2789                 return rte_flow_error_set(error, EINVAL,
2790                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2791                                           "not full match mask on VLAN PCP and "
2792                                           "there is no of_set_vlan_pcp action, "
2793                                           "push VLAN action cannot figure out "
2794                                           "PCP value");
2795         if (vlan_m &&
2796             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2797             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2798                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2799             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2800             !(mlx5_flow_find_action
2801                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2802                 return rte_flow_error_set(error, EINVAL,
2803                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2804                                           "not full match mask on VLAN VID and "
2805                                           "there is no of_set_vlan_vid action, "
2806                                           "push VLAN action cannot figure out "
2807                                           "VID value");
2808         (void)attr;
2809         return 0;
2810 }
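/*
 * Illustrative sketch only, not part of the driver: when the VLAN
 * item mask does not fully cover PCP and VID, explicit setter actions
 * after the push satisfy the checks above. The helper and parameter
 * names are hypothetical.
 */
static __rte_unused void
example_push_vlan_with_explicit_tci
        (struct rte_flow_action actions[4],
         const struct rte_flow_action_of_push_vlan *push,
         const struct rte_flow_action_of_set_vlan_vid *vid,
         const struct rte_flow_action_of_set_vlan_pcp *pcp)
{
        actions[0] = (struct rte_flow_action) {
                .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = push,
        };
        /* Both setters must come after the push and before port_id. */
        actions[1] = (struct rte_flow_action) {
                .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = vid,
        };
        actions[2] = (struct rte_flow_action) {
                .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = pcp,
        };
        actions[3] = (struct rte_flow_action) {
                .type = RTE_FLOW_ACTION_TYPE_END,
        };
}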
2811
2812 /**
2813  * Validate the set VLAN PCP.
2814  *
2815  * @param[in] action_flags
2816  *   Holds the actions detected until now.
2817  * @param[in] actions
2818  *   Pointer to the list of actions remaining in the flow rule.
2819  * @param[out] error
2820  *   Pointer to error structure.
2821  *
2822  * @return
2823  *   0 on success, a negative errno value otherwise and rte_errno is set.
2824  */
2825 static int
2826 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2827                                      const struct rte_flow_action actions[],
2828                                      struct rte_flow_error *error)
2829 {
2830         const struct rte_flow_action *action = actions;
2831         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2832
2833         if (conf->vlan_pcp > 7)
2834                 return rte_flow_error_set(error, EINVAL,
2835                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2836                                           "VLAN PCP value is too big");
2837         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2838                 return rte_flow_error_set(error, ENOTSUP,
2839                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2840                                           "set VLAN PCP action must follow "
2841                                           "the push VLAN action");
2842         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2843                 return rte_flow_error_set(error, ENOTSUP,
2844                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2845                                           "Multiple VLAN PCP modification are "
2846                                           "not supported");
2847         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2848                 return rte_flow_error_set(error, EINVAL,
2849                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2850                                           "wrong action order, port_id should "
2851                                           "be after set VLAN PCP");
2852         return 0;
2853 }
2854
2855 /**
2856  * Validate the set VLAN VID.
2857  *
2858  * @param[in] item_flags
2859  *   Holds the items detected in this rule.
2860  * @param[in] action_flags
2861  *   Holds the actions detected until now.
2862  * @param[in] actions
2863  *   Pointer to the list of actions remaining in the flow rule.
2864  * @param[out] error
2865  *   Pointer to error structure.
2866  *
2867  * @return
2868  *   0 on success, a negative errno value otherwise and rte_errno is set.
2869  */
2870 static int
2871 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2872                                      uint64_t action_flags,
2873                                      const struct rte_flow_action actions[],
2874                                      struct rte_flow_error *error)
2875 {
2876         const struct rte_flow_action *action = actions;
2877         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2878
2879         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2880                 return rte_flow_error_set(error, EINVAL,
2881                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2882                                           "VLAN VID value is too big");
2883         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2884             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2885                 return rte_flow_error_set(error, ENOTSUP,
2886                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2887                                           "set VLAN VID action must follow push"
2888                                           " VLAN action or match on VLAN item");
2889         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2890                 return rte_flow_error_set(error, ENOTSUP,
2891                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2892                                           "Multiple VLAN VID modifications are "
2893                                           "not supported");
2894         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2895                 return rte_flow_error_set(error, EINVAL,
2896                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2897                                           "wrong action order, port_id should "
2898                                           "be after set VLAN VID");
2899         return 0;
2900 }
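/*
 * Note on the 0xFFE bound above (comment only): VID 0xFFF is reserved
 * by IEEE 802.1Q, so the largest settable VID is 4094 (0xFFE). For
 * example, vlan_vid = RTE_BE16(4095) converts to 0xFFF, which is
 * greater than 0xFFE and is therefore rejected.
 */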
2901
2902 /**
2903  * Validate the FLAG action.
2904  *
2905  * @param[in] dev
2906  *   Pointer to the rte_eth_dev structure.
2907  * @param[in] action_flags
2908  *   Holds the actions detected until now.
2909  * @param[in] attr
2910  *   Pointer to flow attributes
2911  * @param[out] error
2912  *   Pointer to error structure.
2913  *
2914  * @return
2915  *   0 on success, a negative errno value otherwise and rte_errno is set.
2916  */
2917 static int
2918 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2919                              uint64_t action_flags,
2920                              const struct rte_flow_attr *attr,
2921                              struct rte_flow_error *error)
2922 {
2923         struct mlx5_priv *priv = dev->data->dev_private;
2924         struct mlx5_dev_config *config = &priv->config;
2925         int ret;
2926
2927         /* Fall back if no extended metadata register support. */
2928         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2929                 return mlx5_flow_validate_action_flag(action_flags, attr,
2930                                                       error);
2931         /* Extensive metadata mode requires registers. */
2932         if (!mlx5_flow_ext_mreg_supported(dev))
2933                 return rte_flow_error_set(error, ENOTSUP,
2934                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2935                                           "no metadata registers "
2936                                           "to support flag action");
2937         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2938                 return rte_flow_error_set(error, ENOTSUP,
2939                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2940                                           "extended metadata register"
2941                                           " isn't available");
2942         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2943         if (ret < 0)
2944                 return ret;
2945         MLX5_ASSERT(ret > 0);
2946         if (action_flags & MLX5_FLOW_ACTION_MARK)
2947                 return rte_flow_error_set(error, EINVAL,
2948                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2949                                           "can't mark and flag in same flow");
2950         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2951                 return rte_flow_error_set(error, EINVAL,
2952                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2953                                           "can't have 2 flag"
2954                                           " actions in same flow");
2955         return 0;
2956 }
2957
2958 /**
2959  * Validate MARK action.
2960  *
2961  * @param[in] dev
2962  *   Pointer to the rte_eth_dev structure.
2963  * @param[in] action
2964  *   Pointer to action.
2965  * @param[in] action_flags
2966  *   Holds the actions detected until now.
2967  * @param[in] attr
2968  *   Pointer to flow attributes
2969  * @param[out] error
2970  *   Pointer to error structure.
2971  *
2972  * @return
2973  *   0 on success, a negative errno value otherwise and rte_errno is set.
2974  */
2975 static int
2976 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2977                              const struct rte_flow_action *action,
2978                              uint64_t action_flags,
2979                              const struct rte_flow_attr *attr,
2980                              struct rte_flow_error *error)
2981 {
2982         struct mlx5_priv *priv = dev->data->dev_private;
2983         struct mlx5_dev_config *config = &priv->config;
2984         const struct rte_flow_action_mark *mark = action->conf;
2985         int ret;
2986
2987         if (is_tunnel_offload_active(dev))
2988                 return rte_flow_error_set(error, ENOTSUP,
2989                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2990                                           "no mark action "
2991                                           "if tunnel offload active");
2992         /* Fall back if no extended metadata register support. */
2993         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2994                 return mlx5_flow_validate_action_mark(action, action_flags,
2995                                                       attr, error);
2996         /* Extensive metadata mode requires registers. */
2997         if (!mlx5_flow_ext_mreg_supported(dev))
2998                 return rte_flow_error_set(error, ENOTSUP,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3000                                           "no metadata registers "
3001                                           "to support mark action");
3002         if (!priv->sh->dv_mark_mask)
3003                 return rte_flow_error_set(error, ENOTSUP,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3005                                           "extended metadata register"
3006                                           " isn't available");
3007         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3008         if (ret < 0)
3009                 return ret;
3010         MLX5_ASSERT(ret > 0);
3011         if (!mark)
3012                 return rte_flow_error_set(error, EINVAL,
3013                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3014                                           "configuration cannot be null");
3015         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3016                 return rte_flow_error_set(error, EINVAL,
3017                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3018                                           &mark->id,
3019                                           "mark id exceeds the limit");
3020         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3021                 return rte_flow_error_set(error, EINVAL,
3022                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3023                                           "can't flag and mark in same flow");
3024         if (action_flags & MLX5_FLOW_ACTION_MARK)
3025                 return rte_flow_error_set(error, EINVAL,
3026                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3027                                           "can't have 2 mark actions in same"
3028                                           " flow");
3029         return 0;
3030 }
3031
3032 /**
3033  * Validate SET_META action.
3034  *
3035  * @param[in] dev
3036  *   Pointer to the rte_eth_dev structure.
3037  * @param[in] action
3038  *   Pointer to the action structure.
3039  * @param[in] action_flags
3040  *   Holds the actions detected until now.
3041  * @param[in] attr
3042  *   Pointer to flow attributes
3043  * @param[out] error
3044  *   Pointer to error structure.
3045  *
3046  * @return
3047  *   0 on success, a negative errno value otherwise and rte_errno is set.
3048  */
3049 static int
3050 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3051                                  const struct rte_flow_action *action,
3052                                  uint64_t action_flags __rte_unused,
3053                                  const struct rte_flow_attr *attr,
3054                                  struct rte_flow_error *error)
3055 {
3056         const struct rte_flow_action_set_meta *conf;
3057         uint32_t nic_mask = UINT32_MAX;
3058         int reg;
3059
3060         if (!mlx5_flow_ext_mreg_supported(dev))
3061                 return rte_flow_error_set(error, ENOTSUP,
3062                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3063                                           "extended metadata register"
3064                                           " isn't supported");
3065         reg = flow_dv_get_metadata_reg(dev, attr, error);
3066         if (reg < 0)
3067                 return reg;
3068         if (reg == REG_NON)
3069                 return rte_flow_error_set(error, ENOTSUP,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3071                                           "unavailable extended metadata register");
3072         if (reg != REG_A && reg != REG_B) {
3073                 struct mlx5_priv *priv = dev->data->dev_private;
3074
3075                 nic_mask = priv->sh->dv_meta_mask;
3076         }
3077         if (!(action->conf))
3078                 return rte_flow_error_set(error, EINVAL,
3079                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3080                                           "configuration cannot be null");
3081         conf = (const struct rte_flow_action_set_meta *)action->conf;
3082         if (!conf->mask)
3083                 return rte_flow_error_set(error, EINVAL,
3084                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3085                                           "zero mask doesn't have any effect");
3086         if (conf->mask & ~nic_mask)
3087                 return rte_flow_error_set(error, EINVAL,
3088                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3089                                           "meta data must be within reg C0");
3090         return 0;
3091 }
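/*
 * Illustrative sketch only, not part of the driver: a SET_META
 * configuration that passes the checks above -- a non-zero mask
 * confined to the available register bits. The helper name is
 * hypothetical.
 */
static __rte_unused void
example_set_meta_conf(struct rte_flow_action_set_meta *conf,
                      uint32_t nic_mask)
{
        /* A zero mask is rejected; so are bits outside nic_mask. */
        conf->mask = nic_mask;
        conf->data = 0x1234 & nic_mask;
}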
3092
3093 /**
3094  * Validate SET_TAG action.
3095  *
3096  * @param[in] dev
3097  *   Pointer to the rte_eth_dev structure.
3098  * @param[in] action
3099  *   Pointer to the action structure.
3100  * @param[in] action_flags
3101  *   Holds the actions detected until now.
3102  * @param[in] attr
3103  *   Pointer to flow attributes
3104  * @param[out] error
3105  *   Pointer to error structure.
3106  *
3107  * @return
3108  *   0 on success, a negative errno value otherwise and rte_errno is set.
3109  */
3110 static int
3111 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3112                                 const struct rte_flow_action *action,
3113                                 uint64_t action_flags,
3114                                 const struct rte_flow_attr *attr,
3115                                 struct rte_flow_error *error)
3116 {
3117         const struct rte_flow_action_set_tag *conf;
3118         const uint64_t terminal_action_flags =
3119                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3120                 MLX5_FLOW_ACTION_RSS;
3121         int ret;
3122
3123         if (!mlx5_flow_ext_mreg_supported(dev))
3124                 return rte_flow_error_set(error, ENOTSUP,
3125                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3126                                           "extensive metadata register"
3127                                           " isn't supported");
3128         if (!(action->conf))
3129                 return rte_flow_error_set(error, EINVAL,
3130                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3131                                           "configuration cannot be null");
3132         conf = (const struct rte_flow_action_set_tag *)action->conf;
3133         if (!conf->mask)
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3136                                           "zero mask doesn't have any effect");
3137         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3138         if (ret < 0)
3139                 return ret;
3140         if (!attr->transfer && attr->ingress &&
3141             (action_flags & terminal_action_flags))
3142                 return rte_flow_error_set(error, EINVAL,
3143                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3144                                           "set_tag has no effect"
3145                                           " with terminal actions");
3146         return 0;
3147 }
3148
3149 /**
3150  * Check if the action counter is shared by either the old or the new mechanism.
3151  *
3152  * @param[in] action
3153  *   Pointer to the action structure.
3154  *
3155  * @return
3156  *   True when counter is shared, false otherwise.
3157  */
3158 static inline bool
3159 is_shared_action_count(const struct rte_flow_action *action)
3160 {
3161         const struct rte_flow_action_count *count =
3162                         (const struct rte_flow_action_count *)action->conf;
3163
3164         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3165                 return true;
3166         return !!(count && count->shared);
3167 }
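/*
 * Illustrative sketch only, not part of the driver: the two ways the
 * helper above reports a shared counter -- the internal
 * MLX5_RTE_FLOW_ACTION_TYPE_COUNT type, or a regular COUNT action
 * whose configuration has the shared bit set, as below. The function
 * name is hypothetical.
 */
static __rte_unused bool
example_shared_count_action(void)
{
        static const struct rte_flow_action_count conf = {
                .shared = 1,
                .id = 0,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
                .conf = &conf,
        };

        return is_shared_action_count(&action); /* Evaluates to true. */
}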
3168
3169 /**
3170  * Validate count action.
3171  *
3172  * @param[in] dev
3173  *   Pointer to rte_eth_dev structure.
3174  * @param[in] shared
3175  *   Indicator if action is shared.
3176  * @param[in] action_flags
3177  *   Holds the actions detected until now.
3178  * @param[out] error
3179  *   Pointer to error structure.
3180  *
3181  * @return
3182  *   0 on success, a negative errno value otherwise and rte_errno is set.
3183  */
3184 static int
3185 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3186                               uint64_t action_flags,
3187                               struct rte_flow_error *error)
3188 {
3189         struct mlx5_priv *priv = dev->data->dev_private;
3190
3191         if (!priv->config.devx)
3192                 goto notsup_err;
3193         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3194                 return rte_flow_error_set(error, EINVAL,
3195                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3196                                           "duplicate count actions set");
3197         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3198             !priv->sh->flow_hit_aso_en)
3199                 return rte_flow_error_set(error, EINVAL,
3200                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3201                                           "old age and shared count combination is not supported");
3202 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3203         return 0;
3204 #endif
3205 notsup_err:
3206         return rte_flow_error_set
3207                       (error, ENOTSUP,
3208                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3209                        NULL,
3210                        "count action not supported");
3211 }
3212
3213 /**
3214  * Validate the L2 encap action.
3215  *
3216  * @param[in] dev
3217  *   Pointer to the rte_eth_dev structure.
3218  * @param[in] action_flags
3219  *   Holds the actions detected until now.
3220  * @param[in] action
3221  *   Pointer to the action structure.
3222  * @param[in] attr
3223  *   Pointer to flow attributes.
3224  * @param[out] error
3225  *   Pointer to error structure.
3226  *
3227  * @return
3228  *   0 on success, a negative errno value otherwise and rte_errno is set.
3229  */
3230 static int
3231 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3232                                  uint64_t action_flags,
3233                                  const struct rte_flow_action *action,
3234                                  const struct rte_flow_attr *attr,
3235                                  struct rte_flow_error *error)
3236 {
3237         const struct mlx5_priv *priv = dev->data->dev_private;
3238
3239         if (!(action->conf))
3240                 return rte_flow_error_set(error, EINVAL,
3241                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3242                                           "configuration cannot be null");
3243         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3244                 return rte_flow_error_set(error, EINVAL,
3245                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3246                                           "can only have a single encap action "
3247                                           "in a flow");
3248         if (!attr->transfer && priv->representor)
3249                 return rte_flow_error_set(error, ENOTSUP,
3250                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3251                                           "encap action for VF representor "
3252                                           "not supported on NIC table");
3253         return 0;
3254 }
3255
3256 /**
3257  * Validate a decap action.
3258  *
3259  * @param[in] dev
3260  *   Pointer to the rte_eth_dev structure.
3261  * @param[in] action_flags
3262  *   Holds the actions detected until now.
3263  * @param[in] action
3264  *   Pointer to the action structure.
3265  * @param[in] item_flags
3266  *   Holds the items detected.
3267  * @param[in] attr
3268  *   Pointer to flow attributes
3269  * @param[out] error
3270  *   Pointer to error structure.
3271  *
3272  * @return
3273  *   0 on success, a negative errno value otherwise and rte_errno is set.
3274  */
3275 static int
3276 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3277                               uint64_t action_flags,
3278                               const struct rte_flow_action *action,
3279                               const uint64_t item_flags,
3280                               const struct rte_flow_attr *attr,
3281                               struct rte_flow_error *error)
3282 {
3283         const struct mlx5_priv *priv = dev->data->dev_private;
3284
3285         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3286             !priv->config.decap_en)
3287                 return rte_flow_error_set(error, ENOTSUP,
3288                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3289                                           "decap is not enabled");
3290         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3291                 return rte_flow_error_set(error, ENOTSUP,
3292                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3293                                           action_flags &
3294                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3295                                           "have a single decap action" : "decap "
3296                                           "after encap is not supported");
3297         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3298                 return rte_flow_error_set(error, EINVAL,
3299                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3300                                           "can't have decap action after"
3301                                           " modify action");
3302         if (attr->egress)
3303                 return rte_flow_error_set(error, ENOTSUP,
3304                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3305                                           NULL,
3306                                           "decap action not supported for "
3307                                           "egress");
3308         if (!attr->transfer && priv->representor)
3309                 return rte_flow_error_set(error, ENOTSUP,
3310                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3311                                           "decap action for VF representor "
3312                                           "not supported on NIC table");
3313         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3314             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3315                 return rte_flow_error_set(error, ENOTSUP,
3316                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3317                                 "VXLAN item should be present for VXLAN decap");
3318         return 0;
3319 }
3320
3321 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3322
3323 /**
3324  * Validate the raw encap and decap actions.
3325  *
3326  * @param[in] dev
3327  *   Pointer to the rte_eth_dev structure.
3328  * @param[in] decap
3329  *   Pointer to the decap action.
3330  * @param[in] encap
3331  *   Pointer to the encap action.
3332  * @param[in] attr
3333  *   Pointer to flow attributes
3334  * @param[in, out] action_flags
3335  *   Holds the actions detected until now.
3336  * @param[out] actions_n
3337  *   pointer to the number of actions counter.
3338  * @param[in] action
3339  *   Pointer to the action structure.
3340  * @param[in] item_flags
3341  *   Holds the items detected.
3342  * @param[out] error
3343  *   Pointer to error structure.
3344  *
3345  * @return
3346  *   0 on success, a negative errno value otherwise and rte_errno is set.
3347  */
3348 static int
3349 flow_dv_validate_action_raw_encap_decap
3350         (struct rte_eth_dev *dev,
3351          const struct rte_flow_action_raw_decap *decap,
3352          const struct rte_flow_action_raw_encap *encap,
3353          const struct rte_flow_attr *attr, uint64_t *action_flags,
3354          int *actions_n, const struct rte_flow_action *action,
3355          uint64_t item_flags, struct rte_flow_error *error)
3356 {
3357         const struct mlx5_priv *priv = dev->data->dev_private;
3358         int ret;
3359
3360         if (encap && (!encap->size || !encap->data))
3361                 return rte_flow_error_set(error, EINVAL,
3362                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3363                                           "raw encap data cannot be empty");
3364         if (decap && encap) {
3365                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3366                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3367                         /* L3 encap. */
3368                         decap = NULL;
3369                 else if (encap->size <=
3370                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3371                            decap->size >
3372                            MLX5_ENCAPSULATION_DECISION_SIZE)
3373                         /* L3 decap. */
3374                         encap = NULL;
3375                 else if (encap->size >
3376                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3377                            decap->size >
3378                            MLX5_ENCAPSULATION_DECISION_SIZE)
3379                         /* 2 L2 actions: encap and decap. */
3380                         ;
3381                 else
3382                         return rte_flow_error_set(error,
3383                                 ENOTSUP,
3384                                 RTE_FLOW_ERROR_TYPE_ACTION,
3385                                 NULL, "unsupported too small "
3386                                 "raw decap and too small raw "
3387                                 "encap combination");
3388         }
3389         if (decap) {
3390                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3391                                                     item_flags, attr, error);
3392                 if (ret < 0)
3393                         return ret;
3394                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3395                 ++(*actions_n);
3396         }
3397         if (encap) {
3398                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3399                         return rte_flow_error_set(error, ENOTSUP,
3400                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3401                                                   NULL,
3402                                                   "small raw encap size");
3403                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3404                         return rte_flow_error_set(error, EINVAL,
3405                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3406                                                   NULL,
3407                                                   "more than one encap action");
3408                 if (!attr->transfer && priv->representor)
3409                         return rte_flow_error_set
3410                                         (error, ENOTSUP,
3411                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3412                                          "encap action for VF representor "
3413                                          "not supported on NIC table");
3414                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3415                 ++(*actions_n);
3416         }
3417         return 0;
3418 }
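/*
 * Decision table for the combined raw decap + raw encap case handled
 * above (comment only; S = MLX5_ENCAPSULATION_DECISION_SIZE):
 *
 *   decap size <= S and encap size >  S  ->  L3 encap, decap ignored
 *   decap size >  S and encap size <= S  ->  L3 decap, encap ignored
 *   decap size >  S and encap size >  S  ->  two separate L2 actions
 *   decap size <= S and encap size <= S  ->  rejected with ENOTSUP
 */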
3419
3420 /**
3421  * Match encap_decap resource.
3422  *
3423  * @param list
3424  *   Pointer to the hash list.
3425  * @param entry
3426  *   Pointer to the existing resource entry object.
3427  * @param key
3428  *   Key of the new entry.
3429  * @param cb_ctx
3430  *   Pointer to the new encap_decap resource.
3431  *
3432  * @return
3433  *   0 on matching, non-zero otherwise.
3434  */
3435 int
3436 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3437                              struct mlx5_hlist_entry *entry,
3438                              uint64_t key __rte_unused, void *cb_ctx)
3439 {
3440         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3441         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3442         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3443
3444         cache_resource = container_of(entry,
3445                                       struct mlx5_flow_dv_encap_decap_resource,
3446                                       entry);
3447         if (resource->reformat_type == cache_resource->reformat_type &&
3448             resource->ft_type == cache_resource->ft_type &&
3449             resource->flags == cache_resource->flags &&
3450             resource->size == cache_resource->size &&
3451             !memcmp((const void *)resource->buf,
3452                     (const void *)cache_resource->buf,
3453                     resource->size))
3454                 return 0;
3455         return -1;
3456 }
3457
3458 /**
3459  * Allocate encap_decap resource.
3460  *
3461  * @param list
3462  *   Pointer to the hash list.
3463  * @param key
3464  *   Key of the new entry (unused).
3465  * @param cb_ctx
3466  *   Pointer to the new encap_decap resource.
3467  *
3468  * @return
3469  *   Pointer to the created entry on success, NULL otherwise.
3470  */
3471 struct mlx5_hlist_entry *
3472 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3473                               uint64_t key __rte_unused,
3474                               void *cb_ctx)
3475 {
3476         struct mlx5_dev_ctx_shared *sh = list->ctx;
3477         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3478         struct mlx5dv_dr_domain *domain;
3479         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3480         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3481         uint32_t idx;
3482         int ret;
3483
3484         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3485                 domain = sh->fdb_domain;
3486         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3487                 domain = sh->rx_domain;
3488         else
3489                 domain = sh->tx_domain;
3490         /* Register new encap/decap resource. */
3491         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3492                                        &idx);
3493         if (!cache_resource) {
3494                 rte_flow_error_set(ctx->error, ENOMEM,
3495                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3496                                    "cannot allocate resource memory");
3497                 return NULL;
3498         }
3499         *cache_resource = *resource;
3500         cache_resource->idx = idx;
3501         ret = mlx5_flow_os_create_flow_action_packet_reformat
3502                                         (sh->ctx, domain, cache_resource,
3503                                          &cache_resource->action);
3504         if (ret) {
3505                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3506                 rte_flow_error_set(ctx->error, ENOMEM,
3507                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3508                                    NULL, "cannot create action");
3509                 return NULL;
3510         }
3511
3512         return &cache_resource->entry;
3513 }
3514
3515 /**
3516  * Find existing encap/decap resource or create and register a new one.
3517  *
3518  * @param[in, out] dev
3519  *   Pointer to rte_eth_dev structure.
3520  * @param[in, out] resource
3521  *   Pointer to encap/decap resource.
3522  * @param[in, out] dev_flow
3523  *   Pointer to the dev_flow.
3524  * @param[out] error
3525  *   pointer to error structure.
3526  *
3527  * @return
3528  *   0 on success, a negative errno value otherwise and rte_errno is set.
3529  */
3530 static int
3531 flow_dv_encap_decap_resource_register
3532                         (struct rte_eth_dev *dev,
3533                          struct mlx5_flow_dv_encap_decap_resource *resource,
3534                          struct mlx5_flow *dev_flow,
3535                          struct rte_flow_error *error)
3536 {
3537         struct mlx5_priv *priv = dev->data->dev_private;
3538         struct mlx5_dev_ctx_shared *sh = priv->sh;
3539         struct mlx5_hlist_entry *entry;
3540         union {
3541                 struct {
3542                         uint32_t ft_type:8;
3543                         uint32_t refmt_type:8;
3544                         /*
3545                          * Header reformat actions can be shared between
3546                          * non-root tables. One bit to indicate non-root
3547                          * table or not.
3548                          */
3549                         uint32_t is_root:1;
3550                         uint32_t reserve:15;
3551                 };
3552                 uint32_t v32;
3553         } encap_decap_key = {
3554                 {
3555                         .ft_type = resource->ft_type,
3556                         .refmt_type = resource->reformat_type,
3557                         .is_root = !!dev_flow->dv.group,
3558                         .reserve = 0,
3559                 }
3560         };
3561         struct mlx5_flow_cb_ctx ctx = {
3562                 .error = error,
3563                 .data = resource,
3564         };
3565         uint64_t key64;
3566
3567         resource->flags = dev_flow->dv.group ? 0 : 1;
3568         key64 =  __rte_raw_cksum(&encap_decap_key.v32,
3569                                  sizeof(encap_decap_key.v32), 0);
3570         if (resource->reformat_type !=
3571             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3572             resource->size)
3573                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3574         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3575         if (!entry)
3576                 return -rte_errno;
3577         resource = container_of(entry, typeof(*resource), entry);
3578         dev_flow->dv.encap_decap = resource;
3579         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3580         return 0;
3581 }
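/*
 * Worked example (comment only): for an FDB tunnel-to-L2 decap in a
 * non-root group, encap_decap_key.v32 packs ft_type, refmt_type and
 * the is_root bit (set for non-root groups, despite its name) into a
 * single word; key64 is its checksum. The reformat buffer is mixed
 * into key64 only for types that carry data, so data-less decaps of
 * the same kind share one hash key.
 */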
3582
3583 /**
3584  * Find existing table jump resource or create and register a new one.
3585  *
3586  * @param[in, out] dev
3587  *   Pointer to rte_eth_dev structure.
3588  * @param[in, out] tbl
3589  *   Pointer to flow table resource.
3590  * @param[in, out] dev_flow
3591  *   Pointer to the dev_flow.
3592  * @param[out] error
3593  *   pointer to error structure.
3594  *
3595  * @return
3596  *   0 on success, a negative errno value otherwise and rte_errno is set.
3597  */
3598 static int
3599 flow_dv_jump_tbl_resource_register
3600                         (struct rte_eth_dev *dev __rte_unused,
3601                          struct mlx5_flow_tbl_resource *tbl,
3602                          struct mlx5_flow *dev_flow,
3603                          struct rte_flow_error *error __rte_unused)
3604 {
3605         struct mlx5_flow_tbl_data_entry *tbl_data =
3606                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3607
3608         MLX5_ASSERT(tbl);
3609         MLX5_ASSERT(tbl_data->jump.action);
3610         dev_flow->handle->rix_jump = tbl_data->idx;
3611         dev_flow->dv.jump = &tbl_data->jump;
3612         return 0;
3613 }
3614
3615 int
3616 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3617                          struct mlx5_cache_entry *entry, void *cb_ctx)
3618 {
3619         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3620         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3621         struct mlx5_flow_dv_port_id_action_resource *res =
3622                         container_of(entry, typeof(*res), entry);
3623
3624         return ref->port_id != res->port_id;
3625 }
3626
3627 struct mlx5_cache_entry *
3628 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3629                           struct mlx5_cache_entry *entry __rte_unused,
3630                           void *cb_ctx)
3631 {
3632         struct mlx5_dev_ctx_shared *sh = list->ctx;
3633         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3634         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3635         struct mlx5_flow_dv_port_id_action_resource *cache;
3636         uint32_t idx;
3637         int ret;
3638
3639         /* Register new port id action resource. */
3640         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3641         if (!cache) {
3642                 rte_flow_error_set(ctx->error, ENOMEM,
3643                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3644                                    "cannot allocate port_id action cache memory");
3645                 return NULL;
3646         }
3647         *cache = *ref;
3648         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3649                                                         ref->port_id,
3650                                                         &cache->action);
3651         if (ret) {
3652                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3653                 rte_flow_error_set(ctx->error, ENOMEM,
3654                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3655                                    "cannot create action");
3656                 return NULL;
3657         }
3658         cache->idx = idx;
3659         return &cache->entry;
3660 }
3661
3662 /**
3663  * Find existing table port ID resource or create and register a new one.
3664  *
3665  * @param[in, out] dev
3666  *   Pointer to rte_eth_dev structure.
3667  * @param[in, out] resource
3668  *   Pointer to port ID action resource.
3669  * @param[in, out] dev_flow
3670  *   Pointer to the dev_flow.
3671  * @param[out] error
3672  *   pointer to error structure.
3673  *
3674  * @return
3675  *   0 on success, a negative errno value otherwise and rte_errno is set.
3676  */
3677 static int
3678 flow_dv_port_id_action_resource_register
3679                         (struct rte_eth_dev *dev,
3680                          struct mlx5_flow_dv_port_id_action_resource *resource,
3681                          struct mlx5_flow *dev_flow,
3682                          struct rte_flow_error *error)
3683 {
3684         struct mlx5_priv *priv = dev->data->dev_private;
3685         struct mlx5_cache_entry *entry;
3686         struct mlx5_flow_dv_port_id_action_resource *cache;
3687         struct mlx5_flow_cb_ctx ctx = {
3688                 .error = error,
3689                 .data = resource,
3690         };
3691
3692         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3693         if (!entry)
3694                 return -rte_errno;
3695         cache = container_of(entry, typeof(*cache), entry);
3696         dev_flow->dv.port_id_action = cache;
3697         dev_flow->handle->rix_port_id_action = cache->idx;
3698         return 0;
3699 }
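/*
 * Editorial note: the port ID resource above goes through the generic
 * mlx5_cache_list.  mlx5_cache_register() walks the list calling the
 * match callback for each entry (0 means a hit, nonzero a mismatch) and
 * falls back to the create callback on a miss, so flows with the same
 * key transparently share one DR action.  A minimal caller sketch,
 * assuming a transfer flow forwarding to E-Switch port 1 (error
 * handling elided):
 *
 *	struct mlx5_flow_dv_port_id_action_resource res = { .port_id = 1 };
 *
 *	if (flow_dv_port_id_action_resource_register(dev, &res,
 *						     dev_flow, error))
 *		return -rte_errno;
 *	MLX5_ASSERT(dev_flow->dv.port_id_action->action);
 */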
3700
3701 int
3702 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3703                          struct mlx5_cache_entry *entry, void *cb_ctx)
3704 {
3705         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3706         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3707         struct mlx5_flow_dv_push_vlan_action_resource *res =
3708                         container_of(entry, typeof(*res), entry);
3709
3710         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3711 }
3712
3713 struct mlx5_cache_entry *
3714 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3715                           struct mlx5_cache_entry *entry __rte_unused,
3716                           void *cb_ctx)
3717 {
3718         struct mlx5_dev_ctx_shared *sh = list->ctx;
3719         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3720         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3721         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3722         struct mlx5dv_dr_domain *domain;
3723         uint32_t idx;
3724         int ret;
3725
3726         /* Register new push VLAN action resource. */
3727         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3728         if (!cache) {
3729                 rte_flow_error_set(ctx->error, ENOMEM,
3730                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3731                                    "cannot allocate push_vlan action cache memory");
3732                 return NULL;
3733         }
3734         *cache = *ref;
3735         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3736                 domain = sh->fdb_domain;
3737         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3738                 domain = sh->rx_domain;
3739         else
3740                 domain = sh->tx_domain;
3741         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3742                                                         &cache->action);
3743         if (ret) {
3744                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3745                 rte_flow_error_set(ctx->error, ENOMEM,
3746                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3747                                    "cannot create push vlan action");
3748                 return NULL;
3749         }
3750         cache->idx = idx;
3751         return &cache->entry;
3752 }
3753
3754 /**
3755  * Find an existing push VLAN action resource or create and register a new one.
3756  *
3757  * @param[in, out] dev
3758  *   Pointer to rte_eth_dev structure.
3759  * @param[in, out] resource
3760  *   Pointer to push VLAN action resource.
3761  * @param[in, out] dev_flow
3762  *   Pointer to the dev_flow.
3763  * @param[out] error
3764  *   Pointer to error structure.
3765  *
3766  * @return
3767  *   0 on success, otherwise a negative errno value and rte_errno is set.
3768  */
3769 static int
3770 flow_dv_push_vlan_action_resource_register
3771                        (struct rte_eth_dev *dev,
3772                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3773                         struct mlx5_flow *dev_flow,
3774                         struct rte_flow_error *error)
3775 {
3776         struct mlx5_priv *priv = dev->data->dev_private;
3777         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3778         struct mlx5_cache_entry *entry;
3779         struct mlx5_flow_cb_ctx ctx = {
3780                 .error = error,
3781                 .data = resource,
3782         };
3783
3784         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3785         if (!entry)
3786                 return -rte_errno;
3787         cache = container_of(entry, typeof(*cache), entry);
3788
3789         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3790         dev_flow->dv.push_vlan_res = cache;
3791         return 0;
3792 }
3793
3794 /**
3795  * Get the header length of a specific rte_flow_item_type.
3796  *
3797  * @param[in] item_type
3798  *   Tested rte_flow_item_type.
3799  *
3800  * @return
3801  *   Size of the item type's header structure, 0 if void or irrelevant.
3802  */
3803 static size_t
3804 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3805 {
3806         size_t retval;
3807
3808         switch (item_type) {
3809         case RTE_FLOW_ITEM_TYPE_ETH:
3810                 retval = sizeof(struct rte_ether_hdr);
3811                 break;
3812         case RTE_FLOW_ITEM_TYPE_VLAN:
3813                 retval = sizeof(struct rte_vlan_hdr);
3814                 break;
3815         case RTE_FLOW_ITEM_TYPE_IPV4:
3816                 retval = sizeof(struct rte_ipv4_hdr);
3817                 break;
3818         case RTE_FLOW_ITEM_TYPE_IPV6:
3819                 retval = sizeof(struct rte_ipv6_hdr);
3820                 break;
3821         case RTE_FLOW_ITEM_TYPE_UDP:
3822                 retval = sizeof(struct rte_udp_hdr);
3823                 break;
3824         case RTE_FLOW_ITEM_TYPE_TCP:
3825                 retval = sizeof(struct rte_tcp_hdr);
3826                 break;
3827         case RTE_FLOW_ITEM_TYPE_VXLAN:
3828         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3829                 retval = sizeof(struct rte_vxlan_hdr);
3830                 break;
3831         case RTE_FLOW_ITEM_TYPE_GRE:
3832         case RTE_FLOW_ITEM_TYPE_NVGRE:
3833                 retval = sizeof(struct rte_gre_hdr);
3834                 break;
3835         case RTE_FLOW_ITEM_TYPE_MPLS:
3836                 retval = sizeof(struct rte_mpls_hdr);
3837                 break;
3838         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3839         default:
3840                 retval = 0;
3841                 break;
3842         }
3843         return retval;
3844 }
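/*
 * Editorial example: for the canonical VXLAN encapsulation chain
 * ETH / IPV4 / UDP / VXLAN the helper above yields
 * 14 + 20 + 8 + 8 = 50 bytes in total, well under MLX5_ENCAP_MAX_LEN.
 * A minimal sketch of summing an item chain with it:
 *
 *	static size_t
 *	encap_chain_len(const struct rte_flow_item *items)
 *	{
 *		size_t len = 0;
 *
 *		for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
 *			len += flow_dv_get_item_hdr_len(items->type);
 *		return len;
 *	}
 */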
3845
3846 #define MLX5_ENCAP_IPV4_VERSION         0x40
3847 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3848 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3849 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3850 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3851 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3852 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
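/*
 * Editorial note on the defaults above: MLX5_ENCAP_IPV4_VERSION |
 * MLX5_ENCAP_IPV4_IHL_MIN gives version_ihl 0x45 (IPv4, minimal 20-byte
 * header), MLX5_ENCAP_IPV4_TTL_DEF is a TTL of 64,
 * MLX5_ENCAP_IPV6_VTC_FLOW sets the IPv6 version nibble to 6 with zero
 * traffic class and flow label, and MLX5_ENCAP_IPV6_HOP_LIMIT is 255.
 * MLX5_ENCAP_VXLAN_FLAGS, stored big-endian, sets the VXLAN "I" (valid
 * VNI) flag byte 0x08, while MLX5_ENCAP_VXLAN_GPE_FLAGS sets the
 * VXLAN-GPE "P" (next protocol present) bit.
 */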
3853
3854 /**
3855  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
3856  *
3857  * @param[in] items
3858  *   Pointer to rte_flow_item objects list.
3859  * @param[out] buf
3860  *   Pointer to the output buffer.
3861  * @param[out] size
3862  *   Pointer to the output buffer size.
3863  * @param[out] error
3864  *   Pointer to the error structure.
3865  *
3866  * @return
3867  *   0 on success, a negative errno value otherwise and rte_errno is set.
3868  */
3869 static int
3870 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3871                            size_t *size, struct rte_flow_error *error)
3872 {
3873         struct rte_ether_hdr *eth = NULL;
3874         struct rte_vlan_hdr *vlan = NULL;
3875         struct rte_ipv4_hdr *ipv4 = NULL;
3876         struct rte_ipv6_hdr *ipv6 = NULL;
3877         struct rte_udp_hdr *udp = NULL;
3878         struct rte_vxlan_hdr *vxlan = NULL;
3879         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3880         struct rte_gre_hdr *gre = NULL;
3881         size_t len;
3882         size_t temp_size = 0;
3883
3884         if (!items)
3885                 return rte_flow_error_set(error, EINVAL,
3886                                           RTE_FLOW_ERROR_TYPE_ACTION,
3887                                           NULL, "invalid empty data");
3888         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3889                 len = flow_dv_get_item_hdr_len(items->type);
3890                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3891                         return rte_flow_error_set(error, EINVAL,
3892                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3893                                                   (void *)items->type,
3894                                                   "items total size is too big"
3895                                                   " for encap action");
3896                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3897                 switch (items->type) {
3898                 case RTE_FLOW_ITEM_TYPE_ETH:
3899                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3900                         break;
3901                 case RTE_FLOW_ITEM_TYPE_VLAN:
3902                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3903                         if (!eth)
3904                                 return rte_flow_error_set(error, EINVAL,
3905                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3906                                                 (void *)items->type,
3907                                                 "eth header not found");
3908                         if (!eth->ether_type)
3909                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3910                         break;
3911                 case RTE_FLOW_ITEM_TYPE_IPV4:
3912                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3913                         if (!vlan && !eth)
3914                                 return rte_flow_error_set(error, EINVAL,
3915                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3916                                                 (void *)items->type,
3917                                                 "neither eth nor vlan"
3918                                                 " header found");
3919                         if (vlan && !vlan->eth_proto)
3920                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3921                         else if (eth && !eth->ether_type)
3922                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3923                         if (!ipv4->version_ihl)
3924                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3925                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3926                         if (!ipv4->time_to_live)
3927                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3928                         break;
3929                 case RTE_FLOW_ITEM_TYPE_IPV6:
3930                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3931                         if (!vlan && !eth)
3932                                 return rte_flow_error_set(error, EINVAL,
3933                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3934                                                 (void *)items->type,
3935                                                 "neither eth nor vlan"
3936                                                 " header found");
3937                         if (vlan && !vlan->eth_proto)
3938                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3939                         else if (eth && !eth->ether_type)
3940                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3941                         if (!ipv6->vtc_flow)
3942                                 ipv6->vtc_flow =
3943                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3944                         if (!ipv6->hop_limits)
3945                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3946                         break;
3947                 case RTE_FLOW_ITEM_TYPE_UDP:
3948                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3949                         if (!ipv4 && !ipv6)
3950                                 return rte_flow_error_set(error, EINVAL,
3951                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3952                                                 (void *)items->type,
3953                                                 "ip header not found");
3954                         if (ipv4 && !ipv4->next_proto_id)
3955                                 ipv4->next_proto_id = IPPROTO_UDP;
3956                         else if (ipv6 && !ipv6->proto)
3957                                 ipv6->proto = IPPROTO_UDP;
3958                         break;
3959                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3960                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3961                         if (!udp)
3962                                 return rte_flow_error_set(error, EINVAL,
3963                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3964                                                 (void *)items->type,
3965                                                 "udp header not found");
3966                         if (!udp->dst_port)
3967                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3968                         if (!vxlan->vx_flags)
3969                                 vxlan->vx_flags =
3970                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3971                         break;
3972                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3973                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3974                         if (!udp)
3975                                 return rte_flow_error_set(error, EINVAL,
3976                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3977                                                 (void *)items->type,
3978                                                 "udp header not found");
3979                         if (!vxlan_gpe->proto)
3980                                 return rte_flow_error_set(error, EINVAL,
3981                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3982                                                 (void *)items->type,
3983                                                 "next protocol not found");
3984                         if (!udp->dst_port)
3985                                 udp->dst_port =
3986                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3987                         if (!vxlan_gpe->vx_flags)
3988                                 vxlan_gpe->vx_flags =
3989                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3990                         break;
3991                 case RTE_FLOW_ITEM_TYPE_GRE:
3992                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3993                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3994                         if (!gre->proto)
3995                                 return rte_flow_error_set(error, EINVAL,
3996                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3997                                                 (void *)items->type,
3998                                                 "next protocol not found");
3999                         if (!ipv4 && !ipv6)
4000                                 return rte_flow_error_set(error, EINVAL,
4001                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4002                                                 (void *)items->type,
4003                                                 "ip header not found");
4004                         if (ipv4 && !ipv4->next_proto_id)
4005                                 ipv4->next_proto_id = IPPROTO_GRE;
4006                         else if (ipv6 && !ipv6->proto)
4007                                 ipv6->proto = IPPROTO_GRE;
4008                         break;
4009                 case RTE_FLOW_ITEM_TYPE_VOID:
4010                         break;
4011                 default:
4012                         return rte_flow_error_set(error, EINVAL,
4013                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4014                                                   (void *)items->type,
4015                                                   "unsupported item type");
4016                         break;
4017                 }
4018                 temp_size += len;
4019         }
4020         *size = temp_size;
4021         return 0;
4022 }
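/*
 * Editorial example: a minimal VXLAN encap definition that leaves most
 * fields zero so the defaults above (EtherType, IPv4 version/TTL, UDP
 * destination port 4789, VXLAN "I" flag) are filled in by this
 * function.  All addresses and the VNI (42) are illustrative only:
 *
 *	struct rte_flow_item_eth eth = {
 *		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *		.src.addr_bytes = "\x00\xaa\xbb\xcc\xdd\xee",
 *	};
 *	struct rte_flow_item_ipv4 ip = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
 *	};
 *	struct rte_flow_item_udp udp = { 0 };
 *	struct rte_flow_item_vxlan vxlan = { .vni = "\x00\x00\x2a" };
 *	const struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */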
4023
4024 static int
4025 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4026 {
4027         struct rte_ether_hdr *eth = NULL;
4028         struct rte_vlan_hdr *vlan = NULL;
4029         struct rte_ipv6_hdr *ipv6 = NULL;
4030         struct rte_udp_hdr *udp = NULL;
4031         char *next_hdr;
4032         uint16_t proto;
4033
4034         eth = (struct rte_ether_hdr *)data;
4035         next_hdr = (char *)(eth + 1);
4036         proto = RTE_BE16(eth->ether_type);
4037
4038         /* VLAN skipping */
4039         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4040                 vlan = (struct rte_vlan_hdr *)next_hdr;
4041                 proto = RTE_BE16(vlan->eth_proto);
4042                 next_hdr += sizeof(struct rte_vlan_hdr);
4043         }
4044
4045         /* HW calculates the IPv4 checksum. No need to proceed. */
4046         if (proto == RTE_ETHER_TYPE_IPV4)
4047                 return 0;
4048
4049         /* Non-IPv4/IPv6 header is not supported. */
4050         if (proto != RTE_ETHER_TYPE_IPV6) {
4051                 return rte_flow_error_set(error, ENOTSUP,
4052                                           RTE_FLOW_ERROR_TYPE_ACTION,
4053                                           NULL, "Cannot offload non IPv4/IPv6");
4054         }
4055
4056         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4057
4058         /* Ignore non-UDP protocols. */
4059         if (ipv6->proto != IPPROTO_UDP)
4060                 return 0;
4061
4062         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4063         udp->dgram_cksum = 0;
4064
4065         return 0;
4066 }
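/*
 * Editorial note: the hardware fills the outer IPv4 checksum (per the
 * comment above) but not an outer UDP checksum over IPv6, so the encap
 * header's UDP checksum is forced to zero here.  RFC 6935 explicitly
 * permits a zero UDP checksum for tunneled traffic over IPv6, so the
 * resulting packets remain standards-compliant.
 */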
4067
4068 /**
4069  * Convert L2 encap action to DV specification.
4070  *
4071  * @param[in] dev
4072  *   Pointer to rte_eth_dev structure.
4073  * @param[in] action
4074  *   Pointer to action structure.
4075  * @param[in, out] dev_flow
4076  *   Pointer to the mlx5_flow.
4077  * @param[in] transfer
4078  *   Mark if the flow is E-Switch flow.
4079  * @param[out] error
4080  *   Pointer to the error structure.
4081  *
4082  * @return
4083  *   0 on success, a negative errno value otherwise and rte_errno is set.
4084  */
4085 static int
4086 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4087                                const struct rte_flow_action *action,
4088                                struct mlx5_flow *dev_flow,
4089                                uint8_t transfer,
4090                                struct rte_flow_error *error)
4091 {
4092         const struct rte_flow_item *encap_data;
4093         const struct rte_flow_action_raw_encap *raw_encap_data;
4094         struct mlx5_flow_dv_encap_decap_resource res = {
4095                 .reformat_type =
4096                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4097                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4098                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4099         };
4100
4101         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4102                 raw_encap_data =
4103                         (const struct rte_flow_action_raw_encap *)action->conf;
4104                 res.size = raw_encap_data->size;
4105                 memcpy(res.buf, raw_encap_data->data, res.size);
4106         } else {
4107                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4108                         encap_data =
4109                                 ((const struct rte_flow_action_vxlan_encap *)
4110                                                 action->conf)->definition;
4111                 else
4112                         encap_data =
4113                                 ((const struct rte_flow_action_nvgre_encap *)
4114                                                 action->conf)->definition;
4115                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4116                                                &res.size, error))
4117                         return -rte_errno;
4118         }
4119         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4120                 return -rte_errno;
4121         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4122                 return rte_flow_error_set(error, EINVAL,
4123                                           RTE_FLOW_ERROR_TYPE_ACTION,
4124                                           NULL, "can't create L2 encap action");
4125         return 0;
4126 }
4127
4128 /**
4129  * Convert L2 decap action to DV specification.
4130  *
4131  * @param[in] dev
4132  *   Pointer to rte_eth_dev structure.
4133  * @param[in, out] dev_flow
4134  *   Pointer to the mlx5_flow.
4135  * @param[in] transfer
4136  *   Mark if the flow is E-Switch flow.
4137  * @param[out] error
4138  *   Pointer to the error structure.
4139  *
4140  * @return
4141  *   0 on success, a negative errno value otherwise and rte_errno is set.
4142  */
4143 static int
4144 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4145                                struct mlx5_flow *dev_flow,
4146                                uint8_t transfer,
4147                                struct rte_flow_error *error)
4148 {
4149         struct mlx5_flow_dv_encap_decap_resource res = {
4150                 .size = 0,
4151                 .reformat_type =
4152                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4153                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4154                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4155         };
4156
4157         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4158                 return rte_flow_error_set(error, EINVAL,
4159                                           RTE_FLOW_ERROR_TYPE_ACTION,
4160                                           NULL, "can't create L2 decap action");
4161         return 0;
4162 }
4163
4164 /**
4165  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4166  *
4167  * @param[in] dev
4168  *   Pointer to rte_eth_dev structure.
4169  * @param[in] action
4170  *   Pointer to action structure.
4171  * @param[in, out] dev_flow
4172  *   Pointer to the mlx5_flow.
4173  * @param[in] attr
4174  *   Pointer to the flow attributes.
4175  * @param[out] error
4176  *   Pointer to the error structure.
4177  *
4178  * @return
4179  *   0 on success, a negative errno value otherwise and rte_errno is set.
4180  */
4181 static int
4182 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4183                                 const struct rte_flow_action *action,
4184                                 struct mlx5_flow *dev_flow,
4185                                 const struct rte_flow_attr *attr,
4186                                 struct rte_flow_error *error)
4187 {
4188         const struct rte_flow_action_raw_encap *encap_data;
4189         struct mlx5_flow_dv_encap_decap_resource res;
4190
4191         memset(&res, 0, sizeof(res));
4192         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4193         res.size = encap_data->size;
4194         memcpy(res.buf, encap_data->data, res.size);
4195         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4196                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4197                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4198         if (attr->transfer)
4199                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4200         else
4201                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4202                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4203         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4204                 return rte_flow_error_set(error, EINVAL,
4205                                           RTE_FLOW_ERROR_TYPE_ACTION,
4206                                           NULL, "can't create encap action");
4207         return 0;
4208 }
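/*
 * Editorial note: the reformat type above is inferred from the raw
 * buffer length.  A buffer shorter than MLX5_ENCAPSULATION_DECISION_SIZE
 * is assumed to be the bare L2 header to install when stripping an L3
 * tunnel (L3_TUNNEL_TO_L2), while a longer buffer is treated as a full
 * L3 tunnel encapsulation (L2_TO_L3_TUNNEL).
 */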
4209
4210 /**
4211  * Create action push VLAN.
4212  *
4213  * @param[in] dev
4214  *   Pointer to rte_eth_dev structure.
4215  * @param[in] attr
4216  *   Pointer to the flow attributes.
4217  * @param[in] vlan
4218  *   Pointer to the vlan to push to the Ethernet header.
4219  * @param[in, out] dev_flow
4220  *   Pointer to the mlx5_flow.
4221  * @param[out] error
4222  *   Pointer to the error structure.
4223  *
4224  * @return
4225  *   0 on success, a negative errno value otherwise and rte_errno is set.
4226  */
4227 static int
4228 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4229                                 const struct rte_flow_attr *attr,
4230                                 const struct rte_vlan_hdr *vlan,
4231                                 struct mlx5_flow *dev_flow,
4232                                 struct rte_flow_error *error)
4233 {
4234         struct mlx5_flow_dv_push_vlan_action_resource res;
4235
4236         memset(&res, 0, sizeof(res));
4237         res.vlan_tag =
4238                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4239                                  vlan->vlan_tci);
4240         if (attr->transfer)
4241                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4242         else
4243                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4244                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4245         return flow_dv_push_vlan_action_resource_register
4246                                             (dev, &res, dev_flow, error);
4247 }
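/*
 * Editorial example: for TPID 0x8100, PCP 5 and VLAN ID 100, the 32-bit
 * tag built above comes out as follows (a sketch using the VLAN masks
 * defined at the top of this file):
 *
 *	uint16_t tci = (5 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100;
 *	rte_be32_t tag = rte_cpu_to_be_32
 *			((uint32_t)RTE_ETHER_TYPE_VLAN << 16 | tci);
 *
 * i.e. bytes 0x81 0x00 0xa0 0x64 on the wire, TPID first.
 */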
4248
4249 /**
4250  * Validate the modify-header actions.
4251  *
4252  * @param[in] action_flags
4253  *   Holds the actions detected until now.
4254  * @param[in] action
4255  *   Pointer to the modify action.
4256  * @param[out] error
4257  *   Pointer to error structure.
4258  *
4259  * @return
4260  *   0 on success, a negative errno value otherwise and rte_errno is set.
4261  */
4262 static int
4263 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4264                                    const struct rte_flow_action *action,
4265                                    struct rte_flow_error *error)
4266 {
4267         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4268                 return rte_flow_error_set(error, EINVAL,
4269                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4270                                           NULL, "action configuration not set");
4271         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4272                 return rte_flow_error_set(error, EINVAL,
4273                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4274                                           "can't have encap action before"
4275                                           " modify action");
4276         return 0;
4277 }
4278
4279 /**
4280  * Validate the modify-header MAC address actions.
4281  *
4282  * @param[in] action_flags
4283  *   Holds the actions detected until now.
4284  * @param[in] action
4285  *   Pointer to the modify action.
4286  * @param[in] item_flags
4287  *   Holds the items detected.
4288  * @param[out] error
4289  *   Pointer to error structure.
4290  *
4291  * @return
4292  *   0 on success, a negative errno value otherwise and rte_errno is set.
4293  */
4294 static int
4295 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4296                                    const struct rte_flow_action *action,
4297                                    const uint64_t item_flags,
4298                                    struct rte_flow_error *error)
4299 {
4300         int ret = 0;
4301
4302         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4303         if (!ret) {
4304                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4305                         return rte_flow_error_set(error, EINVAL,
4306                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4307                                                   NULL,
4308                                                   "no L2 item in pattern");
4309         }
4310         return ret;
4311 }
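/*
 * Editorial example: the rewritten layer must be present in the
 * pattern.  A rule such as
 *
 *	pattern: ETH / IPV4 / END
 *	action:  SET_MAC_DST / ... / END
 *
 * passes this check because the ETH item sets MLX5_FLOW_LAYER_L2,
 * whereas the same action with a pattern of IPV4 / END alone is
 * rejected with "no L2 item in pattern".
 */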
4312
4313 /**
4314  * Validate the modify-header IPv4 address actions.
4315  *
4316  * @param[in] action_flags
4317  *   Holds the actions detected until now.
4318  * @param[in] action
4319  *   Pointer to the modify action.
4320  * @param[in] item_flags
4321  *   Holds the items detected.
4322  * @param[out] error
4323  *   Pointer to error structure.
4324  *
4325  * @return
4326  *   0 on success, a negative errno value otherwise and rte_errno is set.
4327  */
4328 static int
4329 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4330                                     const struct rte_flow_action *action,
4331                                     const uint64_t item_flags,
4332                                     struct rte_flow_error *error)
4333 {
4334         int ret = 0;
4335         uint64_t layer;
4336
4337         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4338         if (!ret) {
4339                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4340                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4341                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4342                 if (!(item_flags & layer))
4343                         return rte_flow_error_set(error, EINVAL,
4344                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4345                                                   NULL,
4346                                                   "no ipv4 item in pattern");
4347         }
4348         return ret;
4349 }
4350
4351 /**
4352  * Validate the modify-header IPv6 address actions.
4353  *
4354  * @param[in] action_flags
4355  *   Holds the actions detected until now.
4356  * @param[in] action
4357  *   Pointer to the modify action.
4358  * @param[in] item_flags
4359  *   Holds the items detected.
4360  * @param[out] error
4361  *   Pointer to error structure.
4362  *
4363  * @return
4364  *   0 on success, a negative errno value otherwise and rte_errno is set.
4365  */
4366 static int
4367 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4368                                     const struct rte_flow_action *action,
4369                                     const uint64_t item_flags,
4370                                     struct rte_flow_error *error)
4371 {
4372         int ret = 0;
4373         uint64_t layer;
4374
4375         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4376         if (!ret) {
4377                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4378                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4379                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4380                 if (!(item_flags & layer))
4381                         return rte_flow_error_set(error, EINVAL,
4382                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4383                                                   NULL,
4384                                                   "no ipv6 item in pattern");
4385         }
4386         return ret;
4387 }
4388
4389 /**
4390  * Validate the modify-header TP actions.
4391  *
4392  * @param[in] action_flags
4393  *   Holds the actions detected until now.
4394  * @param[in] action
4395  *   Pointer to the modify action.
4396  * @param[in] item_flags
4397  *   Holds the items detected.
4398  * @param[out] error
4399  *   Pointer to error structure.
4400  *
4401  * @return
4402  *   0 on success, a negative errno value otherwise and rte_errno is set.
4403  */
4404 static int
4405 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4406                                   const struct rte_flow_action *action,
4407                                   const uint64_t item_flags,
4408                                   struct rte_flow_error *error)
4409 {
4410         int ret = 0;
4411         uint64_t layer;
4412
4413         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4414         if (!ret) {
4415                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4416                                  MLX5_FLOW_LAYER_INNER_L4 :
4417                                  MLX5_FLOW_LAYER_OUTER_L4;
4418                 if (!(item_flags & layer))
4419                         return rte_flow_error_set(error, EINVAL,
4420                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4421                                                   NULL, "no transport layer "
4422                                                   "in pattern");
4423         }
4424         return ret;
4425 }
4426
4427 /**
4428  * Validate the modify-header actions of increment/decrement
4429  * TCP Sequence-number.
4430  *
4431  * @param[in] action_flags
4432  *   Holds the actions detected until now.
4433  * @param[in] action
4434  *   Pointer to the modify action.
4435  * @param[in] item_flags
4436  *   Holds the items detected.
4437  * @param[out] error
4438  *   Pointer to error structure.
4439  *
4440  * @return
4441  *   0 on success, a negative errno value otherwise and rte_errno is set.
4442  */
4443 static int
4444 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4445                                        const struct rte_flow_action *action,
4446                                        const uint64_t item_flags,
4447                                        struct rte_flow_error *error)
4448 {
4449         int ret = 0;
4450         uint64_t layer;
4451
4452         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4453         if (!ret) {
4454                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4455                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4456                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4457                 if (!(item_flags & layer))
4458                         return rte_flow_error_set(error, EINVAL,
4459                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4460                                                   NULL, "no TCP item in"
4461                                                   " pattern");
4462                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4463                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4464                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4465                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4466                         return rte_flow_error_set(error, EINVAL,
4467                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4468                                                   NULL,
4469                                                   "cannot decrease and increase"
4470                                                   " TCP sequence number"
4471                                                   " at the same time");
4472         }
4473         return ret;
4474 }
4475
4476 /**
4477  * Validate the modify-header actions of increment/decrement
4478  * TCP Acknowledgment number.
4479  *
4480  * @param[in] action_flags
4481  *   Holds the actions detected until now.
4482  * @param[in] action
4483  *   Pointer to the modify action.
4484  * @param[in] item_flags
4485  *   Holds the items detected.
4486  * @param[out] error
4487  *   Pointer to error structure.
4488  *
4489  * @return
4490  *   0 on success, a negative errno value otherwise and rte_errno is set.
4491  */
4492 static int
4493 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4494                                        const struct rte_flow_action *action,
4495                                        const uint64_t item_flags,
4496                                        struct rte_flow_error *error)
4497 {
4498         int ret = 0;
4499         uint64_t layer;
4500
4501         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4502         if (!ret) {
4503                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4504                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4505                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4506                 if (!(item_flags & layer))
4507                         return rte_flow_error_set(error, EINVAL,
4508                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4509                                                   NULL, "no TCP item in"
4510                                                   " pattern");
4511                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4512                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4513                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4514                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4515                         return rte_flow_error_set(error, EINVAL,
4516                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4517                                                   NULL,
4518                                                   "cannot decrease and increase"
4519                                                   " TCP acknowledgment number"
4520                                                   " at the same time");
4521         }
4522         return ret;
4523 }
4524
4525 /**
4526  * Validate the modify-header TTL actions.
4527  *
4528  * @param[in] action_flags
4529  *   Holds the actions detected until now.
4530  * @param[in] action
4531  *   Pointer to the modify action.
4532  * @param[in] item_flags
4533  *   Holds the items detected.
4534  * @param[out] error
4535  *   Pointer to error structure.
4536  *
4537  * @return
4538  *   0 on success, a negative errno value otherwise and rte_errno is set.
4539  */
4540 static int
4541 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4542                                    const struct rte_flow_action *action,
4543                                    const uint64_t item_flags,
4544                                    struct rte_flow_error *error)
4545 {
4546         int ret = 0;
4547         uint64_t layer;
4548
4549         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4550         if (!ret) {
4551                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4552                                  MLX5_FLOW_LAYER_INNER_L3 :
4553                                  MLX5_FLOW_LAYER_OUTER_L3;
4554                 if (!(item_flags & layer))
4555                         return rte_flow_error_set(error, EINVAL,
4556                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4557                                                   NULL,
4558                                                   "no IP protocol in pattern");
4559         }
4560         return ret;
4561 }
4562
4563 /**
4564  * Validate the generic modify field actions.
4565  * @param[in] dev
4566  *   Pointer to the rte_eth_dev structure.
4567  * @param[in] action_flags
4568  *   Holds the actions detected until now.
4569  * @param[in] action
4570  *   Pointer to the modify action.
4571  * @param[in] attr
4572  *   Pointer to the flow attributes.
4573  * @param[out] error
4574  *   Pointer to error structure.
4575  *
4576  * @return
4577  *   Number of header fields to modify (0 or more) on success,
4578  *   a negative errno value otherwise and rte_errno is set.
4579  */
4580 static int
4581 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4582                                    const uint64_t action_flags,
4583                                    const struct rte_flow_action *action,
4584                                    const struct rte_flow_attr *attr,
4585                                    struct rte_flow_error *error)
4586 {
4587         int ret = 0;
4588         struct mlx5_priv *priv = dev->data->dev_private;
4589         struct mlx5_dev_config *config = &priv->config;
4590         const struct rte_flow_action_modify_field *action_modify_field =
4591                 action->conf;
4592         uint32_t dst_width =
4593                 mlx5_flow_item_field_width(action_modify_field->dst.field);
4594         uint32_t src_width =
4595                 mlx5_flow_item_field_width(action_modify_field->src.field);
4596
4597         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4598         if (ret)
4599                 return ret;
4600
4601         if (action_modify_field->width == 0)
4602                 return rte_flow_error_set(error, EINVAL,
4603                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4604                                 "no bits are requested to be modified");
4605         else if (action_modify_field->width > dst_width ||
4606                  action_modify_field->width > src_width)
4607                 return rte_flow_error_set(error, EINVAL,
4608                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4609                                 "cannot modify more bits than"
4610                                 " the width of a field");
4611         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4612             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4613                 if ((action_modify_field->dst.offset +
4614                      action_modify_field->width > dst_width) ||
4615                     (action_modify_field->dst.offset % 32))
4616                         return rte_flow_error_set(error, EINVAL,
4617                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4618                                         "destination offset is too big"
4619                                         " or not aligned to 4 bytes");
4620                 if (action_modify_field->dst.level &&
4621                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4622                         return rte_flow_error_set(error, ENOTSUP,
4623                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4624                                         "inner header fields modification"
4625                                         " is not supported");
4626         }
4627         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4628             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4629                 if (!attr->transfer && !attr->group)
4630                         return rte_flow_error_set(error, ENOTSUP,
4631                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4632                                         "modify field action is not"
4633                                         " supported for group 0");
4634                 if ((action_modify_field->src.offset +
4635                      action_modify_field->width > src_width) ||
4636                     (action_modify_field->src.offset % 32))
4637                         return rte_flow_error_set(error, EINVAL,
4638                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4639                                         "source offset is too big"
4640                                         " or not aligned to 4 bytes");
4641                 if (action_modify_field->src.level &&
4642                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4643                         return rte_flow_error_set(error, ENOTSUP,
4644                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4645                                         "inner header fields modification"
4646                                         " is not supported");
4647         }
4648         if (action_modify_field->dst.field ==
4649             action_modify_field->src.field)
4650                 return rte_flow_error_set(error, EINVAL,
4651                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4652                                 "source and destination fields"
4653                                 " cannot be the same");
4654         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4655             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4656                 return rte_flow_error_set(error, EINVAL,
4657                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4658                                 "immediate value or a pointer to it"
4659                                 " cannot be used as a destination");
4660         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4661             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4662                 return rte_flow_error_set(error, ENOTSUP,
4663                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4664                                 "modification of an arbitrary"
4665                                 " place in a packet is not supported");
4666         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4667             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4668                 return rte_flow_error_set(error, ENOTSUP,
4669                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4670                                 "modification of the 802.1Q Tag"
4671                                 " Identifier is not supported");
4672         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4673             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4674                 return rte_flow_error_set(error, ENOTSUP,
4675                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4676                                 "modification of the VXLAN Network"
4677                                 " Identifier is not supported");
4678         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4679             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4680                 return rte_flow_error_set(error, ENOTSUP,
4681                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4682                                 "modification of the GENEVE Network"
4683                                 " Identifier is not supported");
4684         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4685             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4686             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4687             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4688                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4689                     !mlx5_flow_ext_mreg_supported(dev))
4690                         return rte_flow_error_set(error, ENOTSUP,
4691                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4692                                         "cannot modify mark or metadata without"
4693                                         " extended metadata register support");
4694         }
4695         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4696                 return rte_flow_error_set(error, ENOTSUP,
4697                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4698                                 "add and sub operations"
4699                                 " are not supported");
4700         return (action_modify_field->width / 32) +
4701                !!(action_modify_field->width % 32);
4702 }
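/*
 * Editorial example: the value returned above is the number of 32-bit
 * modify-header fields the action consumes.  Copying a 48-bit MAC
 * address (width = 48) costs 48 / 32 + !!(48 % 32) = 1 + 1 = 2 fields,
 * while a full 32-bit metadata write (width = 32) costs exactly 1.
 */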
4703
4704 /**
4705  * Validate jump action.
4706  *
4707  * @param[in] action
4708  *   Pointer to the jump action.
4709  * @param[in] action_flags
4710  *   Holds the actions detected until now.
4711  * @param[in] attributes
4712  *   Pointer to flow attributes
4713  * @param[in] external
4714  *   Action belongs to flow rule created by request external to PMD.
4715  * @param[out] error
4716  *   Pointer to error structure.
4717  *
4718  * @return
4719  *   0 on success, a negative errno value otherwise and rte_errno is set.
4720  */
4721 static int
4722 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4723                              const struct mlx5_flow_tunnel *tunnel,
4724                              const struct rte_flow_action *action,
4725                              uint64_t action_flags,
4726                              const struct rte_flow_attr *attributes,
4727                              bool external, struct rte_flow_error *error)
4728 {
4729         uint32_t target_group, table;
4730         int ret = 0;
4731         struct flow_grp_info grp_info = {
4732                 .external = !!external,
4733                 .transfer = !!attributes->transfer,
4734                 .fdb_def_rule = 1,
4735                 .std_tbl_fix = 0
4736         };
4737         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4738                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4739                 return rte_flow_error_set(error, EINVAL,
4740                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4741                                           "can't have 2 fate actions in"
4742                                           " the same flow");
4743         if (!action->conf)
4744                 return rte_flow_error_set(error, EINVAL,
4745                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4746                                           NULL, "action configuration not set");
4747         target_group =
4748                 ((const struct rte_flow_action_jump *)action->conf)->group;
4749         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4750                                        &grp_info, error);
4751         if (ret)
4752                 return ret;
4753         if (attributes->group == target_group &&
4754             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4755                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4756                 return rte_flow_error_set(error, EINVAL,
4757                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4758                                           "target group must be other than"
4759                                           " the current flow group");
4760         return 0;
4761 }
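/*
 * Editorial example: a jump from group 0 to group 1 satisfies the
 * checks above, while a self-jump is rejected unless a tunnel set/match
 * action is present.  Sketch:
 *
 *	struct rte_flow_action_jump jump = { .group = 1 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */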
4762
4763 /**
4764  * Validate the port_id action.
4765  *
4766  * @param[in] dev
4767  *   Pointer to rte_eth_dev structure.
4768  * @param[in] action_flags
4769  *   Bit-fields that holds the actions detected until now.
4770  * @param[in] action
4771  *   Port_id RTE action structure.
4772  * @param[in] attr
4773  *   Attributes of flow that includes this action.
4774  * @param[out] error
4775  *   Pointer to error structure.
4776  *
4777  * @return
4778  *   0 on success, a negative errno value otherwise and rte_errno is set.
4779  */
4780 static int
4781 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4782                                 uint64_t action_flags,
4783                                 const struct rte_flow_action *action,
4784                                 const struct rte_flow_attr *attr,
4785                                 struct rte_flow_error *error)
4786 {
4787         const struct rte_flow_action_port_id *port_id;
4788         struct mlx5_priv *act_priv;
4789         struct mlx5_priv *dev_priv;
4790         uint16_t port;
4791
4792         if (!attr->transfer)
4793                 return rte_flow_error_set(error, ENOTSUP,
4794                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4795                                           NULL,
4796                                           "port id action is valid in transfer"
4797                                           " mode only");
4798         if (!action || !action->conf)
4799                 return rte_flow_error_set(error, ENOTSUP,
4800                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4801                                           NULL,
4802                                           "port id action parameters must be"
4803                                           " specified");
4804         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4805                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4806                 return rte_flow_error_set(error, EINVAL,
4807                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4808                                           "can have only one fate action in"
4809                                           " a flow");
4810         dev_priv = mlx5_dev_to_eswitch_info(dev);
4811         if (!dev_priv)
4812                 return rte_flow_error_set(error, rte_errno,
4813                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4814                                           NULL,
4815                                           "failed to obtain E-Switch info");
4816         port_id = action->conf;
4817         port = port_id->original ? dev->data->port_id : port_id->id;
4818         act_priv = mlx5_port_to_eswitch_info(port, false);
4819         if (!act_priv)
4820                 return rte_flow_error_set
4821                                 (error, rte_errno,
4822                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4823                                  "failed to obtain E-Switch port id for port");
4824         if (act_priv->domain_id != dev_priv->domain_id)
4825                 return rte_flow_error_set
4826                                 (error, EINVAL,
4827                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4828                                  "port does not belong to"
4829                                  " E-Switch being configured");
4830         return 0;
4831 }
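/*
 * Editorial example: forwarding to the representor registered as DPDK
 * port 1, valid only for transfer rules and only when both ports sit in
 * the same E-Switch domain (a sketch):
 *
 *	struct rte_flow_action_port_id pid = { .id = 1 };
 *	const struct rte_flow_attr attr = { .group = 0, .transfer = 1 };
 */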
4832
4833 /**
4834  * Get the maximum number of modify header actions.
4835  *
4836  * @param dev
4837  *   Pointer to rte_eth_dev structure.
4838  * @param flags
4839  *   Flags bits to check if root level.
4840  *
4841  * @return
4842  *   Max number of modify header actions device can support.
4843  */
4844 static inline unsigned int
4845 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4846                               uint64_t flags)
4847 {
4848         /*
4849          * There's no way to directly query the max capacity from FW.
4850          * The maximal value on root table should be assumed to be supported.
4851          */
4852         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4853                 return MLX5_MAX_MODIFY_NUM;
4854         else
4855                 return MLX5_ROOT_TBL_MODIFY_NUM;
4856 }
4857
4858 /**
4859  * Validate the meter action.
4860  *
4861  * @param[in] dev
4862  *   Pointer to rte_eth_dev structure.
4863  * @param[in] action_flags
4864  *   Bit-fields that holds the actions detected until now.
4865  * @param[in] action
4866  *   Pointer to the meter action.
4867  * @param[in] attr
4868  *   Attributes of flow that includes this action.
4869  * @param[out] error
4870  *   Pointer to error structure.
4871  *
4872  * @return
4873  *   0 on success, a negative errno value otherwise and rte_errno is set.
4874  */
4875 static int
4876 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4877                                 uint64_t action_flags,
4878                                 const struct rte_flow_action *action,
4879                                 const struct rte_flow_attr *attr,
4880                                 bool *def_policy,
4881                                 struct rte_flow_error *error)
4882 {
4883         struct mlx5_priv *priv = dev->data->dev_private;
4884         const struct rte_flow_action_meter *am = action->conf;
4885         struct mlx5_flow_meter_info *fm;
4886         struct mlx5_flow_meter_policy *mtr_policy;
4887         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
4888
4889         if (!am)
4890                 return rte_flow_error_set(error, EINVAL,
4891                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4892                                           "meter action conf is NULL");
4893
4894         if (action_flags & MLX5_FLOW_ACTION_METER)
4895                 return rte_flow_error_set(error, ENOTSUP,
4896                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4897                                           "meter chaining not supported");
4898         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4899                 return rte_flow_error_set(error, ENOTSUP,
4900                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4901                                           "meter with jump not supported");
4902         if (!priv->mtr_en)
4903                 return rte_flow_error_set(error, ENOTSUP,
4904                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4905                                           NULL,
4906                                           "meter action not supported");
4907         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
4908         if (!fm)
4909                 return rte_flow_error_set(error, EINVAL,
4910                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4911                                           "Meter not found");
4912         /* aso meter can always be shared by different domains */
4913         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
4914             !(fm->transfer == attr->transfer ||
4915               (!fm->ingress && !attr->ingress && attr->egress) ||
4916               (!fm->egress && !attr->egress && attr->ingress)))
4917                 return rte_flow_error_set(error, EINVAL,
4918                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4919                         "Flow attributes domain is either invalid "
4920                         "or conflicts with the current "
4921                         "meter attributes");
4922         if (fm->def_policy) {
4923                 if (!((attr->transfer &&
4924                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
4925                         (attr->egress &&
4926                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
4927                         (attr->ingress &&
4928                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
4929                         return rte_flow_error_set(error, EINVAL,
4930                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4931                                           "Flow attributes domain "
4932                                           "conflicts with the current "
4933                                           "meter domain attributes");
4934                 *def_policy = true;
4935         } else {
4936                 mtr_policy = mlx5_flow_meter_policy_find(dev,
4937                                                 fm->policy_id, NULL);
4938                 if (!mtr_policy)
4939                         return rte_flow_error_set(error, EINVAL,
4940                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4941                                           "Invalid policy id for meter");
4942                 if (!((attr->transfer && mtr_policy->transfer) ||
4943                         (attr->egress && mtr_policy->egress) ||
4944                         (attr->ingress && mtr_policy->ingress)))
4945                         return rte_flow_error_set(error, EINVAL,
4946                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4947                                           "Flow attributes domain "
4948                                           "conflicts with the current "
4949                                           "meter domain attributes");
4950                 *def_policy = false;
4951         }
4952         return 0;
4953 }
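
/*
 * A minimal usage sketch (the meter ID value is illustrative): a meter
 * created through the rte_mtr API is referenced from a flow rule by its
 * ID, and the validator above is what checks such an action:
 *
 *   struct rte_flow_action_meter meter_conf = { .mtr_id = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter_conf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */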
4954
4955 /**
4956  * Validate the age action.
4957  *
4958  * @param[in] action_flags
4959  *   Holds the actions detected until now.
4960  * @param[in] action
4961  *   Pointer to the age action.
4962  * @param[in] dev
4963  *   Pointer to the Ethernet device structure.
4964  * @param[out] error
4965  *   Pointer to error structure.
4966  *
4967  * @return
4968  *   0 on success, a negative errno value otherwise and rte_errno is set.
4969  */
4970 static int
4971 flow_dv_validate_action_age(uint64_t action_flags,
4972                             const struct rte_flow_action *action,
4973                             struct rte_eth_dev *dev,
4974                             struct rte_flow_error *error)
4975 {
4976         struct mlx5_priv *priv = dev->data->dev_private;
4977         const struct rte_flow_action_age *age = action->conf;
4978
4979         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4980             !priv->sh->aso_age_mng))
4981                 return rte_flow_error_set(error, ENOTSUP,
4982                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4983                                           NULL,
4984                                           "age action not supported");
4985         if (!(action->conf))
4986                 return rte_flow_error_set(error, EINVAL,
4987                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4988                                           "configuration cannot be null");
4989         if (!(age->timeout))
4990                 return rte_flow_error_set(error, EINVAL,
4991                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4992                                           "invalid timeout value 0");
4993         if (action_flags & MLX5_FLOW_ACTION_AGE)
4994                 return rte_flow_error_set(error, EINVAL,
4995                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4996                                           "duplicate age actions set");
4997         return 0;
4998 }
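
/*
 * A minimal usage sketch (values are illustrative): the AGE action needs a
 * non-zero timeout in seconds and may carry a user context that is reported
 * back when the flow ages out:
 *
 *   struct rte_flow_action_age age_conf = {
 *           .timeout = 10,   /* seconds without traffic before aging */
 *           .context = NULL, /* user context returned on the aged event */
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */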
4999
5000 /**
5001  * Validate the modify-header IPv4 DSCP actions.
5002  *
5003  * @param[in] action_flags
5004  *   Holds the actions detected until now.
5005  * @param[in] action
5006  *   Pointer to the modify action.
5007  * @param[in] item_flags
5008  *   Holds the items detected.
5009  * @param[out] error
5010  *   Pointer to error structure.
5011  *
5012  * @return
5013  *   0 on success, a negative errno value otherwise and rte_errno is set.
5014  */
5015 static int
5016 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5017                                          const struct rte_flow_action *action,
5018                                          const uint64_t item_flags,
5019                                          struct rte_flow_error *error)
5020 {
5021         int ret = 0;
5022
5023         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5024         if (!ret) {
5025                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5026                         return rte_flow_error_set(error, EINVAL,
5027                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5028                                                   NULL,
5029                                                   "no ipv4 item in pattern");
5030         }
5031         return ret;
5032 }
5033
5034 /**
5035  * Validate the modify-header IPv6 DSCP actions.
5036  *
5037  * @param[in] action_flags
5038  *   Holds the actions detected until now.
5039  * @param[in] action
5040  *   Pointer to the modify action.
5041  * @param[in] item_flags
5042  *   Holds the items detected.
5043  * @param[out] error
5044  *   Pointer to error structure.
5045  *
5046  * @return
5047  *   0 on success, a negative errno value otherwise and rte_errno is set.
5048  */
5049 static int
5050 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5051                                          const struct rte_flow_action *action,
5052                                          const uint64_t item_flags,
5053                                          struct rte_flow_error *error)
5054 {
5055         int ret = 0;
5056
5057         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5058         if (!ret) {
5059                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5060                         return rte_flow_error_set(error, EINVAL,
5061                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5062                                                   NULL,
5063                                                   "no ipv6 item in pattern");
5064         }
5065         return ret;
5066 }
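
/*
 * A minimal usage sketch (the DSCP value is illustrative): the rewrite is
 * only valid when the pattern carries the matching L3 item, e.g. an IPv4
 * item for the action below:
 *
 *   struct rte_flow_action_set_dscp dscp_conf = { .dscp = 46 };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP,
 *           .conf = &dscp_conf,
 *   };
 */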
5067
5068 /**
5069  * Match modify-header resource.
5070  *
5071  * @param list
5072  *   Pointer to the hash list.
5073  * @param entry
5074  *   Pointer to the existing resource entry object.
5075  * @param key
5076  *   Key of the new entry.
5077  * @param cb_ctx
5078  *   Pointer to the context holding the new modify-header resource.
5079  *
5080  * @return
5081  *   0 on matching, non-zero otherwise.
5082  */
5083 int
5084 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5085                         struct mlx5_hlist_entry *entry,
5086                         uint64_t key __rte_unused, void *cb_ctx)
5087 {
5088         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5089         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5090         struct mlx5_flow_dv_modify_hdr_resource *resource =
5091                         container_of(entry, typeof(*resource), entry);
5092         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5093
5094         key_len += ref->actions_num * sizeof(ref->actions[0]);
5095         return ref->actions_num != resource->actions_num ||
5096                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5097 }
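
/*
 * Note: the comparison key starts at @p ft_type and runs to the end of the
 * variable-length actions array, so two resources match only when the table
 * type, flags and every modification action are identical:
 *
 *   key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type)
 *             + ref->actions_num * sizeof(ref->actions[0]);
 */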
5098
5099 struct mlx5_hlist_entry *
5100 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5101                          void *cb_ctx)
5102 {
5103         struct mlx5_dev_ctx_shared *sh = list->ctx;
5104         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5105         struct mlx5dv_dr_domain *ns;
5106         struct mlx5_flow_dv_modify_hdr_resource *entry;
5107         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5108         int ret;
5109         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5110         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5111
5112         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5113                             SOCKET_ID_ANY);
5114         if (!entry) {
5115                 rte_flow_error_set(ctx->error, ENOMEM,
5116                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5117                                    "cannot allocate resource memory");
5118                 return NULL;
5119         }
5120         rte_memcpy(&entry->ft_type,
5121                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5122                    key_len + data_len);
5123         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5124                 ns = sh->fdb_domain;
5125         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5126                 ns = sh->tx_domain;
5127         else
5128                 ns = sh->rx_domain;
5129         ret = mlx5_flow_os_create_flow_action_modify_header
5130                                         (sh->ctx, ns, entry,
5131                                          data_len, &entry->action);
5132         if (ret) {
5133                 mlx5_free(entry);
5134                 rte_flow_error_set(ctx->error, ENOMEM,
5135                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5136                                    NULL, "cannot create modification action");
5137                 return NULL;
5138         }
5139         return &entry->entry;
5140 }
5141
5142 /**
5143  * Validate the sample action.
5144  *
5145  * @param[in, out] action_flags
5146  *   Holds the actions detected until now.
5147  * @param[in] action
5148  *   Pointer to the sample action.
5149  * @param[in] dev
5150  *   Pointer to the Ethernet device structure.
5151  * @param[in] attr
5152  *   Attributes of flow that includes this action.
5153  * @param[in] item_flags
5154  *   Holds the items detected.
5155  * @param[in] rss
5156  *   Pointer to the RSS action.
5157  * @param[out] sample_rss
5158  *   Pointer to the RSS action in sample action list.
5159  * @param[out] count
5160  *   Pointer to the COUNT action in sample action list.
5161  * @param[out] fdb_mirror_limit
5162  *   Pointer to the FDB mirror limitation flag.
5163  * @param[out] error
5164  *   Pointer to error structure.
5165  *
5166  * @return
5167  *   0 on success, a negative errno value otherwise and rte_errno is set.
5168  */
5169 static int
5170 flow_dv_validate_action_sample(uint64_t *action_flags,
5171                                const struct rte_flow_action *action,
5172                                struct rte_eth_dev *dev,
5173                                const struct rte_flow_attr *attr,
5174                                uint64_t item_flags,
5175                                const struct rte_flow_action_rss *rss,
5176                                const struct rte_flow_action_rss **sample_rss,
5177                                const struct rte_flow_action_count **count,
5178                                int *fdb_mirror_limit,
5179                                struct rte_flow_error *error)
5180 {
5181         struct mlx5_priv *priv = dev->data->dev_private;
5182         struct mlx5_dev_config *dev_conf = &priv->config;
5183         const struct rte_flow_action_sample *sample = action->conf;
5184         const struct rte_flow_action *act;
5185         uint64_t sub_action_flags = 0;
5186         uint16_t queue_index = 0xFFFF;
5187         int actions_n = 0;
5188         int ret;
5189
5190         if (!sample)
5191                 return rte_flow_error_set(error, EINVAL,
5192                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5193                                           "configuration cannot be NULL");
5194         if (sample->ratio == 0)
5195                 return rte_flow_error_set(error, EINVAL,
5196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5197                                           "ratio value starts from 1");
5198         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5199                 return rte_flow_error_set(error, ENOTSUP,
5200                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5201                                           NULL,
5202                                           "sample action not supported");
5203         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5204                 return rte_flow_error_set(error, EINVAL,
5205                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5206                                           "Multiple sample actions not "
5207                                           "supported");
5208         if (*action_flags & MLX5_FLOW_ACTION_METER)
5209                 return rte_flow_error_set(error, EINVAL,
5210                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5211                                           "wrong action order, meter should "
5212                                           "be after sample action");
5213         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5214                 return rte_flow_error_set(error, EINVAL,
5215                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5216                                           "wrong action order, jump should "
5217                                           "be after sample action");
5218         act = sample->actions;
5219         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5220                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5221                         return rte_flow_error_set(error, ENOTSUP,
5222                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5223                                                   act, "too many actions");
5224                 switch (act->type) {
5225                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5226                         ret = mlx5_flow_validate_action_queue(act,
5227                                                               sub_action_flags,
5228                                                               dev,
5229                                                               attr, error);
5230                         if (ret < 0)
5231                                 return ret;
5232                         queue_index = ((const struct rte_flow_action_queue *)
5233                                                         (act->conf))->index;
5234                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5235                         ++actions_n;
5236                         break;
5237                 case RTE_FLOW_ACTION_TYPE_RSS:
5238                         *sample_rss = act->conf;
5239                         ret = mlx5_flow_validate_action_rss(act,
5240                                                             sub_action_flags,
5241                                                             dev, attr,
5242                                                             item_flags,
5243                                                             error);
5244                         if (ret < 0)
5245                                 return ret;
5246                         if (rss && *sample_rss &&
5247                             ((*sample_rss)->level != rss->level ||
5248                             (*sample_rss)->types != rss->types))
5249                                 return rte_flow_error_set(error, ENOTSUP,
5250                                         RTE_FLOW_ERROR_TYPE_ACTION,
5251                                         NULL,
5252                                         "Can't use different RSS types "
5253                                         "or levels in the same flow");
5254                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5255                                 queue_index = (*sample_rss)->queue[0];
5256                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5257                         ++actions_n;
5258                         break;
5259                 case RTE_FLOW_ACTION_TYPE_MARK:
5260                         ret = flow_dv_validate_action_mark(dev, act,
5261                                                            sub_action_flags,
5262                                                            attr, error);
5263                         if (ret < 0)
5264                                 return ret;
5265                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5266                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5267                                                 MLX5_FLOW_ACTION_MARK_EXT;
5268                         else
5269                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5270                         ++actions_n;
5271                         break;
5272                 case RTE_FLOW_ACTION_TYPE_COUNT:
5273                         ret = flow_dv_validate_action_count
5274                                 (dev, is_shared_action_count(act),
5275                                  *action_flags | sub_action_flags,
5276                                  error);
5277                         if (ret < 0)
5278                                 return ret;
5279                         *count = act->conf;
5280                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5281                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5282                         ++actions_n;
5283                         break;
5284                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5285                         ret = flow_dv_validate_action_port_id(dev,
5286                                                               sub_action_flags,
5287                                                               act,
5288                                                               attr,
5289                                                               error);
5290                         if (ret)
5291                                 return ret;
5292                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5293                         ++actions_n;
5294                         break;
5295                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5296                         ret = flow_dv_validate_action_raw_encap_decap
5297                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5298                                  &actions_n, action, item_flags, error);
5299                         if (ret < 0)
5300                                 return ret;
5301                         ++actions_n;
5302                         break;
5303                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5304                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5305                         ret = flow_dv_validate_action_l2_encap(dev,
5306                                                                sub_action_flags,
5307                                                                act, attr,
5308                                                                error);
5309                         if (ret < 0)
5310                                 return ret;
5311                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5312                         ++actions_n;
5313                         break;
5314                 default:
5315                         return rte_flow_error_set(error, ENOTSUP,
5316                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5317                                                   NULL,
5318                                                   "unsupported optional "
5319                                                   "action");
5320                 }
5321         }
5322         if (attr->ingress && !attr->transfer) {
5323                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5324                                           MLX5_FLOW_ACTION_RSS)))
5325                         return rte_flow_error_set(error, EINVAL,
5326                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5327                                                   NULL,
5328                                                   "Ingress must have a dest "
5329                                                   "QUEUE for Sample");
5330         } else if (attr->egress && !attr->transfer) {
5331                 return rte_flow_error_set(error, ENOTSUP,
5332                                           RTE_FLOW_ERROR_TYPE_ACTION,
5333                                           NULL,
5334                                           "Sample only supports Ingress "
5335                                           "or E-Switch");
5336         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5337                 MLX5_ASSERT(attr->transfer);
5338                 if (sample->ratio > 1)
5339                         return rte_flow_error_set(error, ENOTSUP,
5340                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5341                                                   NULL,
5342                                                   "E-Switch doesn't support "
5343                                                   "any optional action "
5344                                                   "for sampling");
5345                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5346                         return rte_flow_error_set(error, ENOTSUP,
5347                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5348                                                   NULL,
5349                                                   "unsupported action QUEUE");
5350                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5351                         return rte_flow_error_set(error, ENOTSUP,
5352                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5353                                                   NULL,
5354                                                   "unsupported action RSS");
5355                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5356                         return rte_flow_error_set(error, EINVAL,
5357                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5358                                                   NULL,
5359                                                   "E-Switch must have a dest "
5360                                                   "port for mirroring");
5361                 if (!priv->config.hca_attr.reg_c_preserve &&
5362                      priv->representor_id != -1)
5363                         *fdb_mirror_limit = 1;
5364         }
5365         /* Continue validation for Xcap actions. */
5366         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5367             (queue_index == 0xFFFF ||
5368              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5369                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5370                      MLX5_FLOW_XCAP_ACTIONS)
5371                         return rte_flow_error_set(error, ENOTSUP,
5372                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5373                                                   NULL, "encap and decap "
5374                                                   "combination isn't "
5375                                                   "supported");
5376                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5377                                                         MLX5_FLOW_ACTION_ENCAP))
5378                         return rte_flow_error_set(error, ENOTSUP,
5379                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5380                                                   NULL, "encap is not supported"
5381                                                   " for ingress traffic");
5382         }
5383         return 0;
5384 }
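
/*
 * A minimal usage sketch (queue index and ratio are illustrative): sample
 * every second packet on ingress and steer the sampled copies to a
 * dedicated queue, satisfying the QUEUE/RSS requirement checked above:
 *
 *   struct rte_flow_action_queue queue_conf = { .index = 0 };
 *   struct rte_flow_action sub_actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action_sample sample_conf = {
 *           .ratio = 2, /* 1 samples every packet, N samples 1/N */
 *           .actions = sub_actions,
 *   };
 */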
5385
5386 /**
5387  * Find existing modify-header resource or create and register a new one.
5388  *
5389  * @param[in, out] dev
5390  *   Pointer to rte_eth_dev structure.
5391  * @param[in, out] resource
5392  *   Pointer to modify-header resource.
5393  * @param[in, out] dev_flow
5394  *   Pointer to the dev_flow.
5395  * @param[out] error
5396  *   Pointer to error structure.
5397  *
5398  * @return
5399  *   0 on success, otherwise -errno and rte_errno is set.
5400  */
5401 static int
5402 flow_dv_modify_hdr_resource_register
5403                         (struct rte_eth_dev *dev,
5404                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5405                          struct mlx5_flow *dev_flow,
5406                          struct rte_flow_error *error)
5407 {
5408         struct mlx5_priv *priv = dev->data->dev_private;
5409         struct mlx5_dev_ctx_shared *sh = priv->sh;
5410         uint32_t key_len = sizeof(*resource) -
5411                            offsetof(typeof(*resource), ft_type) +
5412                            resource->actions_num * sizeof(resource->actions[0]);
5413         struct mlx5_hlist_entry *entry;
5414         struct mlx5_flow_cb_ctx ctx = {
5415                 .error = error,
5416                 .data = resource,
5417         };
5418         uint64_t key64;
5419
5420         resource->flags = dev_flow->dv.group ? 0 :
5421                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5422         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5423                                     resource->flags))
5424                 return rte_flow_error_set(error, EOVERFLOW,
5425                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5426                                           "too many modify header items");
5427         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5428         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5429         if (!entry)
5430                 return -rte_errno;
5431         resource = container_of(entry, typeof(*resource), entry);
5432         dev_flow->handle->dvh.modify_hdr = resource;
5433         return 0;
5434 }
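
/*
 * Note: the 64-bit hash key is a raw checksum over the same span the match
 * callback compares, so a hash collision simply falls back to
 * flow_dv_modify_match_cb() for the exact comparison:
 *
 *   key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
 *   entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
 */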
5435
5436 /**
5437  * Get DV flow counter by index.
5438  *
5439  * @param[in] dev
5440  *   Pointer to the Ethernet device structure.
5441  * @param[in] idx
5442  *   mlx5 flow counter index in the container.
5443  * @param[out] ppool
5444  *   mlx5 flow counter pool in the container.
5445  *
5446  * @return
5447  *   Pointer to the counter, NULL otherwise.
5448  */
5449 static struct mlx5_flow_counter *
5450 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5451                            uint32_t idx,
5452                            struct mlx5_flow_counter_pool **ppool)
5453 {
5454         struct mlx5_priv *priv = dev->data->dev_private;
5455         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5456         struct mlx5_flow_counter_pool *pool;
5457
5458         /* Decrease to original index and clear shared bit. */
5459         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5460         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5461         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5462         MLX5_ASSERT(pool);
5463         if (ppool)
5464                 *ppool = pool;
5465         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5466 }
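
/*
 * Note (mirrors MLX5_MAKE_CNT_IDX): counter indexes are 1-based and
 * pool-major, with a high bit reserved to mark legacy shared counters,
 * which is why the decode above subtracts one and masks the shared bit:
 *
 *   idx        = pool_index * MLX5_COUNTERS_PER_POOL + offset + 1;
 *   shared_idx = idx | MLX5_CNT_SHARED_OFFSET;
 */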
5467
5468 /**
5469  * Check the devx counter belongs to the pool.
5470  *
5471  * @param[in] pool
5472  *   Pointer to the counter pool.
5473  * @param[in] id
5474  *   The counter devx ID.
5475  *
5476  * @return
5477  *   True if counter belongs to the pool, false otherwise.
5478  */
5479 static bool
5480 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5481 {
5482         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5483                    MLX5_COUNTERS_PER_POOL;
5484
5485         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5486                 return true;
5487         return false;
5488 }
5489
5490 /**
5491  * Get a pool by devx counter ID.
5492  *
5493  * @param[in] cmng
5494  *   Pointer to the counter management.
5495  * @param[in] id
5496  *   The counter devx ID.
5497  *
5498  * @return
5499  *   The counter pool pointer if it exists, NULL otherwise.
5500  */
5501 static struct mlx5_flow_counter_pool *
5502 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5503 {
5504         uint32_t i;
5505         struct mlx5_flow_counter_pool *pool = NULL;
5506
5507         rte_spinlock_lock(&cmng->pool_update_sl);
5508         /* Check last used pool. */
5509         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5510             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5511                 pool = cmng->pools[cmng->last_pool_idx];
5512                 goto out;
5513         }
5514         /* ID out of range means no suitable pool in the container. */
5515         if (id > cmng->max_id || id < cmng->min_id)
5516                 goto out;
5517         /*
5518          * Search the container from the end: counter IDs mostly increase
5519          * sequentially, so the last pool is likely to be the needed
5520          * one.
5521          */
5522         i = cmng->n_valid;
5523         while (i--) {
5524                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5525
5526                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5527                         pool = pool_tmp;
5528                         break;
5529                 }
5530         }
5531 out:
5532         rte_spinlock_unlock(&cmng->pool_update_sl);
5533         return pool;
5534 }
5535
5536 /**
5537  * Resize a counter container.
5538  *
5539  * @param[in] dev
5540  *   Pointer to the Ethernet device structure.
5541  *
5542  * @return
5543  *   0 on success, otherwise negative errno value and rte_errno is set.
5544  */
5545 static int
5546 flow_dv_container_resize(struct rte_eth_dev *dev)
5547 {
5548         struct mlx5_priv *priv = dev->data->dev_private;
5549         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5550         void *old_pools = cmng->pools;
5551         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5552         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5553         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5554
5555         if (!pools) {
5556                 rte_errno = ENOMEM;
5557                 return -ENOMEM;
5558         }
5559         if (old_pools)
5560                 memcpy(pools, old_pools, cmng->n *
5561                                        sizeof(struct mlx5_flow_counter_pool *));
5562         cmng->n = resize;
5563         cmng->pools = pools;
5564         if (old_pools)
5565                 mlx5_free(old_pools);
5566         return 0;
5567 }
5568
5569 /**
5570  * Query a devx flow counter.
5571  *
5572  * @param[in] dev
5573  *   Pointer to the Ethernet device structure.
5574  * @param[in] counter
5575  *   Index to the flow counter.
5576  * @param[out] pkts
5577  *   The statistics value of packets.
5578  * @param[out] bytes
5579  *   The statistics value of bytes.
5580  *
5581  * @return
5582  *   0 on success, otherwise a negative errno value and rte_errno is set.
5583  */
5584 static inline int
5585 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5586                      uint64_t *bytes)
5587 {
5588         struct mlx5_priv *priv = dev->data->dev_private;
5589         struct mlx5_flow_counter_pool *pool = NULL;
5590         struct mlx5_flow_counter *cnt;
5591         int offset;
5592
5593         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5594         MLX5_ASSERT(pool);
5595         if (priv->sh->cmng.counter_fallback)
5596                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5597                                         0, pkts, bytes, 0, NULL, NULL, 0);
5598         rte_spinlock_lock(&pool->sl);
5599         if (!pool->raw) {
5600                 *pkts = 0;
5601                 *bytes = 0;
5602         } else {
5603                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5604                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5605                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5606         }
5607         rte_spinlock_unlock(&pool->sl);
5608         return 0;
5609 }
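
/*
 * Note: in non-fallback mode the asynchronous batch query fills pool->raw
 * with per-counter big-endian { hits, bytes } pairs, so reading one counter
 * reduces to an indexed load plus a byte swap, as done above; only the
 * fallback path issues a per-counter DevX query.
 */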
5610
5611 /**
5612  * Create and initialize a new counter pool.
5613  *
5614  * @param[in] dev
5615  *   Pointer to the Ethernet device structure.
5616  * @param[out] dcs
5617  *   The devX counter handle.
5618  * @param[in] age
5619  *   Whether the pool is for counters that were allocated for aging.
5622  *
5623  * @return
5624  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5625  */
5626 static struct mlx5_flow_counter_pool *
5627 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5628                     uint32_t age)
5629 {
5630         struct mlx5_priv *priv = dev->data->dev_private;
5631         struct mlx5_flow_counter_pool *pool;
5632         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5633         bool fallback = priv->sh->cmng.counter_fallback;
5634         uint32_t size = sizeof(*pool);
5635
5636         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5637         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5638         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5639         if (!pool) {
5640                 rte_errno = ENOMEM;
5641                 return NULL;
5642         }
5643         pool->raw = NULL;
5644         pool->is_aged = !!age;
5645         pool->query_gen = 0;
5646         pool->min_dcs = dcs;
5647         rte_spinlock_init(&pool->sl);
5648         rte_spinlock_init(&pool->csl);
5649         TAILQ_INIT(&pool->counters[0]);
5650         TAILQ_INIT(&pool->counters[1]);
5651         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5652         rte_spinlock_lock(&cmng->pool_update_sl);
5653         pool->index = cmng->n_valid;
5654         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5655                 mlx5_free(pool);
5656                 rte_spinlock_unlock(&cmng->pool_update_sl);
5657                 return NULL;
5658         }
5659         cmng->pools[pool->index] = pool;
5660         cmng->n_valid++;
5661         if (unlikely(fallback)) {
5662                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5663
5664                 if (base < cmng->min_id)
5665                         cmng->min_id = base;
5666                 if (base > cmng->max_id)
5667                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5668                 cmng->last_pool_idx = pool->index;
5669         }
5670         rte_spinlock_unlock(&cmng->pool_update_sl);
5671         return pool;
5672 }
5673
5674 /**
5675  * Prepare a new counter and/or a new counter pool.
5676  *
5677  * @param[in] dev
5678  *   Pointer to the Ethernet device structure.
5679  * @param[out] cnt_free
5680  *   Where to put the pointer of a new counter.
5681  * @param[in] age
5682  *   Whether the pool is for counters that were allocated for aging.
5683  *
5684  * @return
5685  *   The counter pool pointer and @p cnt_free is set on success,
5686  *   NULL otherwise and rte_errno is set.
5687  */
5688 static struct mlx5_flow_counter_pool *
5689 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5690                              struct mlx5_flow_counter **cnt_free,
5691                              uint32_t age)
5692 {
5693         struct mlx5_priv *priv = dev->data->dev_private;
5694         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5695         struct mlx5_flow_counter_pool *pool;
5696         struct mlx5_counters tmp_tq;
5697         struct mlx5_devx_obj *dcs = NULL;
5698         struct mlx5_flow_counter *cnt;
5699         enum mlx5_counter_type cnt_type =
5700                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5701         bool fallback = priv->sh->cmng.counter_fallback;
5702         uint32_t i;
5703
5704         if (fallback) {
5705                 /* bulk_bitmap must be 0 for single counter allocation. */
5706                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5707                 if (!dcs)
5708                         return NULL;
5709                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5710                 if (!pool) {
5711                         pool = flow_dv_pool_create(dev, dcs, age);
5712                         if (!pool) {
5713                                 mlx5_devx_cmd_destroy(dcs);
5714                                 return NULL;
5715                         }
5716                 }
5717                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5718                 cnt = MLX5_POOL_GET_CNT(pool, i);
5719                 cnt->pool = pool;
5720                 cnt->dcs_when_free = dcs;
5721                 *cnt_free = cnt;
5722                 return pool;
5723         }
5724         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5725         if (!dcs) {
5726                 rte_errno = ENODATA;
5727                 return NULL;
5728         }
5729         pool = flow_dv_pool_create(dev, dcs, age);
5730         if (!pool) {
5731                 mlx5_devx_cmd_destroy(dcs);
5732                 return NULL;
5733         }
5734         TAILQ_INIT(&tmp_tq);
5735         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5736                 cnt = MLX5_POOL_GET_CNT(pool, i);
5737                 cnt->pool = pool;
5738                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5739         }
5740         rte_spinlock_lock(&cmng->csl[cnt_type]);
5741         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5742         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5743         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5744         (*cnt_free)->pool = pool;
5745         return pool;
5746 }
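
/*
 * Note: in non-fallback mode the counters of a new pool are seeded in bulk;
 * entries 1..MLX5_COUNTERS_PER_POOL-1 go onto the per-type global free list
 * under its spinlock, while entry 0 is handed straight back to the caller
 * through @p cnt_free, saving one free-list round trip.
 */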
5747
5748 /**
5749  * Allocate a flow counter.
5750  *
5751  * @param[in] dev
5752  *   Pointer to the Ethernet device structure.
5753  * @param[in] age
5754  *   Whether the counter was allocated for aging.
5755  *
5756  * @return
5757  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5758  */
5759 static uint32_t
5760 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5761 {
5762         struct mlx5_priv *priv = dev->data->dev_private;
5763         struct mlx5_flow_counter_pool *pool = NULL;
5764         struct mlx5_flow_counter *cnt_free = NULL;
5765         bool fallback = priv->sh->cmng.counter_fallback;
5766         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5767         enum mlx5_counter_type cnt_type =
5768                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5769         uint32_t cnt_idx;
5770
5771         if (!priv->config.devx) {
5772                 rte_errno = ENOTSUP;
5773                 return 0;
5774         }
5775         /* Get free counters from container. */
5776         rte_spinlock_lock(&cmng->csl[cnt_type]);
5777         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5778         if (cnt_free)
5779                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5780         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5781         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5782                 goto err;
5783         pool = cnt_free->pool;
5784         if (fallback)
5785                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5786         /* Create a DV counter action only on first-time usage. */
5787         if (!cnt_free->action) {
5788                 uint16_t offset;
5789                 struct mlx5_devx_obj *dcs;
5790                 int ret;
5791
5792                 if (!fallback) {
5793                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5794                         dcs = pool->min_dcs;
5795                 } else {
5796                         offset = 0;
5797                         dcs = cnt_free->dcs_when_free;
5798                 }
5799                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5800                                                             &cnt_free->action);
5801                 if (ret) {
5802                         rte_errno = errno;
5803                         goto err;
5804                 }
5805         }
5806         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5807                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5808         /* Update the counter reset values. */
5809         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5810                                  &cnt_free->bytes))
5811                 goto err;
5812         if (!fallback && !priv->sh->cmng.query_thread_on)
5813                 /* Start the asynchronous batch query by the host thread. */
5814                 mlx5_set_query_alarm(priv->sh);
5815         /*
5816          * When the count action isn't shared (by ID), the shared_info
5817          * field is used for the indirect action API's refcnt.
5818          * When the counter action is shared neither by ID nor by the
5819          * indirect action API, the refcnt must be 1.
5820          */
5821         cnt_free->shared_info.refcnt = 1;
5822         return cnt_idx;
5823 err:
5824         if (cnt_free) {
5825                 cnt_free->pool = pool;
5826                 if (fallback)
5827                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5828                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5829                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5830                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5831         }
5832         return 0;
5833 }
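
/*
 * A minimal internal usage sketch (error handling elided; dev_flow and n
 * are hypothetical here): allocate a counter, attach its DV action to a
 * flow and release it when the flow is destroyed.
 *
 *   uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);
 *   if (cnt_idx) {
 *           struct mlx5_flow_counter *cnt =
 *                   flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
 *           dev_flow->dv.actions[n++] = cnt->action;
 *           ...
 *           flow_dv_counter_free(dev, cnt_idx);
 *   }
 */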
5834
5835 /**
5836  * Allocate a shared flow counter.
5837  *
5838  * @param[in] ctx
5839  *   Pointer to the shared counter configuration.
5840  * @param[out] data
5841  *   Pointer to save the allocated counter index.
5842  *
5843  * @return
5844  *   0 on success; the allocated counter index is saved in @p data.
5845  */
5847 static int32_t
5848 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5849 {
5850         struct mlx5_shared_counter_conf *conf = ctx;
5851         struct rte_eth_dev *dev = conf->dev;
5852         struct mlx5_flow_counter *cnt;
5853
5854         data->dword = flow_dv_counter_alloc(dev, 0);
5855         data->dword |= MLX5_CNT_SHARED_OFFSET;
5856         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5857         cnt->shared_info.id = conf->id;
5858         return 0;
5859 }
5860
5861 /**
5862  * Get a shared flow counter.
5863  *
5864  * @param[in] dev
5865  *   Pointer to the Ethernet device structure.
5866  * @param[in] id
5867  *   Counter identifier.
5868  *
5869  * @return
5870  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5871  */
5872 static uint32_t
5873 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5874 {
5875         struct mlx5_priv *priv = dev->data->dev_private;
5876         struct mlx5_shared_counter_conf conf = {
5877                 .dev = dev,
5878                 .id = id,
5879         };
5880         union mlx5_l3t_data data = {
5881                 .dword = 0,
5882         };
5883
5884         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5885                                flow_dv_counter_alloc_shared_cb, &conf);
5886         return data.dword;
5887 }
5888
5889 /**
5890  * Get age param from counter index.
5891  *
5892  * @param[in] dev
5893  *   Pointer to the Ethernet device structure.
5894  * @param[in] counter
5895  *   Index to the counter handler.
5896  *
5897  * @return
5898  *   The aging parameter specified for the counter index.
5899  */
5900 static struct mlx5_age_param*
5901 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5902                                 uint32_t counter)
5903 {
5904         struct mlx5_flow_counter *cnt;
5905         struct mlx5_flow_counter_pool *pool = NULL;
5906
5907         flow_dv_counter_get_by_idx(dev, counter, &pool);
5908         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5909         cnt = MLX5_POOL_GET_CNT(pool, counter);
5910         return MLX5_CNT_TO_AGE(cnt);
5911 }
5912
5913 /**
5914  * Remove a flow counter from aged counter list.
5915  *
5916  * @param[in] dev
5917  *   Pointer to the Ethernet device structure.
5918  * @param[in] counter
5919  *   Index to the counter handler.
5920  * @param[in] cnt
5921  *   Pointer to the counter handler.
5922  */
5923 static void
5924 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5925                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5926 {
5927         struct mlx5_age_info *age_info;
5928         struct mlx5_age_param *age_param;
5929         struct mlx5_priv *priv = dev->data->dev_private;
5930         uint16_t expected = AGE_CANDIDATE;
5931
5932         age_info = GET_PORT_AGE_INFO(priv);
5933         age_param = flow_dv_counter_idx_get_age(dev, counter);
5934         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5935                                          AGE_FREE, false, __ATOMIC_RELAXED,
5936                                          __ATOMIC_RELAXED)) {
5937                 /*
5938                  * We need the lock even on age timeout, since the
5939                  * counter may still be in process.
5940                  */
5941                 rte_spinlock_lock(&age_info->aged_sl);
5942                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5943                 rte_spinlock_unlock(&age_info->aged_sl);
5944                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5945         }
5946 }
5947
5948 /**
5949  * Release a flow counter.
5950  *
5951  * @param[in] dev
5952  *   Pointer to the Ethernet device structure.
5953  * @param[in] counter
5954  *   Index to the counter handler.
5955  */
5956 static void
5957 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5958 {
5959         struct mlx5_priv *priv = dev->data->dev_private;
5960         struct mlx5_flow_counter_pool *pool = NULL;
5961         struct mlx5_flow_counter *cnt;
5962         enum mlx5_counter_type cnt_type;
5963
5964         if (!counter)
5965                 return;
5966         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5967         MLX5_ASSERT(pool);
5968         /*
5969          * If the counter action is shared by ID, the l3t_clear_entry function
5970          * reduces its reference counter. If after the reduction the action is
5971          * still referenced, the function returns here and does not release it.
5972          */
5973         if (IS_LEGACY_SHARED_CNT(counter) &&
5974             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5975                 return;
5976         /*
5977          * If the counter action is shared by the indirect action API, the
5978          * atomic function reduces its reference counter. If after the
5979          * reduction the action is still referenced, the function returns
5980          * here and does not release it.
5981          * When the counter action is shared neither by ID nor by the
5982          * indirect action API, the refcnt is 1 before the reduction, so
5983          * this condition fails and the function does not return here.
5984          */
5985         if (!IS_LEGACY_SHARED_CNT(counter) &&
5986             __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED))
5987                 return;
5988         if (pool->is_aged)
5989                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5990         cnt->pool = pool;
5991         /*
5992          * Put the counter back to the list to be updated in non-fallback
5993          * mode. Currently, two lists are used alternately: while one is
5994          * being queried, the freed counter is added to the other list,
5995          * selected by the pool query_gen value. After the query finishes,
5996          * the counters in that list are moved to the global container
5997          * counter list. The lists switch when a query starts, so the
5998          * query callback and the release function operate on different lists.
5999          */
6000         if (!priv->sh->cmng.counter_fallback) {
6001                 rte_spinlock_lock(&pool->csl);
6002                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6003                 rte_spinlock_unlock(&pool->csl);
6004         } else {
6005                 cnt->dcs_when_free = cnt->dcs_when_active;
6006                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6007                                            MLX5_COUNTER_TYPE_ORIGIN;
6008                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6009                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6010                                   cnt, next);
6011                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6012         }
6013 }
6014
6015 /**
6016  * Resize a meter id container.
6017  *
6018  * @param[in] dev
6019  *   Pointer to the Ethernet device structure.
6020  *
6021  * @return
6022  *   0 on success, otherwise negative errno value and rte_errno is set.
6023  */
6024 static int
6025 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6026 {
6027         struct mlx5_priv *priv = dev->data->dev_private;
6028         struct mlx5_aso_mtr_pools_mng *pools_mng =
6029                                 &priv->sh->mtrmng->pools_mng;
6030         void *old_pools = pools_mng->pools;
6031         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6032         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6033         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6034
6035         if (!pools) {
6036                 rte_errno = ENOMEM;
6037                 return -ENOMEM;
6038         }
6039         if (!pools_mng->n)
6040                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6041                         mlx5_free(pools);
6042                         return -ENOMEM;
6043                 }
6044         if (old_pools)
6045                 memcpy(pools, old_pools, pools_mng->n *
6046                                        sizeof(struct mlx5_aso_mtr_pool *));
6047         pools_mng->n = resize;
6048         pools_mng->pools = pools;
6049         if (old_pools)
6050                 mlx5_free(old_pools);
6051         return 0;
6052 }
6053
6054 /**
6055  * Create a new ASO meter pool and prepare a free meter from it.
6056  *
6057  * @param[in] dev
6058  *   Pointer to the Ethernet device structure.
6059  * @param[out] mtr_free
6060  *   Where to put the pointer of a new meter.
6061  *
6062  * @return
6063  *   The meter pool pointer and @p mtr_free is set on success,
6064  *   NULL otherwise and rte_errno is set.
6065  */
6066 static struct mlx5_aso_mtr_pool *
6067 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6068                              struct mlx5_aso_mtr **mtr_free)
6069 {
6070         struct mlx5_priv *priv = dev->data->dev_private;
6071         struct mlx5_aso_mtr_pools_mng *pools_mng =
6072                                 &priv->sh->mtrmng->pools_mng;
6073         struct mlx5_aso_mtr_pool *pool = NULL;
6074         struct mlx5_devx_obj *dcs = NULL;
6075         uint32_t i;
6076         uint32_t log_obj_size;
6077
6078         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6079         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6080                         priv->sh->pdn, log_obj_size);
6081         if (!dcs) {
6082                 rte_errno = ENODATA;
6083                 return NULL;
6084         }
6085         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6086         if (!pool) {
6087                 rte_errno = ENOMEM;
6088                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6089                 return NULL;
6090         }
6091         pool->devx_obj = dcs;
6092         pool->index = pools_mng->n_valid;
6093         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6094                 mlx5_free(pool);
6095                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6096                 return NULL;
6097         }
6098         pools_mng->pools[pool->index] = pool;
6099         pools_mng->n_valid++;
6100         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6101                 pool->mtrs[i].offset = i;
6102                 LIST_INSERT_HEAD(&pools_mng->meters,
6103                                                 &pool->mtrs[i], next);
6104         }
6105         pool->mtrs[0].offset = 0;
6106         *mtr_free = &pool->mtrs[0];
6107         return pool;
6108 }
6109
6110 /**
6111  * Release a flow meter into pool.
6112  *
6113  * @param[in] dev
6114  *   Pointer to the Ethernet device structure.
6115  * @param[in] mtr_idx
6116  *   Index to aso flow meter.
6117  */
6118 static void
6119 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6120 {
6121         struct mlx5_priv *priv = dev->data->dev_private;
6122         struct mlx5_aso_mtr_pools_mng *pools_mng =
6123                                 &priv->sh->mtrmng->pools_mng;
6124         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6125
6126         MLX5_ASSERT(aso_mtr);
6127         rte_spinlock_lock(&pools_mng->mtrsl);
6128         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6129         aso_mtr->state = ASO_METER_FREE;
6130         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6131         rte_spinlock_unlock(&pools_mng->mtrsl);
6132 }
6133
6134 /**
6135  * Allocate an ASO flow meter.
6136  *
6137  * @param[in] dev
6138  *   Pointer to the Ethernet device structure.
6139  *
6140  * @return
6141  *   Index to aso flow meter on success, 0 otherwise and rte_errno is set.
6142  */
6143 static uint32_t
6144 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6145 {
6146         struct mlx5_priv *priv = dev->data->dev_private;
6147         struct mlx5_aso_mtr *mtr_free = NULL;
6148         struct mlx5_aso_mtr_pools_mng *pools_mng =
6149                                 &priv->sh->mtrmng->pools_mng;
6150         struct mlx5_aso_mtr_pool *pool;
6151         uint32_t mtr_idx = 0;
6152
6153         if (!priv->config.devx) {
6154                 rte_errno = ENOTSUP;
6155                 return 0;
6156         }
6157         /* Get a free meter from the pools management, */
6158         /* creating a new pool below when none is available. */
6159         rte_spinlock_lock(&pools_mng->mtrsl);
6160         mtr_free = LIST_FIRST(&pools_mng->meters);
6161         if (mtr_free)
6162                 LIST_REMOVE(mtr_free, next);
6163         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6164                 rte_spinlock_unlock(&pools_mng->mtrsl);
6165                 return 0;
6166         }
6167         mtr_free->state = ASO_METER_WAIT;
6168         rte_spinlock_unlock(&pools_mng->mtrsl);
6169         pool = container_of(mtr_free,
6170                         struct mlx5_aso_mtr_pool,
6171                         mtrs[mtr_free->offset]);
6172         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6173         if (!mtr_free->fm.meter_action) {
6174 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6175                 struct rte_flow_error error;
6176                 uint8_t reg_id;
6177
6178                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
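                /*
                 * The ASO action reports the meter color through a REG_C
                 * register: reg_id - REG_C_0 selects that register, and
                 * (1 << MLX5_FLOW_COLOR_GREEN) presumably encodes the
                 * initial (green) color state.
                 */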
6179                 mtr_free->fm.meter_action =
6180                         mlx5_glue->dv_create_flow_action_aso
6181                                                 (priv->sh->rx_domain,
6182                                                  pool->devx_obj->obj,
6183                                                  mtr_free->offset,
6184                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6185                                                  reg_id - REG_C_0);
6186 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6187                 if (!mtr_free->fm.meter_action) {
6188                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6189                         return 0;
6190                 }
6191         }
6192         return mtr_idx;
6193 }
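
/*
 * Usage sketch (illustrative only, not taken from the driver): a meter
 * creation path would pair the two helpers above roughly as:
 *
 *   uint32_t idx = flow_dv_mtr_alloc(dev);
 *
 *   if (!idx)
 *           return -rte_errno;
 *   ...program the meter parameters through an ASO WQE...
 *   flow_dv_aso_mtr_release_to_pool(dev, idx);  (on destroy)
 */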
6194
6195 /**
6196  * Verify that the @p attributes will be correctly understood by the NIC
6197  * and are valid for this device.
6198  *
6199  * @param[in] dev
6200  *   Pointer to dev struct.
6201  * @param[in] tunnel
6202  *   Pointer to the tunnel offload context, or NULL.
6203  * @param[in] attributes
6204  *   Pointer to flow attributes.
6205  * @param[in] grp_info
6206  *   Pointer to flow group translation info.
6207  * @param[out] error
6208  *   Pointer to error structure.
6209  *
6210  * @return
6211  *   - 0 on success and non-root table.
6212  *   - 1 on success and root table.
6213  *   - a negative errno value otherwise and rte_errno is set.
6212  */
6213 static int
6214 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6215                             const struct mlx5_flow_tunnel *tunnel,
6216                             const struct rte_flow_attr *attributes,
6217                             const struct flow_grp_info *grp_info,
6218                             struct rte_flow_error *error)
6219 {
6220         struct mlx5_priv *priv = dev->data->dev_private;
6221         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6222         int ret = 0;
6223
6224 #ifndef HAVE_MLX5DV_DR
6225         RTE_SET_USED(tunnel);
6226         RTE_SET_USED(grp_info);
6227         if (attributes->group)
6228                 return rte_flow_error_set(error, ENOTSUP,
6229                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6230                                           NULL,
6231                                           "groups are not supported");
6232 #else
6233         uint32_t table = 0;
6234
6235         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6236                                        grp_info, error);
6237         if (ret)
6238                 return ret;
6239         if (!table)
6240                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6241 #endif
6242         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6243             attributes->priority > lowest_priority)
6244                 return rte_flow_error_set(error, ENOTSUP,
6245                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6246                                           NULL,
6247                                           "priority out of range");
6248         if (attributes->transfer) {
6249                 if (!priv->config.dv_esw_en)
6250                         return rte_flow_error_set
6251                                 (error, ENOTSUP,
6252                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6253                                  "E-Switch dr is not supported");
6254                 if (!(priv->representor || priv->master))
6255                         return rte_flow_error_set
6256                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6257                                  NULL, "E-Switch configuration can only be"
6258                                  " done by a master or a representor device");
6259                 if (attributes->egress)
6260                         return rte_flow_error_set
6261                                 (error, ENOTSUP,
6262                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6263                                  "egress is not supported");
6264         }
6265         if (!(attributes->egress ^ attributes->ingress))
6266                 return rte_flow_error_set(error, ENOTSUP,
6267                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6268                                           "must specify exactly one of "
6269                                           "ingress or egress");
6270         return ret;
6271 }
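
/*
 * Example (illustrative only): the ingress/egress exclusivity check
 * above rejects attributes that set both directions, e.g.:
 *
 *   struct rte_flow_attr attr = {
 *           .group = 1, .ingress = 1, .egress = 1,  (rejected)
 *   };
 *
 * while a transfer rule must leave .egress clear and can only be
 * created through a master or representor port.
 */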
6272
6273 /**
6274  * Internal validation function. For validating both actions and items.
6275  *
6276  * @param[in] dev
6277  *   Pointer to the rte_eth_dev structure.
6278  * @param[in] attr
6279  *   Pointer to the flow attributes.
6280  * @param[in] items
6281  *   Pointer to the list of items.
6282  * @param[in] actions
6283  *   Pointer to the list of actions.
6284  * @param[in] external
6285  *   This flow rule is created by a request external to the PMD.
6286  * @param[in] hairpin
6287  *   Number of hairpin TX actions, 0 means classic flow.
6288  * @param[out] error
6289  *   Pointer to the error structure.
6290  *
6291  * @return
6292  *   0 on success, a negative errno value otherwise and rte_errno is set.
6293  */
6294 static int
6295 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6296                  const struct rte_flow_item items[],
6297                  const struct rte_flow_action actions[],
6298                  bool external, int hairpin, struct rte_flow_error *error)
6299 {
6300         int ret;
6301         uint64_t action_flags = 0;
6302         uint64_t item_flags = 0;
6303         uint64_t last_item = 0;
6304         uint8_t next_protocol = 0xff;
6305         uint16_t ether_type = 0;
6306         int actions_n = 0;
6307         uint8_t item_ipv6_proto = 0;
6308         int fdb_mirror_limit = 0;
6309         int modify_after_mirror = 0;
6310         const struct rte_flow_item *geneve_item = NULL;
6311         const struct rte_flow_item *gre_item = NULL;
6312         const struct rte_flow_item *gtp_item = NULL;
6313         const struct rte_flow_action_raw_decap *decap;
6314         const struct rte_flow_action_raw_encap *encap;
6315         const struct rte_flow_action_rss *rss = NULL;
6316         const struct rte_flow_action_rss *sample_rss = NULL;
6317         const struct rte_flow_action_count *sample_count = NULL;
6318         const struct rte_flow_item_tcp nic_tcp_mask = {
6319                 .hdr = {
6320                         .tcp_flags = 0xFF,
6321                         .src_port = RTE_BE16(UINT16_MAX),
6322                         .dst_port = RTE_BE16(UINT16_MAX),
6323                 }
6324         };
6325         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6326                 .hdr = {
6327                         .src_addr =
6328                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6329                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6330                         .dst_addr =
6331                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6332                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6333                         .vtc_flow = RTE_BE32(0xffffffff),
6334                         .proto = 0xff,
6335                         .hop_limits = 0xff,
6336                 },
6337                 .has_frag_ext = 1,
6338         };
6339         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6340                 .hdr = {
6341                         .common = {
6342                                 .u32 =
6343                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6344                                         .type = 0xFF,
6345                                         }).u32),
6346                         },
6347                         .dummy[0] = 0xffffffff,
6348                 },
6349         };
6350         struct mlx5_priv *priv = dev->data->dev_private;
6351         struct mlx5_dev_config *dev_conf = &priv->config;
6352         uint16_t queue_index = 0xFFFF;
6353         const struct rte_flow_item_vlan *vlan_m = NULL;
6354         uint32_t rw_act_num = 0;
6355         uint64_t is_root;
6356         const struct mlx5_flow_tunnel *tunnel;
6357         struct flow_grp_info grp_info = {
6358                 .external = !!external,
6359                 .transfer = !!attr->transfer,
6360                 .fdb_def_rule = !!priv->fdb_def_rule,
6361         };
6362         const struct rte_eth_hairpin_conf *conf;
6363         bool def_policy = false;
6364
6365         if (items == NULL)
6366                 return -1;
6367         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
6368                 tunnel = flow_items_to_tunnel(items);
6369                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6370                                 MLX5_FLOW_ACTION_DECAP;
6371         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
6372                 tunnel = flow_actions_to_tunnel(actions);
6373                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6374         } else {
6375                 tunnel = NULL;
6376         }
6377         if (tunnel && priv->representor)
6378                 return rte_flow_error_set(error, ENOTSUP,
6379                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6380                                           "decap not supported "
6381                                           "for VF representor");
6382         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6383                                 (dev, tunnel, attr, items, actions);
6384         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6385         if (ret < 0)
6386                 return ret;
6387         is_root = (uint64_t)ret;
6388         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6389                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6390                 int type = items->type;
6391
6392                 if (!mlx5_flow_os_item_supported(type))
6393                         return rte_flow_error_set(error, ENOTSUP,
6394                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6395                                                   NULL, "item not supported");
6396                 switch (type) {
6397                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
6398                         if (items[0].type != (typeof(items[0].type))
6399                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
6400                                 return rte_flow_error_set
6401                                                 (error, EINVAL,
6402                                                 RTE_FLOW_ERROR_TYPE_ITEM,
6403                                                 NULL, "MLX5 private items "
6404                                                 "must be the first");
6405                         break;
6406                 case RTE_FLOW_ITEM_TYPE_VOID:
6407                         break;
6408                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6409                         ret = flow_dv_validate_item_port_id
6410                                         (dev, items, attr, item_flags, error);
6411                         if (ret < 0)
6412                                 return ret;
6413                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6414                         break;
6415                 case RTE_FLOW_ITEM_TYPE_ETH:
6416                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6417                                                           true, error);
6418                         if (ret < 0)
6419                                 return ret;
6420                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6421                                              MLX5_FLOW_LAYER_OUTER_L2;
6422                         if (items->mask != NULL && items->spec != NULL) {
6423                                 ether_type =
6424                                         ((const struct rte_flow_item_eth *)
6425                                          items->spec)->type;
6426                                 ether_type &=
6427                                         ((const struct rte_flow_item_eth *)
6428                                          items->mask)->type;
6429                                 ether_type = rte_be_to_cpu_16(ether_type);
6430                         } else {
6431                                 ether_type = 0;
6432                         }
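                        /*
                         * ether_type now holds the effectively matched
                         * EtherType (spec & mask); the L3 item validation
                         * below cross-checks the next layer against it.
                         */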
6433                         break;
6434                 case RTE_FLOW_ITEM_TYPE_VLAN:
6435                         ret = flow_dv_validate_item_vlan(items, item_flags,
6436                                                          dev, error);
6437                         if (ret < 0)
6438                                 return ret;
6439                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6440                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6441                         if (items->mask != NULL && items->spec != NULL) {
6442                                 ether_type =
6443                                         ((const struct rte_flow_item_vlan *)
6444                                          items->spec)->inner_type;
6445                                 ether_type &=
6446                                         ((const struct rte_flow_item_vlan *)
6447                                          items->mask)->inner_type;
6448                                 ether_type = rte_be_to_cpu_16(ether_type);
6449                         } else {
6450                                 ether_type = 0;
6451                         }
6452                         /* Store outer VLAN mask for of_push_vlan action. */
6453                         if (!tunnel)
6454                                 vlan_m = items->mask;
6455                         break;
6456                 case RTE_FLOW_ITEM_TYPE_IPV4:
6457                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6458                                                   &item_flags, &tunnel);
6459                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6460                                                          last_item, ether_type,
6461                                                          error);
6462                         if (ret < 0)
6463                                 return ret;
6464                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6465                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6466                         if (items->mask != NULL &&
6467                             ((const struct rte_flow_item_ipv4 *)
6468                              items->mask)->hdr.next_proto_id) {
6469                                 next_protocol =
6470                                         ((const struct rte_flow_item_ipv4 *)
6471                                          (items->spec))->hdr.next_proto_id;
6472                                 next_protocol &=
6473                                         ((const struct rte_flow_item_ipv4 *)
6474                                          (items->mask))->hdr.next_proto_id;
6475                         } else {
6476                                 /* Reset for inner layer. */
6477                                 next_protocol = 0xff;
6478                         }
6479                         break;
6480                 case RTE_FLOW_ITEM_TYPE_IPV6:
6481                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6482                                                   &item_flags, &tunnel);
6483                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6484                                                            last_item,
6485                                                            ether_type,
6486                                                            &nic_ipv6_mask,
6487                                                            error);
6488                         if (ret < 0)
6489                                 return ret;
6490                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6491                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6492                         if (items->mask != NULL &&
6493                             ((const struct rte_flow_item_ipv6 *)
6494                              items->mask)->hdr.proto) {
6495                                 item_ipv6_proto =
6496                                         ((const struct rte_flow_item_ipv6 *)
6497                                          items->spec)->hdr.proto;
6498                                 next_protocol =
6499                                         ((const struct rte_flow_item_ipv6 *)
6500                                          items->spec)->hdr.proto;
6501                                 next_protocol &=
6502                                         ((const struct rte_flow_item_ipv6 *)
6503                                          items->mask)->hdr.proto;
6504                         } else {
6505                                 /* Reset for inner layer. */
6506                                 next_protocol = 0xff;
6507                         }
6508                         break;
6509                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6510                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6511                                                                   item_flags,
6512                                                                   error);
6513                         if (ret < 0)
6514                                 return ret;
6515                         last_item = tunnel ?
6516                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6517                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6518                         if (items->mask != NULL &&
6519                             ((const struct rte_flow_item_ipv6_frag_ext *)
6520                              items->mask)->hdr.next_header) {
6521                                 next_protocol =
6522                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6523                                  items->spec)->hdr.next_header;
6524                                 next_protocol &=
6525                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6526                                  items->mask)->hdr.next_header;
6527                         } else {
6528                                 /* Reset for inner layer. */
6529                                 next_protocol = 0xff;
6530                         }
6531                         break;
6532                 case RTE_FLOW_ITEM_TYPE_TCP:
6533                         ret = mlx5_flow_validate_item_tcp
6534                                                 (items, item_flags,
6535                                                  next_protocol,
6536                                                  &nic_tcp_mask,
6537                                                  error);
6538                         if (ret < 0)
6539                                 return ret;
6540                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6541                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6542                         break;
6543                 case RTE_FLOW_ITEM_TYPE_UDP:
6544                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6545                                                           next_protocol,
6546                                                           error);
6547                         if (ret < 0)
6548                                 return ret;
6549                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6550                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6551                         break;
6552                 case RTE_FLOW_ITEM_TYPE_GRE:
6553                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6554                                                           next_protocol, error);
6555                         if (ret < 0)
6556                                 return ret;
6557                         gre_item = items;
6558                         last_item = MLX5_FLOW_LAYER_GRE;
6559                         break;
6560                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6561                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6562                                                             next_protocol,
6563                                                             error);
6564                         if (ret < 0)
6565                                 return ret;
6566                         last_item = MLX5_FLOW_LAYER_NVGRE;
6567                         break;
6568                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6569                         ret = mlx5_flow_validate_item_gre_key
6570                                 (items, item_flags, gre_item, error);
6571                         if (ret < 0)
6572                                 return ret;
6573                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6574                         break;
6575                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6576                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6577                                                             error);
6578                         if (ret < 0)
6579                                 return ret;
6580                         last_item = MLX5_FLOW_LAYER_VXLAN;
6581                         break;
6582                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6583                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6584                                                                 item_flags, dev,
6585                                                                 error);
6586                         if (ret < 0)
6587                                 return ret;
6588                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6589                         break;
6590                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6591                         ret = mlx5_flow_validate_item_geneve(items,
6592                                                              item_flags, dev,
6593                                                              error);
6594                         if (ret < 0)
6595                                 return ret;
6596                         geneve_item = items;
6597                         last_item = MLX5_FLOW_LAYER_GENEVE;
6598                         break;
6599                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6600                         ret = mlx5_flow_validate_item_geneve_opt(items,
6601                                                                  last_item,
6602                                                                  geneve_item,
6603                                                                  dev,
6604                                                                  error);
6605                         if (ret < 0)
6606                                 return ret;
6607                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6608                         break;
6609                 case RTE_FLOW_ITEM_TYPE_MPLS:
6610                         ret = mlx5_flow_validate_item_mpls(dev, items,
6611                                                            item_flags,
6612                                                            last_item, error);
6613                         if (ret < 0)
6614                                 return ret;
6615                         last_item = MLX5_FLOW_LAYER_MPLS;
6616                         break;
6618                 case RTE_FLOW_ITEM_TYPE_MARK:
6619                         ret = flow_dv_validate_item_mark(dev, items, attr,
6620                                                          error);
6621                         if (ret < 0)
6622                                 return ret;
6623                         last_item = MLX5_FLOW_ITEM_MARK;
6624                         break;
6625                 case RTE_FLOW_ITEM_TYPE_META:
6626                         ret = flow_dv_validate_item_meta(dev, items, attr,
6627                                                          error);
6628                         if (ret < 0)
6629                                 return ret;
6630                         last_item = MLX5_FLOW_ITEM_METADATA;
6631                         break;
6632                 case RTE_FLOW_ITEM_TYPE_ICMP:
6633                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6634                                                            next_protocol,
6635                                                            error);
6636                         if (ret < 0)
6637                                 return ret;
6638                         last_item = MLX5_FLOW_LAYER_ICMP;
6639                         break;
6640                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6641                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6642                                                             next_protocol,
6643                                                             error);
6644                         if (ret < 0)
6645                                 return ret;
6646                         item_ipv6_proto = IPPROTO_ICMPV6;
6647                         last_item = MLX5_FLOW_LAYER_ICMP6;
6648                         break;
6649                 case RTE_FLOW_ITEM_TYPE_TAG:
6650                         ret = flow_dv_validate_item_tag(dev, items,
6651                                                         attr, error);
6652                         if (ret < 0)
6653                                 return ret;
6654                         last_item = MLX5_FLOW_ITEM_TAG;
6655                         break;
6656                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6657                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6658                         break;
6659                 case RTE_FLOW_ITEM_TYPE_GTP:
6660                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
6661                                                         error);
6662                         if (ret < 0)
6663                                 return ret;
6664                         gtp_item = items;
6665                         last_item = MLX5_FLOW_LAYER_GTP;
6666                         break;
6667                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6668                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
6669                                                             gtp_item, attr,
6670                                                             error);
6671                         if (ret < 0)
6672                                 return ret;
6673                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
6674                         break;
6675                 case RTE_FLOW_ITEM_TYPE_ECPRI:
6676                         /* Capacity will be checked in the translate stage. */
6677                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
6678                                                             last_item,
6679                                                             ether_type,
6680                                                             &nic_ecpri_mask,
6681                                                             error);
6682                         if (ret < 0)
6683                                 return ret;
6684                         last_item = MLX5_FLOW_LAYER_ECPRI;
6685                         break;
6686                 default:
6687                         return rte_flow_error_set(error, ENOTSUP,
6688                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6689                                                   NULL, "item not supported");
6690                 }
6691                 item_flags |= last_item;
6692         }
6693         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6694                 int type = actions->type;
6695                 bool shared_count = false;
6696
6697                 if (!mlx5_flow_os_action_supported(type))
6698                         return rte_flow_error_set(error, ENOTSUP,
6699                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6700                                                   actions,
6701                                                   "action not supported");
6702                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
6703                         return rte_flow_error_set(error, ENOTSUP,
6704                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6705                                                   actions, "too many actions");
6706                 if (action_flags &
6707                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
6708                         return rte_flow_error_set(error, ENOTSUP,
6709                                 RTE_FLOW_ERROR_TYPE_ACTION,
6710                                 NULL, "meter action with policy "
6711                                 "must be the last action");
6712                 switch (type) {
6713                 case RTE_FLOW_ACTION_TYPE_VOID:
6714                         break;
6715                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
6716                         ret = flow_dv_validate_action_port_id(dev,
6717                                                               action_flags,
6718                                                               actions,
6719                                                               attr,
6720                                                               error);
6721                         if (ret)
6722                                 return ret;
6723                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
6724                         ++actions_n;
6725                         break;
6726                 case RTE_FLOW_ACTION_TYPE_FLAG:
6727                         ret = flow_dv_validate_action_flag(dev, action_flags,
6728                                                            attr, error);
6729                         if (ret < 0)
6730                                 return ret;
6731                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6732                                 /* Count all modify-header actions as one. */
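                                /*
                                 * (All modify-header actions of a flow are
                                 * merged into one device modify-header
                                 * object, hence the single count.)
                                 */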
6733                                 if (!(action_flags &
6734                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6735                                         ++actions_n;
6736                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
6737                                                 MLX5_FLOW_ACTION_MARK_EXT;
6738                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6739                                         modify_after_mirror = 1;
6741                         } else {
6742                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
6743                                 ++actions_n;
6744                         }
6745                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6746                         break;
6747                 case RTE_FLOW_ACTION_TYPE_MARK:
6748                         ret = flow_dv_validate_action_mark(dev, actions,
6749                                                            action_flags,
6750                                                            attr, error);
6751                         if (ret < 0)
6752                                 return ret;
6753                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6754                                 /* Count all modify-header actions as one. */
6755                                 if (!(action_flags &
6756                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6757                                         ++actions_n;
6758                                 action_flags |= MLX5_FLOW_ACTION_MARK |
6759                                                 MLX5_FLOW_ACTION_MARK_EXT;
6760                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6761                                         modify_after_mirror = 1;
6762                         } else {
6763                                 action_flags |= MLX5_FLOW_ACTION_MARK;
6764                                 ++actions_n;
6765                         }
6766                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6767                         break;
6768                 case RTE_FLOW_ACTION_TYPE_SET_META:
6769                         ret = flow_dv_validate_action_set_meta(dev, actions,
6770                                                                action_flags,
6771                                                                attr, error);
6772                         if (ret < 0)
6773                                 return ret;
6774                         /* Count all modify-header actions as one action. */
6775                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6776                                 ++actions_n;
6777                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6778                                 modify_after_mirror = 1;
6779                         action_flags |= MLX5_FLOW_ACTION_SET_META;
6780                         rw_act_num += MLX5_ACT_NUM_SET_META;
6781                         break;
6782                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
6783                         ret = flow_dv_validate_action_set_tag(dev, actions,
6784                                                               action_flags,
6785                                                               attr, error);
6786                         if (ret < 0)
6787                                 return ret;
6788                         /* Count all modify-header actions as one action. */
6789                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6790                                 ++actions_n;
6791                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6792                                 modify_after_mirror = 1;
6793                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6794                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6795                         break;
6796                 case RTE_FLOW_ACTION_TYPE_DROP:
6797                         ret = mlx5_flow_validate_action_drop(action_flags,
6798                                                              attr, error);
6799                         if (ret < 0)
6800                                 return ret;
6801                         action_flags |= MLX5_FLOW_ACTION_DROP;
6802                         ++actions_n;
6803                         break;
6804                 case RTE_FLOW_ACTION_TYPE_QUEUE:
6805                         ret = mlx5_flow_validate_action_queue(actions,
6806                                                               action_flags, dev,
6807                                                               attr, error);
6808                         if (ret < 0)
6809                                 return ret;
6810                         queue_index = ((const struct rte_flow_action_queue *)
6811                                                         (actions->conf))->index;
6812                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
6813                         ++actions_n;
6814                         break;
6815                 case RTE_FLOW_ACTION_TYPE_RSS:
6816                         rss = actions->conf;
6817                         ret = mlx5_flow_validate_action_rss(actions,
6818                                                             action_flags, dev,
6819                                                             attr, item_flags,
6820                                                             error);
6821                         if (ret < 0)
6822                                 return ret;
6823                         if (rss && sample_rss &&
6824                             (sample_rss->level != rss->level ||
6825                             sample_rss->types != rss->types))
6826                                 return rte_flow_error_set(error, ENOTSUP,
6827                                         RTE_FLOW_ERROR_TYPE_ACTION,
6828                                         NULL,
6829                                         "Cannot use different RSS types "
6830                                         "or levels in the same flow");
6831                         if (rss != NULL && rss->queue_num)
6832                                 queue_index = rss->queue[0];
6833                         action_flags |= MLX5_FLOW_ACTION_RSS;
6834                         ++actions_n;
6835                         break;
6836                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6837                         ret =
6838                         mlx5_flow_validate_action_default_miss(action_flags,
6839                                         attr, error);
6840                         if (ret < 0)
6841                                 return ret;
6842                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
6843                         ++actions_n;
6844                         break;
6845                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
6846                 case RTE_FLOW_ACTION_TYPE_COUNT:
6847                         shared_count = is_shared_action_count(actions);
6848                         ret = flow_dv_validate_action_count(dev, shared_count,
6849                                                             action_flags,
6850                                                             error);
6851                         if (ret < 0)
6852                                 return ret;
6853                         action_flags |= MLX5_FLOW_ACTION_COUNT;
6854                         ++actions_n;
6855                         break;
6856                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
6857                         if (flow_dv_validate_action_pop_vlan(dev,
6858                                                              action_flags,
6859                                                              actions,
6860                                                              item_flags, attr,
6861                                                              error))
6862                                 return -rte_errno;
6863                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6864                                 modify_after_mirror = 1;
6865                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
6866                         ++actions_n;
6867                         break;
6868                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6869                         ret = flow_dv_validate_action_push_vlan(dev,
6870                                                                 action_flags,
6871                                                                 vlan_m,
6872                                                                 actions, attr,
6873                                                                 error);
6874                         if (ret < 0)
6875                                 return ret;
6876                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6877                                 modify_after_mirror = 1;
6878                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
6879                         ++actions_n;
6880                         break;
6881                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
6882                         ret = flow_dv_validate_action_set_vlan_pcp
6883                                                 (action_flags, actions, error);
6884                         if (ret < 0)
6885                                 return ret;
6886                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6887                                 modify_after_mirror = 1;
6888                         /* Count PCP with push_vlan command. */
6889                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
6890                         break;
6891                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6892                         ret = flow_dv_validate_action_set_vlan_vid
6893                                                 (item_flags, action_flags,
6894                                                  actions, error);
6895                         if (ret < 0)
6896                                 return ret;
6897                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6898                                 modify_after_mirror = 1;
6899                         /* Count VID with push_vlan command. */
6900                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
6901                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
6902                         break;
6903                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6904                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6905                         ret = flow_dv_validate_action_l2_encap(dev,
6906                                                                action_flags,
6907                                                                actions, attr,
6908                                                                error);
6909                         if (ret < 0)
6910                                 return ret;
6911                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
6912                         ++actions_n;
6913                         break;
6914                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6915                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6916                         ret = flow_dv_validate_action_decap(dev, action_flags,
6917                                                             actions, item_flags,
6918                                                             attr, error);
6919                         if (ret < 0)
6920                                 return ret;
6921                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6922                                 modify_after_mirror = 1;
6923                         action_flags |= MLX5_FLOW_ACTION_DECAP;
6924                         ++actions_n;
6925                         break;
6926                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6927                         ret = flow_dv_validate_action_raw_encap_decap
6928                                 (dev, NULL, actions->conf, attr, &action_flags,
6929                                  &actions_n, actions, item_flags, error);
6930                         if (ret < 0)
6931                                 return ret;
6932                         break;
6933                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6934                         decap = actions->conf;
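                        /*
                         * Skip ahead over any VOID actions: a raw_decap
                         * directly followed by raw_encap is validated as
                         * one combined L3 decap/encap pair.
                         */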
6935                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
6936                                 ;
6937                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
6938                                 encap = NULL;
6939                                 actions--;
6940                         } else {
6941                                 encap = actions->conf;
6942                         }
6943                         ret = flow_dv_validate_action_raw_encap_decap
6944                                            (dev,
6945                                             decap ? decap : &empty_decap, encap,
6946                                             attr, &action_flags, &actions_n,
6947                                             actions, item_flags, error);
6948                         if (ret < 0)
6949                                 return ret;
6950                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
6951                             (action_flags & MLX5_FLOW_ACTION_DECAP))
6952                                 modify_after_mirror = 1;
6953                         break;
6954                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
6955                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
6956                         ret = flow_dv_validate_action_modify_mac(action_flags,
6957                                                                  actions,
6958                                                                  item_flags,
6959                                                                  error);
6960                         if (ret < 0)
6961                                 return ret;
6962                         /* Count all modify-header actions as one action. */
6963                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6964                                 ++actions_n;
6965                         action_flags |= actions->type ==
6966                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
6967                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
6968                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
6969                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6970                                 modify_after_mirror = 1;
6971                         /*
6972                          * Even if the source and destination MAC addresses
6973                          * overlap in the header with 4-byte alignment, the
6974                          * convert function handles them separately, so 4 SW
6975                          * actions are created; 2 actions are added each time,
6976                          * regardless of how many address bytes are set.
6977                          */
6978                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
6979                         break;
6980                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
6981                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
6982                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
6983                                                                   actions,
6984                                                                   item_flags,
6985                                                                   error);
6986                         if (ret < 0)
6987                                 return ret;
6988                         /* Count all modify-header actions as one action. */
6989                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6990                                 ++actions_n;
6991                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6992                                 modify_after_mirror = 1;
6993                         action_flags |= actions->type ==
6994                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
6995                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
6996                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
6997                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
6998                         break;
6999                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7000                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7001                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7002                                                                   actions,
7003                                                                   item_flags,
7004                                                                   error);
7005                         if (ret < 0)
7006                                 return ret;
7007                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7008                                 return rte_flow_error_set(error, ENOTSUP,
7009                                         RTE_FLOW_ERROR_TYPE_ACTION,
7010                                         actions,
7011                                         "Can't change header "
7012                                         "with ICMPv6 proto");
7013                         /* Count all modify-header actions as one action. */
7014                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7015                                 ++actions_n;
7016                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7017                                 modify_after_mirror = 1;
7018                         action_flags |= actions->type ==
7019                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7020                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7021                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7022                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7023                         break;
7024                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7025                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7026                         ret = flow_dv_validate_action_modify_tp(action_flags,
7027                                                                 actions,
7028                                                                 item_flags,
7029                                                                 error);
7030                         if (ret < 0)
7031                                 return ret;
7032                         /* Count all modify-header actions as one action. */
7033                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7034                                 ++actions_n;
7035                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7036                                 modify_after_mirror = 1;
7037                         action_flags |= actions->type ==
7038                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7039                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7040                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7041                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7042                         break;
7043                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7044                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7045                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7046                                                                  actions,
7047                                                                  item_flags,
7048                                                                  error);
7049                         if (ret < 0)
7050                                 return ret;
7051                         /* Count all modify-header actions as one action. */
7052                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7053                                 ++actions_n;
7054                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7055                                 modify_after_mirror = 1;
7056                         action_flags |= actions->type ==
7057                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7058                                                 MLX5_FLOW_ACTION_SET_TTL :
7059                                                 MLX5_FLOW_ACTION_DEC_TTL;
7060                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7061                         break;
7062                 case RTE_FLOW_ACTION_TYPE_JUMP:
7063                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7064                                                            action_flags,
7065                                                            attr, external,
7066                                                            error);
7067                         if (ret)
7068                                 return ret;
7069                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7070                             fdb_mirror_limit)
7071                                 return rte_flow_error_set(error, EINVAL,
7072                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7073                                                   NULL,
7074                                                   "sample and jump action combination is not supported");
7075                         ++actions_n;
7076                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7077                         break;
7078                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7079                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7080                         ret = flow_dv_validate_action_modify_tcp_seq
7081                                                                 (action_flags,
7082                                                                  actions,
7083                                                                  item_flags,
7084                                                                  error);
7085                         if (ret < 0)
7086                                 return ret;
7087                         /* Count all modify-header actions as one action. */
7088                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7089                                 ++actions_n;
7090                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7091                                 modify_after_mirror = 1;
7092                         action_flags |= actions->type ==
7093                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7094                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7095                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7096                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7097                         break;
7098                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7099                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7100                         ret = flow_dv_validate_action_modify_tcp_ack
7101                                                                 (action_flags,
7102                                                                  actions,
7103                                                                  item_flags,
7104                                                                  error);
7105                         if (ret < 0)
7106                                 return ret;
7107                         /* Count all modify-header actions as one action. */
7108                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7109                                 ++actions_n;
7110                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7111                                 modify_after_mirror = 1;
7112                         action_flags |= actions->type ==
7113                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7114                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7115                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7116                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7117                         break;
7118                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7119                         break;
7120                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7121                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7122                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7123                         break;
7124                 case RTE_FLOW_ACTION_TYPE_METER:
7125                         ret = mlx5_flow_validate_action_meter(dev,
7126                                                               action_flags,
7127                                                               actions, attr,
7128                                                               &def_policy,
7129                                                               error);
7130                         if (ret < 0)
7131                                 return ret;
7132                         action_flags |= MLX5_FLOW_ACTION_METER;
7133                         if (!def_policy)
7134                                 action_flags |=
7135                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7136                         ++actions_n;
7137                         /* Meter action will add one more TAG action. */
7138                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7139                         break;
7140                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7141                         if (!attr->transfer && !attr->group)
7142                                 return rte_flow_error_set(error, ENOTSUP,
7143                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7144                                                                            NULL,
7145                           "Shared ASO age action is not supported for group 0");
7146                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7147                                 return rte_flow_error_set
7148                                                   (error, EINVAL,
7149                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7150                                                    NULL,
7151                                                    "duplicate age actions set");
7152                         action_flags |= MLX5_FLOW_ACTION_AGE;
7153                         ++actions_n;
7154                         break;
7155                 case RTE_FLOW_ACTION_TYPE_AGE:
7156                         ret = flow_dv_validate_action_age(action_flags,
7157                                                           actions, dev,
7158                                                           error);
7159                         if (ret < 0)
7160                                 return ret;
7161                         /*
7162                          * Validate that the regular AGE action (using a counter)
7163                          * is mutually exclusive with shared counter actions.
7164                          */
7165                         if (!priv->sh->flow_hit_aso_en) {
7166                                 if (shared_count)
7167                                         return rte_flow_error_set
7168                                                 (error, EINVAL,
7169                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7170                                                 NULL,
7171                                                 "old age and shared count combination is not supported");
7172                                 if (sample_count)
7173                                         return rte_flow_error_set
7174                                                 (error, EINVAL,
7175                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7176                                                 NULL,
7177                                                 "old age action and count must be in the same sub flow");
7178                         }
7179                         action_flags |= MLX5_FLOW_ACTION_AGE;
7180                         ++actions_n;
7181                         break;
7182                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7183                         ret = flow_dv_validate_action_modify_ipv4_dscp
7184                                                          (action_flags,
7185                                                           actions,
7186                                                           item_flags,
7187                                                           error);
7188                         if (ret < 0)
7189                                 return ret;
7190                         /* Count all modify-header actions as one action. */
7191                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7192                                 ++actions_n;
7193                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7194                                 modify_after_mirror = 1;
7195                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7196                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7197                         break;
7198                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7199                         ret = flow_dv_validate_action_modify_ipv6_dscp
7200                                                                 (action_flags,
7201                                                                  actions,
7202                                                                  item_flags,
7203                                                                  error);
7204                         if (ret < 0)
7205                                 return ret;
7206                         /* Count all modify-header actions as one action. */
7207                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7208                                 ++actions_n;
7209                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7210                                 modify_after_mirror = 1;
7211                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7212                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7213                         break;
7214                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7215                         ret = flow_dv_validate_action_sample(&action_flags,
7216                                                              actions, dev,
7217                                                              attr, item_flags,
7218                                                              rss, &sample_rss,
7219                                                              &sample_count,
7220                                                              &fdb_mirror_limit,
7221                                                              error);
7222                         if (ret < 0)
7223                                 return ret;
7224                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7225                         ++actions_n;
7226                         break;
7227                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7228                         if (actions[0].type != (typeof(actions[0].type))
7229                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
7230                                 return rte_flow_error_set
7231                                                 (error, EINVAL,
7232                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7233                                                 NULL, "MLX5 private action "
7234                                                 "must be the first");
7235
7236                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
7237                         break;
7238                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7239                         ret = flow_dv_validate_action_modify_field(dev,
7240                                                                    action_flags,
7241                                                                    actions,
7242                                                                    attr,
7243                                                                    error);
7244                         if (ret < 0)
7245                                 return ret;
7246                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7247                                 modify_after_mirror = 1;
7248                         /* Count all modify-header actions as one action. */
7249                         if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
7250                                 ++actions_n;
7251                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7252                         rw_act_num += ret;
7253                         break;
7254                 default:
7255                         return rte_flow_error_set(error, ENOTSUP,
7256                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7257                                                   actions,
7258                                                   "action not supported");
7259                 }
7260         }
7261         /*
7262          * Validate actions in tunnel offload flow rules:
7263          * - Explicit decap action is prohibited by the tunnel offload API.
7264          * - Drop action in a tunnel steer rule is prohibited by the API.
7265          * - Application cannot use MARK action because its value can mask
7266          *   the tunnel default miss notification.
7267          * - JUMP in a tunnel match rule is not supported by the current
7268          *   PMD implementation.
7269          * - TAG & META are reserved for future use.
7270          */
7271         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7272                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7273                                             MLX5_FLOW_ACTION_MARK     |
7274                                             MLX5_FLOW_ACTION_SET_TAG  |
7275                                             MLX5_FLOW_ACTION_SET_META |
7276                                             MLX5_FLOW_ACTION_DROP;
7277
7278                 if (action_flags & bad_actions_mask)
7279                         return rte_flow_error_set
7280                                         (error, EINVAL,
7281                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7282                                         "Invalid RTE action in tunnel "
7283                                         "set decap rule");
7284                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7285                         return rte_flow_error_set
7286                                         (error, EINVAL,
7287                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7288                                         "tunnel set decap rule must terminate "
7289                                         "with JUMP");
7290                 if (!attr->ingress)
7291                         return rte_flow_error_set
7292                                         (error, EINVAL,
7293                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7294                                         "tunnel flows for ingress traffic only");
7295         }
7296         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7297                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7298                                             MLX5_FLOW_ACTION_MARK    |
7299                                             MLX5_FLOW_ACTION_SET_TAG |
7300                                             MLX5_FLOW_ACTION_SET_META;
7301
7302                 if (action_flags & bad_actions_mask)
7303                         return rte_flow_error_set
7304                                         (error, EINVAL,
7305                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7306                                         "Invalid RTE action in tunnel "
7307                                         "set match rule");
7308         }
7309         /*
7310          * Validate the drop action mutual exclusion with other actions.
7311          * Drop action is mutually-exclusive with any other action, except for
7312          * Count action.
7313          * Drop action compatibility with tunnel offload was already validated.
7314          */
7315         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7316                             MLX5_FLOW_ACTION_TUNNEL_SET));
7317         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7318             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7319                 return rte_flow_error_set(error, EINVAL,
7320                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7321                                           "Drop action is mutually-exclusive "
7322                                           "with any other action, except for "
7323                                           "Count action");
7324         /* Eswitch has a few restrictions on using items and actions. */
7325         if (attr->transfer) {
7326                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7327                     action_flags & MLX5_FLOW_ACTION_FLAG)
7328                         return rte_flow_error_set(error, ENOTSUP,
7329                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7330                                                   NULL,
7331                                                   "unsupported action FLAG");
7332                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7333                     action_flags & MLX5_FLOW_ACTION_MARK)
7334                         return rte_flow_error_set(error, ENOTSUP,
7335                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7336                                                   NULL,
7337                                                   "unsupported action MARK");
7338                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7339                         return rte_flow_error_set(error, ENOTSUP,
7340                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7341                                                   NULL,
7342                                                   "unsupported action QUEUE");
7343                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7344                         return rte_flow_error_set(error, ENOTSUP,
7345                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7346                                                   NULL,
7347                                                   "unsupported action RSS");
7348                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7349                         return rte_flow_error_set(error, EINVAL,
7350                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7351                                                   actions,
7352                                                   "no fate action is found");
7353         } else {
7354                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7355                         return rte_flow_error_set(error, EINVAL,
7356                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7357                                                   actions,
7358                                                   "no fate action is found");
7359         }
7360         /*
7361          * Continue validation for Xcap and VLAN actions.
7362          * If hairpin works in explicit TX rule mode, there is no action
7363          * splitting and the validation of a hairpin ingress flow is the
7364          * same as for other standard flows.
7365          */
7366         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7367                              MLX5_FLOW_VLAN_ACTIONS)) &&
7368             (queue_index == 0xFFFF ||
7369              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7370              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7371              conf->tx_explicit != 0))) {
7372                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7373                     MLX5_FLOW_XCAP_ACTIONS)
7374                         return rte_flow_error_set(error, ENOTSUP,
7375                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7376                                                   NULL, "encap and decap "
7377                                                   "combination is not supported");
7378                 if (!attr->transfer && attr->ingress) {
7379                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7380                                 return rte_flow_error_set
7381                                                 (error, ENOTSUP,
7382                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7383                                                  NULL, "encap is not supported"
7384                                                  " for ingress traffic");
7385                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7386                                 return rte_flow_error_set
7387                                                 (error, ENOTSUP,
7388                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7389                                                  NULL, "push VLAN action not "
7390                                                  "supported for ingress");
7391                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7392                                         MLX5_FLOW_VLAN_ACTIONS)
7393                                 return rte_flow_error_set
7394                                                 (error, ENOTSUP,
7395                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7396                                                  NULL, "no support for "
7397                                                  "multiple VLAN actions");
7398                 }
7399         }
7400         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7401                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7402                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7403                         attr->ingress)
7404                         return rte_flow_error_set
7405                                 (error, ENOTSUP,
7406                                 RTE_FLOW_ERROR_TYPE_ACTION,
7407                                 NULL, "fate action not supported for "
7408                                 "meter with policy");
7409                 if (attr->egress) {
7410                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7411                                 return rte_flow_error_set
7412                                         (error, ENOTSUP,
7413                                         RTE_FLOW_ERROR_TYPE_ACTION,
7414                                         NULL, "modify header action in egress "
7415                                         "cannot be done before meter action");
7416                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7417                                 return rte_flow_error_set
7418                                         (error, ENOTSUP,
7419                                         RTE_FLOW_ERROR_TYPE_ACTION,
7420                                         NULL, "encap action in egress "
7421                                         "cannot be done before meter action");
7422                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7423                                 return rte_flow_error_set
7424                                         (error, ENOTSUP,
7425                                         RTE_FLOW_ERROR_TYPE_ACTION,
7426                                         NULL, "push vlan action in egress "
7427                                         "cannot be done before meter action");
7428                 }
7429         }
7430         /*
7431          * Hairpin flow will add one more TAG action in TX implicit mode.
7432          * In TX explicit mode, there will be no hairpin flow ID.
7433          */
7434         if (hairpin > 0)
7435                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7436         /* Extra metadata enabled: one more TAG action will be added. */
7437         if (dev_conf->dv_flow_en &&
7438             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7439             mlx5_flow_ext_mreg_supported(dev))
7440                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7441         if (rw_act_num > flow_dv_modify_hdr_action_max(dev, is_root)) {
7443                 return rte_flow_error_set(error, ENOTSUP,
7444                                           RTE_FLOW_ERROR_TYPE_ACTION,
7445                                           NULL, "too many header modify"
7446                                           " actions to support");
7447         }
7448         /* Eswitch egress mirror plus modify flows have a limitation on CX5. */
7449         if (fdb_mirror_limit && modify_after_mirror)
7450                 return rte_flow_error_set(error, EINVAL,
7451                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7452                                 "sample before modify action is not supported");
7453         return 0;
7454 }
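
/*
 * Illustrative sketch, not part of the driver: what the drop-action
 * exclusivity check above means for an application. Combining DROP with
 * COUNT validates fine; adding any other action is rejected with EINVAL.
 * The port id (0) and the pattern are assumptions for the example.
 */
#if 0
	struct rte_flow_error err;
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action drop_count[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Accepted: DROP is only combined with COUNT. */
	(void)rte_flow_validate(0, &attr, pattern, drop_count, &err);
#endif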
7455
7456 /**
7457  * Internal preparation function. Allocates the DV flow structure;
7458  * its size is constant.
7459  *
7460  * @param[in] dev
7461  *   Pointer to the rte_eth_dev structure.
7462  * @param[in] attr
7463  *   Pointer to the flow attributes.
7464  * @param[in] items
7465  *   Pointer to the list of items.
7466  * @param[in] actions
7467  *   Pointer to the list of actions.
7468  * @param[out] error
7469  *   Pointer to the error structure.
7470  *
7471  * @return
7472  *   Pointer to mlx5_flow object on success,
7473  *   otherwise NULL and rte_errno is set.
7474  */
7475 static struct mlx5_flow *
7476 flow_dv_prepare(struct rte_eth_dev *dev,
7477                 const struct rte_flow_attr *attr __rte_unused,
7478                 const struct rte_flow_item items[] __rte_unused,
7479                 const struct rte_flow_action actions[] __rte_unused,
7480                 struct rte_flow_error *error)
7481 {
7482         uint32_t handle_idx = 0;
7483         struct mlx5_flow *dev_flow;
7484         struct mlx5_flow_handle *dev_handle;
7485         struct mlx5_priv *priv = dev->data->dev_private;
7486         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7487
7488         MLX5_ASSERT(wks);
7489         wks->skip_matcher_reg = 0;
7490         /* Guard against corrupting the workspace memory. */
7491         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7492                 rte_flow_error_set(error, ENOSPC,
7493                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7494                                    "no free temporary device flow");
7495                 return NULL;
7496         }
7497         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7498                                    &handle_idx);
7499         if (!dev_handle) {
7500                 rte_flow_error_set(error, ENOMEM,
7501                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7502                                    "not enough memory to create flow handle");
7503                 return NULL;
7504         }
7505         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7506         dev_flow = &wks->flows[wks->flow_idx++];
7507         memset(dev_flow, 0, sizeof(*dev_flow));
7508         dev_flow->handle = dev_handle;
7509         dev_flow->handle_idx = handle_idx;
7510         /*
7511          * Some old rdma-core releases check the length of the matching
7512          * parameter before continuing, so the length without the misc4
7513          * param must be used here. If the flow needs misc4 support, the
7514          * length is adjusted accordingly. Each param member is naturally
7515          * aligned to a 64B boundary.
7516          */
7517         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7518                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7519         dev_flow->ingress = attr->ingress;
7520         dev_flow->dv.transfer = attr->transfer;
7521         return dev_flow;
7522 }
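
/*
 * Illustrative sketch, not part of the driver: the bounded allocation
 * pattern used by flow_dv_prepare() above. Sub-flows come from a fixed
 * per-thread array indexed by wks->flow_idx; when the index reaches
 * MLX5_NUM_MAX_DEV_FLOWS the caller must fail instead of writing past
 * the array. The structure and helper names here are hypothetical.
 */
#if 0
struct example_ws {
	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
	uint32_t flow_idx;
};

static struct mlx5_flow *
example_take_flow(struct example_ws *ws)
{
	if (ws->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS)
		return NULL; /* ENOSPC: no free temporary device flow. */
	return &ws->flows[ws->flow_idx++];
}
#endif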
7523
7524 #ifdef RTE_LIBRTE_MLX5_DEBUG
7525 /**
7526  * Sanity check for match mask and value. Similar to check_valid_spec() in
7527  * the kernel driver. If an unmasked bit is set in the value, it fails.
7528  *
7529  * @param match_mask
7530  *   pointer to match mask buffer.
7531  * @param match_value
7532  *   pointer to match value buffer.
7533  *
7534  * @return
7535  *   0 if valid, -EINVAL otherwise.
7536  */
7537 static int
7538 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7539 {
7540         uint8_t *m = match_mask;
7541         uint8_t *v = match_value;
7542         unsigned int i;
7543
7544         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7545                 if (v[i] & ~m[i]) {
7546                         DRV_LOG(ERR,
7547                                 "match_value differs from match_criteria"
7548                                 " %p[%u] != %p[%u]",
7549                                 match_value, i, match_mask, i);
7550                         return -EINVAL;
7551                 }
7552         }
7553         return 0;
7554 }
7555 #endif
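
/*
 * Illustrative sketch, not part of the driver: a value/mask pair that
 * flow_dv_check_valid_spec() rejects. Bit 0x01 of byte 0 is set in the
 * value but clear in the mask, so the check returns -EINVAL.
 */
#if 0
	uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0xf0 };
	uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0xf1 };

	MLX5_ASSERT(flow_dv_check_valid_spec(mask, value) == -EINVAL);
#endif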
7556
7557 /**
7558  * Add match of ip_version.
7559  *
7560  * @param[in] group
7561  *   Flow group.
7562  * @param[in] headers_v
7563  *   Values header pointer.
7564  * @param[in] headers_m
7565  *   Masks header pointer.
7566  * @param[in] ip_version
7567  *   The IP version to set.
7568  */
7569 static inline void
7570 flow_dv_set_match_ip_version(uint32_t group,
7571                              void *headers_v,
7572                              void *headers_m,
7573                              uint8_t ip_version)
7574 {
7575         if (group == 0)
7576                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7577         else
7578                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7579                          ip_version);
7580         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7581         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7582         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7583 }
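
/*
 * Illustrative sketch, not part of the driver: on the root table
 * (group 0) the helper above widens the ip_version mask to the full 0xf
 * nibble, while on other groups the version value doubles as the mask;
 * either way the ethertype match is cleared in favor of ip_version.
 * A hypothetical outer IPv4 call:
 */
#if 0
	flow_dv_set_match_ip_version(0 /* group */, headers_v, headers_m, 4);
	/* headers_m.ip_version == 0xf, headers_v.ip_version == 4,
	 * ethertype == 0 in both mask and value. */
#endif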
7584
7585 /**
7586  * Add Ethernet item to matcher and to the value.
7587  *
7588  * @param[in, out] matcher
7589  *   Flow matcher.
7590  * @param[in, out] key
7591  *   Flow matcher value.
7592  * @param[in] item
7593  *   Flow pattern to translate.
7594  * @param[in] inner
7595  *   Item is inner pattern.
7596  */
7597 static void
7598 flow_dv_translate_item_eth(void *matcher, void *key,
7599                            const struct rte_flow_item *item, int inner,
7600                            uint32_t group)
7601 {
7602         const struct rte_flow_item_eth *eth_m = item->mask;
7603         const struct rte_flow_item_eth *eth_v = item->spec;
7604         const struct rte_flow_item_eth nic_mask = {
7605                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7606                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7607                 .type = RTE_BE16(0xffff),
7608                 .has_vlan = 0,
7609         };
7610         void *hdrs_m;
7611         void *hdrs_v;
7612         char *l24_v;
7613         unsigned int i;
7614
7615         if (!eth_v)
7616                 return;
7617         if (!eth_m)
7618                 eth_m = &nic_mask;
7619         if (inner) {
7620                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7621                                          inner_headers);
7622                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7623         } else {
7624                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7625                                          outer_headers);
7626                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7627         }
7628         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7629                &eth_m->dst, sizeof(eth_m->dst));
7630         /* The value must be in the range of the mask. */
7631         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7632         for (i = 0; i < sizeof(eth_m->dst); ++i)
7633                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7634         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7635                &eth_m->src, sizeof(eth_m->src));
7636         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7637         /* The value must be in the range of the mask. */
7638                 for (i = 0; i < sizeof(eth_m->src); ++i)
7639                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7640         /*
7641          * HW supports match on one Ethertype, the Ethertype following the last
7642          * VLAN tag of the packet (see PRM).
7643          * Set match on ethertype only if ETH header is not followed by VLAN.
7644          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7645          * ethertype, and use ip_version field instead.
7646          * eCPRI over Ether layer will use type value 0xAEFE.
7647          */
7648         if (eth_m->type == 0xFFFF) {
7649                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
7650                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7651                 switch (eth_v->type) {
7652                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7653                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7654                         return;
7655                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7656                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7657                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7658                         return;
7659                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7660                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7661                         return;
7662                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7663                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7664                         return;
7665                 default:
7666                         break;
7667                 }
7668         }
7669         if (eth_m->has_vlan) {
7670                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7671                 if (eth_v->has_vlan) {
7672                         /*
7673                          * Here, when the has_more_vlan field in the VLAN item
7674                          * is also not set, only single-tagged packets match.
7675                          */
7676                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7677                         return;
7678                 }
7679         }
7680         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7681                  rte_be_to_cpu_16(eth_m->type));
7682         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
7683         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
7684 }
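
/*
 * Illustrative sketch, not part of the driver: an Ethernet item whose
 * fully-masked type is IPv4. Per the logic above it is translated into
 * an ip_version match instead of an ethertype match; matcher, key and
 * group are assumed to come from the surrounding translation context.
 */
#if 0
	const struct rte_flow_item_eth spec = {
		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
	};
	const struct rte_flow_item_eth mask = {
		.type = RTE_BE16(0xffff),
	};
	const struct rte_flow_item item = { .spec = &spec, .mask = &mask };

	flow_dv_translate_item_eth(matcher, key, &item, 0 /* outer */, group);
#endif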
7685
7686 /**
7687  * Add VLAN item to matcher and to the value.
7688  *
7689  * @param[in, out] dev_flow
7690  *   Flow descriptor.
7691  * @param[in, out] matcher
7692  *   Flow matcher.
7693  * @param[in, out] key
7694  *   Flow matcher value.
7695  * @param[in] item
7696  *   Flow pattern to translate.
7697  * @param[in] inner
7698  *   Item is inner pattern.
7699  */
7700 static void
7701 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
7702                             void *matcher, void *key,
7703                             const struct rte_flow_item *item,
7704                             int inner, uint32_t group)
7705 {
7706         const struct rte_flow_item_vlan *vlan_m = item->mask;
7707         const struct rte_flow_item_vlan *vlan_v = item->spec;
7708         void *hdrs_m;
7709         void *hdrs_v;
7710         uint16_t tci_m;
7711         uint16_t tci_v;
7712
7713         if (inner) {
7714                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7715                                          inner_headers);
7716                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7717         } else {
7718                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7719                                          outer_headers);
7720                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7721                 /*
7722                  * This is a workaround; masks are not supported
7723                  * and have been pre-validated.
7724                  */
7725                 if (vlan_v)
7726                         dev_flow->handle->vf_vlan.tag =
7727                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
7728         }
7729         /*
7730          * When a VLAN item exists in the flow, mark the packet as tagged,
7731          * even if TCI is not specified.
7732          */
7733         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
7734                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7735                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7736         }
7737         if (!vlan_v)
7738                 return;
7739         if (!vlan_m)
7740                 vlan_m = &rte_flow_item_vlan_mask;
7741         tci_m = rte_be_to_cpu_16(vlan_m->tci);
7742         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
7743         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
7744         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
7745         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
7746         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
7747         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
7748         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
7749         /*
7750          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7751          * ethertype, and use ip_version field instead.
7752          */
7753         if (vlan_m->inner_type == 0xFFFF) {
7754                 switch (vlan_v->inner_type) {
7755                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7756                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7757                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7758                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7759                         return;
7760                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7761                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7762                         return;
7763                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7764                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7765                         return;
7766                 default:
7767                         break;
7768                 }
7769         }
7770         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
7771                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7772                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7773                 /* Only one vlan_tag bit can be set. */
7774                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7775                 return;
7776         }
7777         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7778                  rte_be_to_cpu_16(vlan_m->inner_type));
7779         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
7780                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
7781 }
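
/*
 * Illustrative sketch, not part of the driver: how the 16-bit TCI is
 * split by the function above into the three PRM match fields.
 * MLX5_SET() truncates each value to its field width, so the shifts
 * need no explicit masking.
 */
#if 0
	uint16_t tci = rte_be_to_cpu_16(vlan_v->tci);

	uint16_t vid  = tci & 0x0fff;	/* first_vid:  bits 0-11  */
	uint16_t cfi  = tci >> 12;	/* first_cfi:  bit  12    */
	uint16_t prio = tci >> 13;	/* first_prio: bits 13-15 */
#endif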
7782
7783 /**
7784  * Add IPV4 item to matcher and to the value.
7785  *
7786  * @param[in, out] matcher
7787  *   Flow matcher.
7788  * @param[in, out] key
7789  *   Flow matcher value.
7790  * @param[in] item
7791  *   Flow pattern to translate.
7792  * @param[in] inner
7793  *   Item is inner pattern.
7794  * @param[in] group
7795  *   The group to insert the rule.
7796  */
7797 static void
7798 flow_dv_translate_item_ipv4(void *matcher, void *key,
7799                             const struct rte_flow_item *item,
7800                             int inner, uint32_t group)
7801 {
7802         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
7803         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
7804         const struct rte_flow_item_ipv4 nic_mask = {
7805                 .hdr = {
7806                         .src_addr = RTE_BE32(0xffffffff),
7807                         .dst_addr = RTE_BE32(0xffffffff),
7808                         .type_of_service = 0xff,
7809                         .next_proto_id = 0xff,
7810                         .time_to_live = 0xff,
7811                 },
7812         };
7813         void *headers_m;
7814         void *headers_v;
7815         char *l24_m;
7816         char *l24_v;
7817         uint8_t tos;
7818
7819         if (inner) {
7820                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7821                                          inner_headers);
7822                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7823         } else {
7824                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7825                                          outer_headers);
7826                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7827         }
7828         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
7829         if (!ipv4_v)
7830                 return;
7831         if (!ipv4_m)
7832                 ipv4_m = &nic_mask;
7833         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7834                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7835         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7836                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7837         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
7838         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
7839         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7840                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7841         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7842                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7843         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
7844         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
7845         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
7846         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
7847                  ipv4_m->hdr.type_of_service);
7848         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
7849         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
7850                  ipv4_m->hdr.type_of_service >> 2);
7851         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
7852         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7853                  ipv4_m->hdr.next_proto_id);
7854         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7855                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
7856         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7857                  ipv4_m->hdr.time_to_live);
7858         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7859                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
7860         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7861                  !!(ipv4_m->hdr.fragment_offset));
7862         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7863                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
7864 }
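
/*
 * Illustrative sketch, not part of the driver: the IPv4 type_of_service
 * byte is split by the function above into the two PRM fields; again
 * MLX5_SET() truncates to the field width.
 */
#if 0
	uint8_t tos = ipv4_v->hdr.type_of_service & ipv4_m->hdr.type_of_service;

	uint8_t ecn  = tos & 0x3;	/* ip_ecn:  bits 0-1 */
	uint8_t dscp = tos >> 2;	/* ip_dscp: bits 2-7 */
#endif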
7865
7866 /**
7867  * Add IPV6 item to matcher and to the value.
7868  *
7869  * @param[in, out] matcher
7870  *   Flow matcher.
7871  * @param[in, out] key
7872  *   Flow matcher value.
7873  * @param[in] item
7874  *   Flow pattern to translate.
7875  * @param[in] inner
7876  *   Item is inner pattern.
7877  * @param[in] group
7878  *   The group to insert the rule.
7879  */
7880 static void
7881 flow_dv_translate_item_ipv6(void *matcher, void *key,
7882                             const struct rte_flow_item *item,
7883                             int inner, uint32_t group)
7884 {
7885         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
7886         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
7887         const struct rte_flow_item_ipv6 nic_mask = {
7888                 .hdr = {
7889                         .src_addr =
7890                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7891                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7892                         .dst_addr =
7893                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7894                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7895                         .vtc_flow = RTE_BE32(0xffffffff),
7896                         .proto = 0xff,
7897                         .hop_limits = 0xff,
7898                 },
7899         };
7900         void *headers_m;
7901         void *headers_v;
7902         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7903         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7904         char *l24_m;
7905         char *l24_v;
7906         uint32_t vtc_m;
7907         uint32_t vtc_v;
7908         int i;
7909         int size;
7910
7911         if (inner) {
7912                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7913                                          inner_headers);
7914                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7915         } else {
7916                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7917                                          outer_headers);
7918                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7919         }
7920         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
7921         if (!ipv6_v)
7922                 return;
7923         if (!ipv6_m)
7924                 ipv6_m = &nic_mask;
7925         size = sizeof(ipv6_m->hdr.dst_addr);
7926         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7927                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7928         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7929                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7930         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
7931         for (i = 0; i < size; ++i)
7932                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
7933         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7934                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7935         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7936                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7937         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
7938         for (i = 0; i < size; ++i)
7939                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
7940         /* TOS. */
7941         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
7942         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
7943         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
7944         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
7945         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
7946         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
7947         /* Label. */
7948         if (inner) {
7949                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
7950                          vtc_m);
7951                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
7952                          vtc_v);
7953         } else {
7954                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
7955                          vtc_m);
7956                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
7957                          vtc_v);
7958         }
7959         /* Protocol. */
7960         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7961                  ipv6_m->hdr.proto);
7962         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7963                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
7964         /* Hop limit. */
7965         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7966                  ipv6_m->hdr.hop_limits);
7967         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7968                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
7969         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7970                  !!(ipv6_m->has_frag_ext));
7971         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7972                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
7973 }
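
/*
 * Illustrative sketch, not part of the driver: the IPv6 vtc_flow word is
 * version(4) | traffic class(8) | flow label(20), which explains the
 * shift counts used above.
 */
#if 0
	uint32_t vtc = rte_be_to_cpu_32(ipv6_v->hdr.vtc_flow);

	uint32_t ecn   = (vtc >> 20) & 0x3;	/* ip_ecn:  TC bits 0-1 */
	uint32_t dscp  = (vtc >> 22) & 0x3f;	/* ip_dscp: TC bits 2-7 */
	uint32_t label = vtc & 0xfffff;		/* ipv6_flow_label      */
#endif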
7974
7975 /**
7976  * Add IPV6 fragment extension item to matcher and to the value.
7977  *
7978  * @param[in, out] matcher
7979  *   Flow matcher.
7980  * @param[in, out] key
7981  *   Flow matcher value.
7982  * @param[in] item
7983  *   Flow pattern to translate.
7984  * @param[in] inner
7985  *   Item is inner pattern.
7986  */
7987 static void
7988 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
7989                                      const struct rte_flow_item *item,
7990                                      int inner)
7991 {
7992         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
7993         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
7994         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
7995                 .hdr = {
7996                         .next_header = 0xff,
7997                         .frag_data = RTE_BE16(0xffff),
7998                 },
7999         };
8000         void *headers_m;
8001         void *headers_v;
8002
8003         if (inner) {
8004                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8005                                          inner_headers);
8006                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8007         } else {
8008                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8009                                          outer_headers);
8010                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8011         }
8012         /* IPv6 fragment extension item exists, so the packet is an IP fragment. */
8013         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8014         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8015         if (!ipv6_frag_ext_v)
8016                 return;
8017         if (!ipv6_frag_ext_m)
8018                 ipv6_frag_ext_m = &nic_mask;
8019         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8020                  ipv6_frag_ext_m->hdr.next_header);
8021         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8022                  ipv6_frag_ext_v->hdr.next_header &
8023                  ipv6_frag_ext_m->hdr.next_header);
8024 }
8025
8026 /**
8027  * Add TCP item to matcher and to the value.
8028  *
8029  * @param[in, out] matcher
8030  *   Flow matcher.
8031  * @param[in, out] key
8032  *   Flow matcher value.
8033  * @param[in] item
8034  *   Flow pattern to translate.
8035  * @param[in] inner
8036  *   Item is inner pattern.
8037  */
8038 static void
8039 flow_dv_translate_item_tcp(void *matcher, void *key,
8040                            const struct rte_flow_item *item,
8041                            int inner)
8042 {
8043         const struct rte_flow_item_tcp *tcp_m = item->mask;
8044         const struct rte_flow_item_tcp *tcp_v = item->spec;
8045         void *headers_m;
8046         void *headers_v;
8047
8048         if (inner) {
8049                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8050                                          inner_headers);
8051                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8052         } else {
8053                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8054                                          outer_headers);
8055                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8056         }
8057         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8058         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8059         if (!tcp_v)
8060                 return;
8061         if (!tcp_m)
8062                 tcp_m = &rte_flow_item_tcp_mask;
8063         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8064                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8065         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8066                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8067         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8068                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8069         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8070                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8071         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8072                  tcp_m->hdr.tcp_flags);
8073         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8074                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8075 }
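
/*
 * Illustrative sketch, not part of the driver: a TCP item matching only
 * SYN packets, as an application could feed into the translation above.
 */
#if 0
	const struct rte_flow_item_tcp spec = {
		.hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
	};
	const struct rte_flow_item_tcp mask = {
		.hdr = { .tcp_flags = 0xff },
	};
	const struct rte_flow_item item = { .spec = &spec, .mask = &mask };

	flow_dv_translate_item_tcp(matcher, key, &item, 0 /* outer */);
#endif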
8076
8077 /**
8078  * Add UDP item to matcher and to the value.
8079  *
8080  * @param[in, out] matcher
8081  *   Flow matcher.
8082  * @param[in, out] key
8083  *   Flow matcher value.
8084  * @param[in] item
8085  *   Flow pattern to translate.
8086  * @param[in] inner
8087  *   Item is inner pattern.
8088  */
8089 static void
8090 flow_dv_translate_item_udp(void *matcher, void *key,
8091                            const struct rte_flow_item *item,
8092                            int inner)
8093 {
8094         const struct rte_flow_item_udp *udp_m = item->mask;
8095         const struct rte_flow_item_udp *udp_v = item->spec;
8096         void *headers_m;
8097         void *headers_v;
8098
8099         if (inner) {
8100                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8101                                          inner_headers);
8102                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8103         } else {
8104                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8105                                          outer_headers);
8106                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8107         }
8108         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8109         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8110         if (!udp_v)
8111                 return;
8112         if (!udp_m)
8113                 udp_m = &rte_flow_item_udp_mask;
8114         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8115                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8116         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8117                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8118         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8119                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8120         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8121                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8122 }
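
/*
 * Illustrative sketch, not part of the driver: a UDP item matching only
 * the destination port (the VXLAN port is an assumption for the
 * example); the source port stays unmasked and therefore unmatched.
 */
#if 0
	const struct rte_flow_item_udp spec = {
		.hdr = { .dst_port = RTE_BE16(4789) },
	};
	const struct rte_flow_item_udp mask = {
		.hdr = { .dst_port = RTE_BE16(0xffff) },
	};
	const struct rte_flow_item item = { .spec = &spec, .mask = &mask };

	flow_dv_translate_item_udp(matcher, key, &item, 0 /* outer */);
#endif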
8123
8124 /**
8125  * Add GRE optional Key item to matcher and to the value.
8126  *
8127  * @param[in, out] matcher
8128  *   Flow matcher.
8129  * @param[in, out] key
8130  *   Flow matcher value.
8131  * @param[in] item
8132  *   Flow pattern to translate.
8135  */
8136 static void
8137 flow_dv_translate_item_gre_key(void *matcher, void *key,
8138                                const struct rte_flow_item *item)
8139 {
8140         const rte_be32_t *key_m = item->mask;
8141         const rte_be32_t *key_v = item->spec;
8142         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8143         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8144         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8145
8146         /* GRE K bit must be on and should already be validated */
8147         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8148         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8149         if (!key_v)
8150                 return;
8151         if (!key_m)
8152                 key_m = &gre_key_default_mask;
8153         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8154                  rte_be_to_cpu_32(*key_m) >> 8);
8155         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8156                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8157         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8158                  rte_be_to_cpu_32(*key_m) & 0xFF);
8159         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8160                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8161 }
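
/*
 * Illustrative sketch, not part of the driver: the 32-bit GRE key is
 * stored in the PRM as a 24-bit high part plus an 8-bit low part, which
 * is what the two MLX5_SET() pairs above implement.
 */
#if 0
	uint32_t gre_key = rte_be_to_cpu_32(*key_v & *key_m);

	uint32_t key_h = gre_key >> 8;		/* gre_key_h: bits 8-31 */
	uint32_t key_l = gre_key & 0xff;	/* gre_key_l: bits 0-7  */
#endif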
8162
8163 /**
8164  * Add GRE item to matcher and to the value.
8165  *
8166  * @param[in, out] matcher
8167  *   Flow matcher.
8168  * @param[in, out] key
8169  *   Flow matcher value.
8170  * @param[in] item
8171  *   Flow pattern to translate.
8172  * @param[in] inner
8173  *   Item is inner pattern.
8174  */
8175 static void
8176 flow_dv_translate_item_gre(void *matcher, void *key,
8177                            const struct rte_flow_item *item,
8178                            int inner)
8179 {
8180         const struct rte_flow_item_gre *gre_m = item->mask;
8181         const struct rte_flow_item_gre *gre_v = item->spec;
8182         void *headers_m;
8183         void *headers_v;
8184         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8185         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8186         struct {
8187                 union {
8188                         __extension__
8189                         struct {
8190                                 uint16_t version:3;
8191                                 uint16_t rsvd0:9;
8192                                 uint16_t s_present:1;
8193                                 uint16_t k_present:1;
8194                                 uint16_t rsvd_bit1:1;
8195                                 uint16_t c_present:1;
8196                         };
8197                         uint16_t value;
8198                 };
8199         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8200
8201         if (inner) {
8202                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8203                                          inner_headers);
8204                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8205         } else {
8206                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8207                                          outer_headers);
8208                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8209         }
8210         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8211         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8212         if (!gre_v)
8213                 return;
8214         if (!gre_m)
8215                 gre_m = &rte_flow_item_gre_mask;
8216         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8217                  rte_be_to_cpu_16(gre_m->protocol));
8218         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8219                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8220         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8221         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8222         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8223                  gre_crks_rsvd0_ver_m.c_present);
8224         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8225                  gre_crks_rsvd0_ver_v.c_present &
8226                  gre_crks_rsvd0_ver_m.c_present);
8227         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8228                  gre_crks_rsvd0_ver_m.k_present);
8229         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8230                  gre_crks_rsvd0_ver_v.k_present &
8231                  gre_crks_rsvd0_ver_m.k_present);
8232         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8233                  gre_crks_rsvd0_ver_m.s_present);
8234         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8235                  gre_crks_rsvd0_ver_v.s_present &
8236                  gre_crks_rsvd0_ver_m.s_present);
8237 }
8238
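/*
 * Editor's illustrative sketch, not part of the driver: a minimal
 * application-side GRE pattern item that the translator above would
 * consume. Only spec bits covered by the mask are matched; the code
 * above applies the same value-AND-mask rule before programming the
 * matcher. The helper name is hypothetical.
 */
static __rte_unused void
example_app_gre_item(struct rte_flow_item *item,
		     struct rte_flow_item_gre *spec,
		     struct rte_flow_item_gre *mask)
{
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));
	/* Match GRE carrying IPv4 and ignore the C/K/S header bits. */
	spec->protocol = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	mask->protocol = RTE_BE16(UINT16_MAX);
	item->type = RTE_FLOW_ITEM_TYPE_GRE;
	item->spec = spec;
	item->mask = mask;
	item->last = NULL;
}
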
8239 /**
8240  * Add NVGRE item to matcher and to the value.
8241  *
8242  * @param[in, out] matcher
8243  *   Flow matcher.
8244  * @param[in, out] key
8245  *   Flow matcher value.
8246  * @param[in] item
8247  *   Flow pattern to translate.
8248  * @param[in] inner
8249  *   Item is inner pattern.
8250  */
8251 static void
8252 flow_dv_translate_item_nvgre(void *matcher, void *key,
8253                              const struct rte_flow_item *item,
8254                              int inner)
8255 {
8256         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8257         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8258         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8259         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8260         const char *tni_flow_id_m;
8261         const char *tni_flow_id_v;
8262         char *gre_key_m;
8263         char *gre_key_v;
8264         int size;
8265         int i;
8266
8267         /* For NVGRE, GRE header fields must be set with defined values. */
8268         const struct rte_flow_item_gre gre_spec = {
8269                 .c_rsvd0_ver = RTE_BE16(0x2000),
8270                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8271         };
8272         const struct rte_flow_item_gre gre_mask = {
8273                 .c_rsvd0_ver = RTE_BE16(0xB000),
8274                 .protocol = RTE_BE16(UINT16_MAX),
8275         };
8276         const struct rte_flow_item gre_item = {
8277                 .spec = &gre_spec,
8278                 .mask = &gre_mask,
8279                 .last = NULL,
8280         };
8281         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8282         if (!nvgre_v)
8283                 return;
8284         if (!nvgre_m)
8285                 nvgre_m = &rte_flow_item_nvgre_mask;
8286         tni_flow_id_m = (const char *)nvgre_m->tni;
8287         tni_flow_id_v = (const char *)nvgre_v->tni;
8288         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8289         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8290         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8291         memcpy(gre_key_m, tni_flow_id_m, size);
8292         for (i = 0; i < size; ++i)
8293                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8294 }
8295
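/*
 * Editor's worked example, not driver code: NVGRE reuses the 32-bit GRE
 * key as TNI (24 bits) followed by flow_id (8 bits), which is why the
 * translator above copies tni and flow_id back-to-back starting at
 * gre_key_h. For TNI 0x123456 and flow_id 0x78 the programmed key bytes
 * are 0x12 0x34 0x56 0x78. The helper below is hypothetical.
 */
static __rte_unused void
example_nvgre_key_bytes(uint8_t key[4])
{
	const struct rte_flow_item_nvgre nvgre = {
		.tni = { 0x12, 0x34, 0x56 },
		.flow_id = 0x78,
	};

	memcpy(key, nvgre.tni, sizeof(nvgre.tni));
	key[3] = nvgre.flow_id;
}
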
8296 /**
8297  * Add VXLAN item to matcher and to the value.
8298  *
8299  * @param[in, out] matcher
8300  *   Flow matcher.
8301  * @param[in, out] key
8302  *   Flow matcher value.
8303  * @param[in] item
8304  *   Flow pattern to translate.
8305  * @param[in] inner
8306  *   Item is inner pattern.
8307  */
8308 static void
8309 flow_dv_translate_item_vxlan(void *matcher, void *key,
8310                              const struct rte_flow_item *item,
8311                              int inner)
8312 {
8313         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8314         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8315         void *headers_m;
8316         void *headers_v;
8317         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8318         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8319         char *vni_m;
8320         char *vni_v;
8321         uint16_t dport;
8322         int size;
8323         int i;
8324
8325         if (inner) {
8326                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8327                                          inner_headers);
8328                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8329         } else {
8330                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8331                                          outer_headers);
8332                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8333         }
8334         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8335                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8336         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8337                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8338                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8339         }
8340         if (!vxlan_v)
8341                 return;
8342         if (!vxlan_m)
8343                 vxlan_m = &rte_flow_item_vxlan_mask;
8344         size = sizeof(vxlan_m->vni);
8345         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8346         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8347         memcpy(vni_m, vxlan_m->vni, size);
8348         for (i = 0; i < size; ++i)
8349                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8350 }
8351
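/*
 * Editor's illustrative sketch, not driver code: the byte-wise loop above
 * computes value = spec & mask per VNI byte. Matching VNI 100 exactly
 * therefore needs a fully set 24-bit mask; the hypothetical helper shows
 * the equivalent computation on plain arrays.
 */
static __rte_unused void
example_vxlan_vni_mask(uint8_t out[3])
{
	const uint8_t vni_spec[3] = { 0x00, 0x00, 0x64 }; /* VNI 100 */
	const uint8_t vni_mask[3] = { 0xff, 0xff, 0xff };
	int i;

	for (i = 0; i < 3; i++)
		out[i] = vni_spec[i] & vni_mask[i];
}
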
8352 /**
8353  * Add VXLAN-GPE item to matcher and to the value.
8354  *
8355  * @param[in, out] matcher
8356  *   Flow matcher.
8357  * @param[in, out] key
8358  *   Flow matcher value.
8359  * @param[in] item
8360  *   Flow pattern to translate.
8361  * @param[in] inner
8362  *   Item is inner pattern.
8363  */
8364
8365 static void
8366 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8367                                  const struct rte_flow_item *item, int inner)
8368 {
8369         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8370         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8371         void *headers_m;
8372         void *headers_v;
8373         void *misc_m =
8374                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8375         void *misc_v =
8376                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8377         char *vni_m;
8378         char *vni_v;
8379         uint16_t dport;
8380         int size;
8381         int i;
8382         uint8_t flags_m = 0xff;
8383         uint8_t flags_v = 0xc;
8384
8385         if (inner) {
8386                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8387                                          inner_headers);
8388                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8389         } else {
8390                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8391                                          outer_headers);
8392                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8393         }
8394         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8395                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8396         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8397                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8398                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8399         }
8400         if (!vxlan_v)
8401                 return;
8402         if (!vxlan_m)
8403                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8404         size = sizeof(vxlan_m->vni);
8405         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8406         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8407         memcpy(vni_m, vxlan_m->vni, size);
8408         for (i = 0; i < size; ++i)
8409                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8410         if (vxlan_m->flags) {
8411                 flags_m = vxlan_m->flags;
8412                 flags_v = vxlan_v->flags;
8413         }
8414         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8415         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8416         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8417                  vxlan_m->protocol);
8418         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8419                  vxlan_v->protocol);
8420 }
8421
8422 /**
8423  * Add Geneve item to matcher and to the value.
8424  *
8425  * @param[in, out] matcher
8426  *   Flow matcher.
8427  * @param[in, out] key
8428  *   Flow matcher value.
8429  * @param[in] item
8430  *   Flow pattern to translate.
8431  * @param[in] inner
8432  *   Item is inner pattern.
8433  */
8434
8435 static void
8436 flow_dv_translate_item_geneve(void *matcher, void *key,
8437                               const struct rte_flow_item *item, int inner)
8438 {
8439         const struct rte_flow_item_geneve *geneve_m = item->mask;
8440         const struct rte_flow_item_geneve *geneve_v = item->spec;
8441         void *headers_m;
8442         void *headers_v;
8443         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8444         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8445         uint16_t dport;
8446         uint16_t gbhdr_m;
8447         uint16_t gbhdr_v;
8448         char *vni_m;
8449         char *vni_v;
8450         size_t size, i;
8451
8452         if (inner) {
8453                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8454                                          inner_headers);
8455                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8456         } else {
8457                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8458                                          outer_headers);
8459                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8460         }
8461         dport = MLX5_UDP_PORT_GENEVE;
8462         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8463                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8464                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8465         }
8466         if (!geneve_v)
8467                 return;
8468         if (!geneve_m)
8469                 geneve_m = &rte_flow_item_geneve_mask;
8470         size = sizeof(geneve_m->vni);
8471         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8472         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8473         memcpy(vni_m, geneve_m->vni, size);
8474         for (i = 0; i < size; ++i)
8475                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8476         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8477                  rte_be_to_cpu_16(geneve_m->protocol));
8478         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8479                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8480         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8481         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8482         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8483                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8484         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8485                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8486         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8487                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8488         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8489                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8490                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8491 }
8492
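/*
 * Editor's note, not driver code: per the GENEVE header layout the first
 * 16 bits are Ver(2) | OptLen(6) | O(1) | C(1) | Rsvd(6), so after the
 * rte_be_to_cpu_16() conversion above the OAM flag and option length are
 * plain bit fields of gbhdr. E.g. ver_opt_len_o_c_rsvd0 = RTE_BE16(0x0100)
 * encodes OptLen = 1 (one 4-byte option) with the O bit clear. A
 * hypothetical extractor:
 */
static __rte_unused uint8_t
example_geneve_optlen(uint16_t gbhdr)
{
	/* OptLen occupies bits 13..8 of the first GENEVE half-word. */
	return (gbhdr >> 8) & 0x3f;
}
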
8493 /**
8494  * Create Geneve TLV option resource.
8495  *
8496  * @param[in, out] dev
8497  *   Pointer to rte_eth_dev structure.
8498  * @param[in] item
8499  *   Pointer to the GENEVE TLV option flow item;
8500  *   option class, type and length are taken from its spec.
8501  * @param[out] error
8502  *   Pointer to error structure.
8503  *
8504  * @return
8505  *   0 on success, a negative value otherwise
8506  *   (error is set on failure).
8507  */
8508
8509 int
8510 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8511                                              const struct rte_flow_item *item,
8512                                              struct rte_flow_error *error)
8513 {
8514         struct mlx5_priv *priv = dev->data->dev_private;
8515         struct mlx5_dev_ctx_shared *sh = priv->sh;
8516         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8517                         sh->geneve_tlv_option_resource;
8518         struct mlx5_devx_obj *obj;
8519         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8520         int ret = 0;
8521
8522         if (!geneve_opt_v)
8523                 return -1;
8524         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8525         if (geneve_opt_resource != NULL) {
8526                 if (geneve_opt_resource->option_class ==
8527                         geneve_opt_v->option_class &&
8528                         geneve_opt_resource->option_type ==
8529                         geneve_opt_v->option_type &&
8530                         geneve_opt_resource->length ==
8531                         geneve_opt_v->option_len) {
8532                         /* We already have GENEVE TLV option obj allocated. */
8533                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8534                                            __ATOMIC_RELAXED);
8535                 } else {
8536                         ret = rte_flow_error_set(error, ENOMEM,
8537                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8538                                 "Only one GENEVE TLV option supported");
8539                         goto exit;
8540                 }
8541         } else {
8542                 /* Create a GENEVE TLV object and resource. */
8543                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8544                                 geneve_opt_v->option_class,
8545                                 geneve_opt_v->option_type,
8546                                 geneve_opt_v->option_len);
8547                 if (!obj) {
8548                         ret = rte_flow_error_set(error, ENODATA,
8549                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8550                                 "Failed to create GENEVE TLV Devx object");
8551                         goto exit;
8552                 }
8553                 sh->geneve_tlv_option_resource =
8554                                 mlx5_malloc(MLX5_MEM_ZERO,
8555                                                 sizeof(*geneve_opt_resource),
8556                                                 0, SOCKET_ID_ANY);
8557                 if (!sh->geneve_tlv_option_resource) {
8558                         claim_zero(mlx5_devx_cmd_destroy(obj));
8559                         ret = rte_flow_error_set(error, ENOMEM,
8560                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8561                                 "GENEVE TLV object memory allocation failed");
8562                         goto exit;
8563                 }
8564                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8565                 geneve_opt_resource->obj = obj;
8566                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8567                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8568                 geneve_opt_resource->length = geneve_opt_v->option_len;
8569                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8570                                 __ATOMIC_RELAXED);
8571         }
8572 exit:
8573         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8574         return ret;
8575 }
8576
8577 /**
8578  * Add Geneve TLV option item to matcher.
8579  *
8580  * @param[in, out] dev
8581  *   Pointer to rte_eth_dev structure.
8582  * @param[in, out] matcher
8583  *   Flow matcher.
8584  * @param[in, out] key
8585  *   Flow matcher value.
8586  * @param[in] item
8587  *   Flow pattern to translate.
8588  * @param[out] error
8589  *   Pointer to error structure.
8590  */
8591 static int
8592 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8593                                   void *key, const struct rte_flow_item *item,
8594                                   struct rte_flow_error *error)
8595 {
8596         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8597         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8598         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8599         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8600         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8601                         misc_parameters_3);
8602         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8603         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8604         int ret = 0;
8605
8606         if (!geneve_opt_v)
8607                 return -1;
8608         if (!geneve_opt_m)
8609                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8610         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8611                                                            error);
8612         if (ret) {
8613                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8614                 return ret;
8615         }
8616         /*
8617          * Set the option length in GENEVE header if not requested.
8618          * The GENEVE TLV option length is expressed by the option length field
8619          * in the GENEVE header.
8620          * If the option length was not requested but the GENEVE TLV option item
8621          * is present, we set the option length field implicitly.
8622          */
8623         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8624                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8625                          MLX5_GENEVE_OPTLEN_MASK);
8626                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8627                          geneve_opt_v->option_len + 1);
8628         }
8629         /* Set the data. */
8630         if (geneve_opt_v->data) {
8631                 memcpy(&opt_data_key, geneve_opt_v->data,
8632                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8633                                 sizeof(opt_data_key)));
8634                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8635                                 sizeof(opt_data_key));
8636                 memcpy(&opt_data_mask, geneve_opt_m->data,
8637                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8638                                 sizeof(opt_data_mask)));
8639                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8640                                 sizeof(opt_data_mask));
8641                 MLX5_SET(fte_match_set_misc3, misc3_m,
8642                                 geneve_tlv_option_0_data,
8643                                 rte_be_to_cpu_32(opt_data_mask));
8644                 MLX5_SET(fte_match_set_misc3, misc3_v,
8645                                 geneve_tlv_option_0_data,
8646                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8647         }
8648         return ret;
8649 }
8650
8651 /**
8652  * Add MPLS item to matcher and to the value.
8653  *
8654  * @param[in, out] matcher
8655  *   Flow matcher.
8656  * @param[in, out] key
8657  *   Flow matcher value.
8658  * @param[in] item
8659  *   Flow pattern to translate.
8660  * @param[in] prev_layer
8661  *   The protocol layer indicated in previous item.
8662  * @param[in] inner
8663  *   Item is inner pattern.
8664  */
8665 static void
8666 flow_dv_translate_item_mpls(void *matcher, void *key,
8667                             const struct rte_flow_item *item,
8668                             uint64_t prev_layer,
8669                             int inner)
8670 {
8671         const uint32_t *in_mpls_m = item->mask;
8672         const uint32_t *in_mpls_v = item->spec;
8673         uint32_t *out_mpls_m = NULL;
8674         uint32_t *out_mpls_v = NULL;
8675         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8676         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8677         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
8678                                      misc_parameters_2);
8679         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8680         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8681         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8682
8683         switch (prev_layer) {
8684         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8685                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
8686                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8687                          MLX5_UDP_PORT_MPLS);
8688                 break;
8689         case MLX5_FLOW_LAYER_GRE:
8690                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
8691                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8692                          RTE_ETHER_TYPE_MPLS);
8693                 break;
8694         default:
8695                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8696                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8697                          IPPROTO_MPLS);
8698                 break;
8699         }
8700         if (!in_mpls_v)
8701                 return;
8702         if (!in_mpls_m)
8703                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
8704         switch (prev_layer) {
8705         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8706                 out_mpls_m =
8707                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8708                                                  outer_first_mpls_over_udp);
8709                 out_mpls_v =
8710                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8711                                                  outer_first_mpls_over_udp);
8712                 break;
8713         case MLX5_FLOW_LAYER_GRE:
8714                 out_mpls_m =
8715                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8716                                                  outer_first_mpls_over_gre);
8717                 out_mpls_v =
8718                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8719                                                  outer_first_mpls_over_gre);
8720                 break;
8721         default:
8722                 /* Inner MPLS not over GRE is not supported. */
8723                 if (!inner) {
8724                         out_mpls_m =
8725                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8726                                                          misc2_m,
8727                                                          outer_first_mpls);
8728                         out_mpls_v =
8729                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8730                                                          misc2_v,
8731                                                          outer_first_mpls);
8732                 }
8733                 break;
8734         }
8735         if (out_mpls_m && out_mpls_v) {
8736                 *out_mpls_m = *in_mpls_m;
8737                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
8738         }
8739 }
8740
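/*
 * Editor's illustrative sketch, not driver code: rte_flow_item_mpls packs
 * Label(20) | TC(3) | S(1) into label_tc_s[3] followed by ttl, which is
 * why the translator above can treat the item as one 32-bit word. The
 * hypothetical helper encodes label 100, TC 0, bottom-of-stack set:
 */
static __rte_unused void
example_mpls_item(struct rte_flow_item_mpls *spec)
{
	const uint32_t label = 100;

	memset(spec, 0, sizeof(*spec));
	spec->label_tc_s[0] = (label >> 12) & 0xff;       /* 0x00 */
	spec->label_tc_s[1] = (label >> 4) & 0xff;        /* 0x06 */
	spec->label_tc_s[2] = ((label & 0xf) << 4) | 0x1; /* 0x41, S = 1 */
	spec->ttl = 64;
}
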
8741 /**
8742  * Add metadata register item to matcher
8743  *
8744  * @param[in, out] matcher
8745  *   Flow matcher.
8746  * @param[in, out] key
8747  *   Flow matcher value.
8748  * @param[in] reg_type
8749  *   Type of device metadata register.
8750  * @param[in] data
8751  *   Register value.
8752  * @param[in] mask
8753  *   Register mask.
8754  */
8755 static void
8756 flow_dv_match_meta_reg(void *matcher, void *key,
8757                        enum modify_reg reg_type,
8758                        uint32_t data, uint32_t mask)
8759 {
8760         void *misc2_m =
8761                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
8762         void *misc2_v =
8763                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8764         uint32_t temp;
8765
8766         data &= mask;
8767         switch (reg_type) {
8768         case REG_A:
8769                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
8770                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
8771                 break;
8772         case REG_B:
8773                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
8774                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
8775                 break;
8776         case REG_C_0:
8777                 /*
8778                  * The metadata register C0 field might be divided into
8779                  * source vport index and META item value, we should set
8780                  * this field according to specified mask, not as whole one.
8781                  */
8782                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
8783                 temp |= mask;
8784                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
8785                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
8786                 temp &= ~mask;
8787                 temp |= data;
8788                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
8789                 break;
8790         case REG_C_1:
8791                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
8792                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
8793                 break;
8794         case REG_C_2:
8795                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
8796                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
8797                 break;
8798         case REG_C_3:
8799                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
8800                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
8801                 break;
8802         case REG_C_4:
8803                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
8804                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
8805                 break;
8806         case REG_C_5:
8807                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
8808                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
8809                 break;
8810         case REG_C_6:
8811                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
8812                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
8813                 break;
8814         case REG_C_7:
8815                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
8816                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
8817                 break;
8818         default:
8819                 MLX5_ASSERT(false);
8820                 break;
8821         }
8822 }
8823
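/*
 * Editor's worked example, not driver code: the REG_C_0 branch above does
 * a read-modify-write because part of that register may carry the source
 * vport tag while another part carries the META/MARK value. With an
 * existing matcher mask of 0x0000ffff and a new mask of 0xffff0000, the
 * merged mask becomes 0xffffffff and the previously programmed low bits
 * survive. The value update reduces to:
 */
static __rte_unused uint32_t
example_regc0_merge(uint32_t old_val, uint32_t data, uint32_t mask)
{
	return (old_val & ~mask) | (data & mask);
}
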
8824 /**
8825  * Add MARK item to matcher
8826  *
8827  * @param[in] dev
8828  *   The device to configure through.
8829  * @param[in, out] matcher
8830  *   Flow matcher.
8831  * @param[in, out] key
8832  *   Flow matcher value.
8833  * @param[in] item
8834  *   Flow pattern to translate.
8835  */
8836 static void
8837 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
8838                             void *matcher, void *key,
8839                             const struct rte_flow_item *item)
8840 {
8841         struct mlx5_priv *priv = dev->data->dev_private;
8842         const struct rte_flow_item_mark *mark;
8843         uint32_t value;
8844         uint32_t mask;
8845
8846         mark = item->mask ? (const void *)item->mask :
8847                             &rte_flow_item_mark_mask;
8848         mask = mark->id & priv->sh->dv_mark_mask;
8849         mark = (const void *)item->spec;
8850         MLX5_ASSERT(mark);
8851         value = mark->id & priv->sh->dv_mark_mask & mask;
8852         if (mask) {
8853                 enum modify_reg reg;
8854
8855                 /* Get the metadata register index for the mark. */
8856                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
8857                 MLX5_ASSERT(reg > 0);
8858                 if (reg == REG_C_0) {
8859                         struct mlx5_priv *priv = dev->data->dev_private;
8860                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8861                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8862
8863                         mask &= msk_c0;
8864                         mask <<= shl_c0;
8865                         value <<= shl_c0;
8866                 }
8867                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8868         }
8869 }
8870
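/*
 * Editor's worked example, not driver code: when MARK lands in REG_C_0
 * only the dv_regc0_mask window is usable, so the value is shifted to the
 * window's least significant bit. For msk_c0 = 0xffff0000, rte_bsf32()
 * returns 16 and a 16-bit mark m is programmed as m << 16:
 */
static __rte_unused uint32_t
example_mark_to_regc0(uint32_t mark_id, uint32_t msk_c0)
{
	uint32_t shl_c0 = rte_bsf32(msk_c0);

	return (mark_id << shl_c0) & msk_c0;
}
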
8871 /**
8872  * Add META item to matcher
8873  *
8874  * @param[in] dev
8875  *   The device to configure through.
8876  * @param[in, out] matcher
8877  *   Flow matcher.
8878  * @param[in, out] key
8879  *   Flow matcher value.
8880  * @param[in] attr
8881  *   Attributes of flow that includes this item.
8882  * @param[in] item
8883  *   Flow pattern to translate.
8884  */
8885 static void
8886 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
8887                             void *matcher, void *key,
8888                             const struct rte_flow_attr *attr,
8889                             const struct rte_flow_item *item)
8890 {
8891         const struct rte_flow_item_meta *meta_m;
8892         const struct rte_flow_item_meta *meta_v;
8893
8894         meta_m = (const void *)item->mask;
8895         if (!meta_m)
8896                 meta_m = &rte_flow_item_meta_mask;
8897         meta_v = (const void *)item->spec;
8898         if (meta_v) {
8899                 int reg;
8900                 uint32_t value = meta_v->data;
8901                 uint32_t mask = meta_m->data;
8902
8903                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
8904                 if (reg < 0)
8905                         return;
8906                 MLX5_ASSERT(reg != REG_NON);
8907                 /*
8908                  * In datapath code there are no endianness
8909                  * conversions for performance reasons; all
8910                  * pattern conversions are done in rte_flow.
8911                  */
8912                 value = rte_cpu_to_be_32(value);
8913                 mask = rte_cpu_to_be_32(mask);
8914                 if (reg == REG_C_0) {
8915                         struct mlx5_priv *priv = dev->data->dev_private;
8916                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8917                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8918 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
8919                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
8920
8921                         value >>= shr_c0;
8922                         mask >>= shr_c0;
8923 #endif
8924                         value <<= shl_c0;
8925                         mask <<= shl_c0;
8926                         MLX5_ASSERT(msk_c0);
8927                         MLX5_ASSERT(!(~msk_c0 & mask));
8928                 }
8929                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8930         }
8931 }
8932
8933 /**
8934  * Add vport metadata Reg C0 item to matcher
8935  *
8936  * @param[in, out] matcher
8937  *   Flow matcher.
8938  * @param[in, out] key
8939  *   Flow matcher value.
8940  * @param[in] value
8941  *   Vport metadata register value; @p mask selects the bits to match.
8942  */
8943 static void
8944 flow_dv_translate_item_meta_vport(void *matcher, void *key,
8945                                   uint32_t value, uint32_t mask)
8946 {
8947         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
8948 }
8949
8950 /**
8951  * Add tag item to matcher
8952  *
8953  * @param[in] dev
8954  *   The device to configure through.
8955  * @param[in, out] matcher
8956  *   Flow matcher.
8957  * @param[in, out] key
8958  *   Flow matcher value.
8959  * @param[in] item
8960  *   Flow pattern to translate.
8961  */
8962 static void
8963 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
8964                                 void *matcher, void *key,
8965                                 const struct rte_flow_item *item)
8966 {
8967         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
8968         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
8969         uint32_t mask, value;
8970
8971         MLX5_ASSERT(tag_v);
8972         value = tag_v->data;
8973         mask = tag_m ? tag_m->data : UINT32_MAX;
8974         if (tag_v->id == REG_C_0) {
8975                 struct mlx5_priv *priv = dev->data->dev_private;
8976                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8977                 uint32_t shl_c0 = rte_bsf32(msk_c0);
8978
8979                 mask &= msk_c0;
8980                 mask <<= shl_c0;
8981                 value <<= shl_c0;
8982         }
8983         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
8984 }
8985
8986 /**
8987  * Add TAG item to matcher
8988  *
8989  * @param[in] dev
8990  *   The device to configure through.
8991  * @param[in, out] matcher
8992  *   Flow matcher.
8993  * @param[in, out] key
8994  *   Flow matcher value.
8995  * @param[in] item
8996  *   Flow pattern to translate.
8997  */
8998 static void
8999 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9000                            void *matcher, void *key,
9001                            const struct rte_flow_item *item)
9002 {
9003         const struct rte_flow_item_tag *tag_v = item->spec;
9004         const struct rte_flow_item_tag *tag_m = item->mask;
9005         enum modify_reg reg;
9006
9007         MLX5_ASSERT(tag_v);
9008         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9009         /* Get the metadata register index for the tag. */
9010         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9011         MLX5_ASSERT(reg > 0);
9012         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9013 }
9014
9015 /**
9016  * Add source vport match to the specified matcher.
9017  *
9018  * @param[in, out] matcher
9019  *   Flow matcher.
9020  * @param[in, out] key
9021  *   Flow matcher value.
9022  * @param[in] port
9023  *   Source vport value to match.
9024  * @param[in] mask
9025  *   Mask to apply before the match.
9026  */
9027 static void
9028 flow_dv_translate_item_source_vport(void *matcher, void *key,
9029                                     int16_t port, uint16_t mask)
9030 {
9031         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9032         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9033
9034         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9035         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9036 }
9037
9038 /**
9039  * Translate port-id item to eswitch match on port-id.
9040  *
9041  * @param[in] dev
9042  *   The device to configure through.
9043  * @param[in, out] matcher
9044  *   Flow matcher.
9045  * @param[in, out] key
9046  *   Flow matcher value.
9047  * @param[in] item
9048  *   Flow pattern to translate.
9049  * @param[in] attr
9050  *   Flow attributes.
9051  *
9052  * @return
9053  *   0 on success, a negative errno value otherwise.
9054  */
9055 static int
9056 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9057                                void *key, const struct rte_flow_item *item,
9058                                const struct rte_flow_attr *attr)
9059 {
9060         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9061         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9062         struct mlx5_priv *priv;
9063         uint16_t mask, id;
9064
9065         mask = pid_m ? pid_m->id : 0xffff;
9066         id = pid_v ? pid_v->id : dev->data->port_id;
9067         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9068         if (!priv)
9069                 return -rte_errno;
9070         /*
9071          * Translate to vport field or to metadata, depending on mode.
9072          * Kernel can use either misc.source_port or half of C0 metadata
9073          * register.
9074          */
9075         if (priv->vport_meta_mask) {
9076                 /*
9077                  * Provide the hint for SW steering library
9078                  * to insert the flow into ingress domain and
9079                  * save the extra vport match.
9080                  */
9081                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9082                     priv->pf_bond < 0 && attr->transfer)
9083                         flow_dv_translate_item_source_vport
9084                                 (matcher, key, priv->vport_id, mask);
9085                 /*
9086                  * We should always set the vport metadata register,
9087                  * otherwise the SW steering library can drop
9088                  * the rule if wire vport metadata value is not zero,
9089                  * it depends on kernel configuration.
9090                  */
9091                 flow_dv_translate_item_meta_vport(matcher, key,
9092                                                   priv->vport_meta_tag,
9093                                                   priv->vport_meta_mask);
9094         } else {
9095                 flow_dv_translate_item_source_vport(matcher, key,
9096                                                     priv->vport_id, mask);
9097         }
9098         return 0;
9099 }
9100
9101 /**
9102  * Add ICMP6 item to matcher and to the value.
9103  *
9104  * @param[in, out] matcher
9105  *   Flow matcher.
9106  * @param[in, out] key
9107  *   Flow matcher value.
9108  * @param[in] item
9109  *   Flow pattern to translate.
9110  * @param[in] inner
9111  *   Item is inner pattern.
9112  */
9113 static void
9114 flow_dv_translate_item_icmp6(void *matcher, void *key,
9115                               const struct rte_flow_item *item,
9116                               int inner)
9117 {
9118         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9119         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9120         void *headers_m;
9121         void *headers_v;
9122         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9123                                      misc_parameters_3);
9124         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9125         if (inner) {
9126                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9127                                          inner_headers);
9128                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9129         } else {
9130                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9131                                          outer_headers);
9132                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9133         }
9134         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9135         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9136         if (!icmp6_v)
9137                 return;
9138         if (!icmp6_m)
9139                 icmp6_m = &rte_flow_item_icmp6_mask;
9140         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9141         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9142                  icmp6_v->type & icmp6_m->type);
9143         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9144         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9145                  icmp6_v->code & icmp6_m->code);
9146 }
9147
9148 /**
9149  * Add ICMP item to matcher and to the value.
9150  *
9151  * @param[in, out] matcher
9152  *   Flow matcher.
9153  * @param[in, out] key
9154  *   Flow matcher value.
9155  * @param[in] item
9156  *   Flow pattern to translate.
9157  * @param[in] inner
9158  *   Item is inner pattern.
9159  */
9160 static void
9161 flow_dv_translate_item_icmp(void *matcher, void *key,
9162                             const struct rte_flow_item *item,
9163                             int inner)
9164 {
9165         const struct rte_flow_item_icmp *icmp_m = item->mask;
9166         const struct rte_flow_item_icmp *icmp_v = item->spec;
9167         uint32_t icmp_header_data_m = 0;
9168         uint32_t icmp_header_data_v = 0;
9169         void *headers_m;
9170         void *headers_v;
9171         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9172                                      misc_parameters_3);
9173         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9174         if (inner) {
9175                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9176                                          inner_headers);
9177                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9178         } else {
9179                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9180                                          outer_headers);
9181                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9182         }
9183         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9184         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9185         if (!icmp_v)
9186                 return;
9187         if (!icmp_m)
9188                 icmp_m = &rte_flow_item_icmp_mask;
9189         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9190                  icmp_m->hdr.icmp_type);
9191         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9192                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9193         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9194                  icmp_m->hdr.icmp_code);
9195         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9196                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9197         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9198         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9199         if (icmp_header_data_m) {
9200                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9201                 icmp_header_data_v |=
9202                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9203                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9204                          icmp_header_data_m);
9205                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9206                          icmp_header_data_v & icmp_header_data_m);
9207         }
9208 }
9209
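/*
 * Editor's worked example, not driver code: the icmp_header_data dword
 * above carries the echo identifier in its high 16 bits and the sequence
 * number in its low 16 bits, mirroring bytes 4..7 of the ICMP header.
 * For ident 0x1234 and seq 0x0001 the programmed value is 0x12340001:
 */
static __rte_unused uint32_t
example_icmp_header_data(uint16_t ident, uint16_t seq)
{
	return ((uint32_t)ident << 16) | seq;
}
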
9210 /**
9211  * Add GTP item to matcher and to the value.
9212  *
9213  * @param[in, out] matcher
9214  *   Flow matcher.
9215  * @param[in, out] key
9216  *   Flow matcher value.
9217  * @param[in] item
9218  *   Flow pattern to translate.
9219  * @param[in] inner
9220  *   Item is inner pattern.
9221  */
9222 static void
9223 flow_dv_translate_item_gtp(void *matcher, void *key,
9224                            const struct rte_flow_item *item, int inner)
9225 {
9226         const struct rte_flow_item_gtp *gtp_m = item->mask;
9227         const struct rte_flow_item_gtp *gtp_v = item->spec;
9228         void *headers_m;
9229         void *headers_v;
9230         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9231                                      misc_parameters_3);
9232         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9233         uint16_t dport = RTE_GTPU_UDP_PORT;
9234
9235         if (inner) {
9236                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9237                                          inner_headers);
9238                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9239         } else {
9240                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9241                                          outer_headers);
9242                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9243         }
9244         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9245                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9246                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9247         }
9248         if (!gtp_v)
9249                 return;
9250         if (!gtp_m)
9251                 gtp_m = &rte_flow_item_gtp_mask;
9252         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9253                  gtp_m->v_pt_rsv_flags);
9254         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9255                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9256         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9257         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9258                  gtp_v->msg_type & gtp_m->msg_type);
9259         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9260                  rte_be_to_cpu_32(gtp_m->teid));
9261         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9262                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9263 }
9264
9265 /**
9266  * Add GTP PSC item to matcher.
9267  *
9268  * @param[in, out] matcher
9269  *   Flow matcher.
9270  * @param[in, out] key
9271  *   Flow matcher value.
9272  * @param[in] item
9273  *   Flow pattern to translate.
9274  */
9275 static int
9276 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9277                                const struct rte_flow_item *item)
9278 {
9279         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9280         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9281         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9282                         misc_parameters_3);
9283         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9284         union {
9285                 uint32_t w32;
9286                 struct {
9287                         uint16_t seq_num;
9288                         uint8_t npdu_num;
9289                         uint8_t next_ext_header_type;
9290                 };
9291         } dw_2;
9292         uint8_t gtp_flags;
9293
9294         /* Always match on E-flag set to one, regardless of GTP item settings. */
9295         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9296         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9297         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9298         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9299         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9300         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9301         /* Set the next extension header type. */
9302         dw_2.seq_num = 0;
9303         dw_2.npdu_num = 0;
9304         dw_2.next_ext_header_type = 0xff;
9305         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9306                  rte_cpu_to_be_32(dw_2.w32));
9307         dw_2.seq_num = 0;
9308         dw_2.npdu_num = 0;
9309         dw_2.next_ext_header_type = 0x85;
9310         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9311                  rte_cpu_to_be_32(dw_2.w32));
9312         if (gtp_psc_v) {
9313                 union {
9314                         uint32_t w32;
9315                         struct {
9316                                 uint8_t len;
9317                                 uint8_t type_flags;
9318                                 uint8_t qfi;
9319                                 uint8_t reserved;
9320                         };
9321                 } dw_0;
9322
9323                 /* Set extension header PDU type and QoS. */
9324                 if (!gtp_psc_m)
9325                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9326                 dw_0.w32 = 0;
9327                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9328                 dw_0.qfi = gtp_psc_m->qfi;
9329                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9330                          rte_cpu_to_be_32(dw_0.w32));
9331                 dw_0.w32 = 0;
9332                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9333                                                         gtp_psc_m->pdu_type);
9334                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9335                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9336                          rte_cpu_to_be_32(dw_0.w32));
9337         }
9338         return 0;
9339 }
9340
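/*
 * Editor's note, not driver code: 0x85 is the GTP-U "PDU Session
 * Container" next-extension-header type, so the code above always matches
 * on E-flag set plus that extension type and only optionally narrows the
 * match by PDU type and QFI. A minimal application item that matches a
 * QFI alone (hypothetical helper, QFI is a 6-bit field):
 */
static __rte_unused void
example_gtp_psc_item(struct rte_flow_item_gtp_psc *spec,
		     struct rte_flow_item_gtp_psc *mask)
{
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));
	spec->qfi = 9;    /* QoS flow identifier to match. */
	mask->qfi = 0x3f; /* Full 6-bit QFI mask. */
}
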
9341 /**
9342  * Add eCPRI item to matcher and to the value.
9343  *
9344  * @param[in] dev
9345  *   The device to configure through.
9346  * @param[in, out] matcher
9347  *   Flow matcher.
9348  * @param[in, out] key
9349  *   Flow matcher value.
9350  * @param[in] item
9351  *   Flow pattern to translate.
9352  *   The flex parser sample IDs used in the match are taken
9353  *   from the device context, not passed by the caller.
9354  */
9355 static void
9356 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9357                              void *key, const struct rte_flow_item *item)
9358 {
9359         struct mlx5_priv *priv = dev->data->dev_private;
9360         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9361         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9362         struct rte_ecpri_common_hdr common;
9363         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9364                                      misc_parameters_4);
9365         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9366         uint32_t *samples;
9367         void *dw_m;
9368         void *dw_v;
9369
9370         if (!ecpri_v)
9371                 return;
9372         if (!ecpri_m)
9373                 ecpri_m = &rte_flow_item_ecpri_mask;
9374         /*
9375          * At most four DW samples are supported in a single match now.
9376          * Two are used for eCPRI matching:
9377          * 1. Type: one byte, mask should be 0x00ff0000 in network order
9378          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
9379          *    if any.
9380          */
9381         if (!ecpri_m->hdr.common.u32)
9382                 return;
9383         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9384         /* Need to take the whole DW as the mask to fill the entry. */
9385         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9386                             prog_sample_field_value_0);
9387         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9388                             prog_sample_field_value_0);
9389         /* Already big endian (network order) in the header. */
9390         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9391         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9392         /* Sample#0, used for matching type, offset 0. */
9393         MLX5_SET(fte_match_set_misc4, misc4_m,
9394                  prog_sample_field_id_0, samples[0]);
9395         /* The sample ID is not a maskable value, program it in both fields. */
9396         MLX5_SET(fte_match_set_misc4, misc4_v,
9397                  prog_sample_field_id_0, samples[0]);
9398         /*
9399          * Check whether the message body part needs to be matched.
9400          * Wildcard rules matching only the type field must be supported.
9401          */
9402         if (ecpri_m->hdr.dummy[0]) {
9403                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9404                 switch (common.type) {
9405                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9406                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9407                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9408                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9409                                             prog_sample_field_value_1);
9410                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9411                                             prog_sample_field_value_1);
9412                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9413                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9414                                             ecpri_m->hdr.dummy[0];
9415                         /* Sample#1, to match message body, offset 4. */
9416                         MLX5_SET(fte_match_set_misc4, misc4_m,
9417                                  prog_sample_field_id_1, samples[1]);
9418                         MLX5_SET(fte_match_set_misc4, misc4_v,
9419                                  prog_sample_field_id_1, samples[1]);
9420                         break;
9421                 default:
9422                         /* Others, do not match any sample ID. */
9423                         break;
9424                 }
9425         }
9426 }
9427
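/*
 * Editor's illustrative sketch, not driver code: matching on the eCPRI
 * message type alone, which consumes a single flex-parser sample as
 * described above. The common header stays in network order, so the
 * one-byte type field corresponds to mask 0x00ff0000:
 */
static __rte_unused void
example_ecpri_type_item(struct rte_flow_item_ecpri *spec,
			struct rte_flow_item_ecpri *mask)
{
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));
	spec->hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
	mask->hdr.common.u32 = rte_cpu_to_be_32(0x00ff0000);
}
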
9428 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9429
9430 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9431         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9432                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9433
9434 /**
9435  * Calculate flow matcher enable bitmap.
9436  *
9437  * @param match_criteria
9438  *   Pointer to flow matcher criteria.
9439  *
9440  * @return
9441  *   Bitmap of enabled fields.
9442  */
9443 static uint8_t
9444 flow_dv_matcher_enable(uint32_t *match_criteria)
9445 {
9446         uint8_t match_criteria_enable;
9447
9448         match_criteria_enable =
9449                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9450                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9451         match_criteria_enable |=
9452                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9453                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9454         match_criteria_enable |=
9455                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9456                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9457         match_criteria_enable |=
9458                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9459                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9460         match_criteria_enable |=
9461                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9462                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9463         match_criteria_enable |=
9464                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9465                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9466         return match_criteria_enable;
9467 }
9468
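/*
 * Editor's worked example, not driver code: a matcher that populated only
 * outer_headers and misc_parameters yields
 *   match_criteria_enable = (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *                           (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT);
 * all-zero criteria blocks contribute nothing, letting the device skip
 * comparing them entirely.
 */
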
9469 struct mlx5_hlist_entry *
9470 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9471 {
9472         struct mlx5_dev_ctx_shared *sh = list->ctx;
9473         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9474         struct rte_eth_dev *dev = ctx->dev;
9475         struct mlx5_flow_tbl_data_entry *tbl_data;
9476         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9477         struct rte_flow_error *error = ctx->error;
9478         union mlx5_flow_tbl_key key = { .v64 = key64 };
9479         struct mlx5_flow_tbl_resource *tbl;
9480         void *domain;
9481         uint32_t idx = 0;
9482         int ret;
9483
9484         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9485         if (!tbl_data) {
9486                 rte_flow_error_set(error, ENOMEM,
9487                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9488                                    NULL,
9489                                    "cannot allocate flow table data entry");
9490                 return NULL;
9491         }
9492         tbl_data->idx = idx;
9493         tbl_data->tunnel = tt_prm->tunnel;
9494         tbl_data->group_id = tt_prm->group_id;
9495         tbl_data->external = !!tt_prm->external;
9496         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9497         tbl_data->is_egress = !!key.is_egress;
9498         tbl_data->is_transfer = !!key.is_fdb;
9499         tbl_data->dummy = !!key.dummy;
9500         tbl_data->level = key.level;
9501         tbl_data->id = key.id;
9502         tbl = &tbl_data->tbl;
9503         if (key.dummy)
9504                 return &tbl_data->entry;
9505         if (key.is_fdb)
9506                 domain = sh->fdb_domain;
9507         else if (key.is_egress)
9508                 domain = sh->tx_domain;
9509         else
9510                 domain = sh->rx_domain;
9511         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
9512         if (ret) {
9513                 rte_flow_error_set(error, ENOMEM,
9514                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9515                                    NULL, "cannot create flow table object");
9516                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9517                 return NULL;
9518         }
9519         if (key.level != 0) {
9520                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9521                                         (tbl->obj, &tbl_data->jump.action);
9522                 if (ret) {
9523                         rte_flow_error_set(error, ENOMEM,
9524                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9525                                            NULL,
9526                                            "cannot create flow jump action");
9527                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9528                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9529                         return NULL;
9530                 }
9531         }
9532         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
9533               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
9534               key.level, key.id);
9535         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9536                              flow_dv_matcher_create_cb,
9537                              flow_dv_matcher_match_cb,
9538                              flow_dv_matcher_remove_cb);
9539         return &tbl_data->entry;
9540 }
9541
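/*
 * Sketch (hypothetical helper): the 64-bit hash-list key consumed by
 * flow_dv_tbl_create_cb()/flow_dv_tbl_match_cb() is simply the packed
 * mlx5_flow_tbl_key bit-field view, so two lookups hit the same entry only
 * when level, ID, domain and direction all agree.
 */
static __rte_unused uint64_t
flow_dv_tbl_key_example(uint32_t level, uint32_t id,
                        bool egress, bool transfer)
{
        union mlx5_flow_tbl_key key = {
                {
                        .level = level,
                        .id = id,
                        .reserved = 0,
                        .dummy = 0,
                        .is_fdb = !!transfer,
                        .is_egress = !!egress,
                }
        };

        return key.v64;
}
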
9542 int
9543 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9544                      struct mlx5_hlist_entry *entry, uint64_t key64,
9545                      void *cb_ctx __rte_unused)
9546 {
9547         struct mlx5_flow_tbl_data_entry *tbl_data =
9548                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9549         union mlx5_flow_tbl_key key = { .v64 = key64 };
9550
9551         return tbl_data->level != key.level ||
9552                tbl_data->id != key.id ||
9553                tbl_data->dummy != key.dummy ||
9554                tbl_data->is_transfer != !!key.is_fdb ||
9555                tbl_data->is_egress != !!key.is_egress;
9556 }
9557
9558 /**
9559  * Get a flow table.
9560  *
9561  * @param[in, out] dev
9562  *   Pointer to rte_eth_dev structure.
9563  * @param[in] table_level
9564  *   Table level to use.
9565  * @param[in] egress
9566  *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] external
 *   External (application-created) flow table flag.
 * @param[in] tunnel
 *   Tunnel offload context, NULL for a regular table.
 * @param[in] group_id
 *   Original (tunnel offload) group ID.
 * @param[in] dummy
 *   Dummy entry for the DV API.
 * @param[in] table_id
 *   Table ID to use.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Table resource on success, NULL otherwise and rte_errno is set.
9578  */
9579 struct mlx5_flow_tbl_resource *
9580 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
9581                          uint32_t table_level, uint8_t egress,
9582                          uint8_t transfer,
9583                          bool external,
9584                          const struct mlx5_flow_tunnel *tunnel,
9585                          uint32_t group_id, uint8_t dummy,
9586                          uint32_t table_id,
9587                          struct rte_flow_error *error)
9588 {
9589         struct mlx5_priv *priv = dev->data->dev_private;
9590         union mlx5_flow_tbl_key table_key = {
9591                 {
9592                         .level = table_level,
9593                         .id = table_id,
9594                         .reserved = 0,
9595                         .dummy = !!dummy,
9596                         .is_fdb = !!transfer,
9597                         .is_egress = !!egress,
9598                 }
9599         };
9600         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
9601                 .tunnel = tunnel,
9602                 .group_id = group_id,
9603                 .external = external,
9604         };
9605         struct mlx5_flow_cb_ctx ctx = {
9606                 .dev = dev,
9607                 .error = error,
9608                 .data = &tt_prm,
9609         };
9610         struct mlx5_hlist_entry *entry;
9611         struct mlx5_flow_tbl_data_entry *tbl_data;
9612
9613         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
9614         if (!entry) {
9615                 rte_flow_error_set(error, ENOMEM,
9616                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9617                                    "cannot get table");
9618                 return NULL;
9619         }
9620         DRV_LOG(DEBUG, "table_level %u table_id %u "
9621                 "tunnel %u group %u registered.",
9622                 table_level, table_id,
9623                 tunnel ? tunnel->tunnel_id : 0, group_id);
9624         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9625         return &tbl_data->tbl;
9626 }
9627
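/*
 * Typical caller pattern, sketched here with arbitrary values (the helper
 * is illustrative, not driver API): each successful
 * flow_dv_tbl_resource_get() takes a reference that must be returned with
 * flow_dv_tbl_resource_release() once the dependent object is destroyed.
 */
static __rte_unused int
flow_dv_tbl_usage_example(struct rte_eth_dev *dev,
                          struct rte_flow_error *error)
{
        struct mlx5_flow_tbl_resource *tbl;

        /* Ingress NIC table, level 1, no tunnel context. */
        tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
                                       0, 0, 0, error);
        if (!tbl)
                return -rte_errno;
        /* ... create jump/matcher objects referencing tbl ... */
        return flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
}
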
9628 void
9629 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
9630                       struct mlx5_hlist_entry *entry)
9631 {
9632         struct mlx5_dev_ctx_shared *sh = list->ctx;
9633         struct mlx5_flow_tbl_data_entry *tbl_data =
9634                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9635
9636         MLX5_ASSERT(entry && sh);
9637         if (tbl_data->jump.action)
9638                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
9639         if (tbl_data->tbl.obj)
9640                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
9641         if (tbl_data->tunnel_offload && tbl_data->external) {
9642                 struct mlx5_hlist_entry *he;
9643                 struct mlx5_hlist *tunnel_grp_hash;
9644                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
9645                 union tunnel_tbl_key tunnel_key = {
9646                         .tunnel_id = tbl_data->tunnel ?
9647                                         tbl_data->tunnel->tunnel_id : 0,
9648                         .group = tbl_data->group_id
9649                 };
9650                 uint32_t table_level = tbl_data->level;
9651
9652                 tunnel_grp_hash = tbl_data->tunnel ?
9653                                         tbl_data->tunnel->groups :
9654                                         thub->groups;
9655                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
9656                 if (he)
9657                         mlx5_hlist_unregister(tunnel_grp_hash, he);
9658                 DRV_LOG(DEBUG,
9659                         "table_level %u id %u tunnel %u group %u released.",
9660                         table_level,
9661                         tbl_data->id,
9662                         tbl_data->tunnel ?
9663                         tbl_data->tunnel->tunnel_id : 0,
9664                         tbl_data->group_id);
9665         }
9666         mlx5_cache_list_destroy(&tbl_data->matchers);
9667         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
9668 }
9669
9670 /**
9671  * Release a flow table.
9672  *
9673  * @param[in] sh
9674  *   Pointer to device shared structure.
9675  * @param[in] tbl
9676  *   Table resource to be released.
9677  *
9678  * @return
 *   0 if the table was released, 1 if a reference on it still exists.
9680  */
9681 static int
9682 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
9683                              struct mlx5_flow_tbl_resource *tbl)
9684 {
        struct mlx5_flow_tbl_data_entry *tbl_data;

        if (!tbl)
                return 0;
        tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9690         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
9691 }
9692
9693 int
9694 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
9695                          struct mlx5_cache_entry *entry, void *cb_ctx)
9696 {
9697         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9698         struct mlx5_flow_dv_matcher *ref = ctx->data;
9699         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
9700                                                         entry);
9701
9702         return cur->crc != ref->crc ||
9703                cur->priority != ref->priority ||
9704                memcmp((const void *)cur->mask.buf,
9705                       (const void *)ref->mask.buf, ref->mask.size);
9706 }
9707
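/*
 * Sketch (hypothetical helper): matcher cache identity is the CRC of the
 * mask, the priority, and the raw mask bytes; the CRC only serves as a
 * fast pre-filter before the memcmp() above.
 */
static __rte_unused bool
flow_dv_matcher_same_example(const struct mlx5_flow_dv_matcher *a,
                             const struct mlx5_flow_dv_matcher *b)
{
        return a->crc == b->crc && a->priority == b->priority &&
               !memcmp(a->mask.buf, b->mask.buf, a->mask.size);
}
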
9708 struct mlx5_cache_entry *
9709 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
9710                           struct mlx5_cache_entry *entry __rte_unused,
9711                           void *cb_ctx)
9712 {
9713         struct mlx5_dev_ctx_shared *sh = list->ctx;
9714         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9715         struct mlx5_flow_dv_matcher *ref = ctx->data;
9716         struct mlx5_flow_dv_matcher *cache;
9717         struct mlx5dv_flow_matcher_attr dv_attr = {
9718                 .type = IBV_FLOW_ATTR_NORMAL,
9719                 .match_mask = (void *)&ref->mask,
9720         };
9721         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
9722                                                             typeof(*tbl), tbl);
9723         int ret;
9724
9725         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
9726         if (!cache) {
9727                 rte_flow_error_set(ctx->error, ENOMEM,
9728                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9729                                    "cannot create matcher");
9730                 return NULL;
9731         }
9732         *cache = *ref;
9733         dv_attr.match_criteria_enable =
9734                 flow_dv_matcher_enable(cache->mask.buf);
9735         dv_attr.priority = ref->priority;
9736         if (tbl->is_egress)
9737                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
9738         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
9739                                                &cache->matcher_object);
9740         if (ret) {
9741                 mlx5_free(cache);
9742                 rte_flow_error_set(ctx->error, ENOMEM,
9743                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9744                                    "cannot create matcher");
9745                 return NULL;
9746         }
9747         return &cache->entry;
9748 }
9749
9750 /**
9751  * Register the flow matcher.
9752  *
9753  * @param[in, out] dev
9754  *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to the flow matcher reference.
 * @param[in, out] key
 *   Pointer to the flow table key.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Tunnel offload context, NULL for a regular table.
 * @param[in] group_id
 *   Original (tunnel offload) group ID.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
9766  */
9767 static int
9768 flow_dv_matcher_register(struct rte_eth_dev *dev,
9769                          struct mlx5_flow_dv_matcher *ref,
9770                          union mlx5_flow_tbl_key *key,
9771                          struct mlx5_flow *dev_flow,
9772                          const struct mlx5_flow_tunnel *tunnel,
9773                          uint32_t group_id,
9774                          struct rte_flow_error *error)
9775 {
9776         struct mlx5_cache_entry *entry;
9777         struct mlx5_flow_dv_matcher *cache;
9778         struct mlx5_flow_tbl_resource *tbl;
9779         struct mlx5_flow_tbl_data_entry *tbl_data;
9780         struct mlx5_flow_cb_ctx ctx = {
9781                 .error = error,
9782                 .data = ref,
9783         };
9784
        /*
         * The tunnel offload API requires this registration to handle the
         * case when a tunnel match rule is inserted before the tunnel set
         * rule.
         */
9789         tbl = flow_dv_tbl_resource_get(dev, key->level,
9790                                        key->is_egress, key->is_fdb,
9791                                        dev_flow->external, tunnel,
9792                                        group_id, 0, key->id, error);
9793         if (!tbl)
9794                 return -rte_errno;      /* No need to refill the error info */
9795         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9796         ref->tbl = tbl;
9797         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
9798         if (!entry) {
9799                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9800                 return rte_flow_error_set(error, ENOMEM,
9801                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9802                                           "cannot allocate ref memory");
9803         }
9804         cache = container_of(entry, typeof(*cache), entry);
9805         dev_flow->handle->dvh.matcher = cache;
9806         return 0;
9807 }
9808
9809 struct mlx5_hlist_entry *
9810 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
9811 {
9812         struct mlx5_dev_ctx_shared *sh = list->ctx;
9813         struct rte_flow_error *error = ctx;
9814         struct mlx5_flow_dv_tag_resource *entry;
9815         uint32_t idx = 0;
9816         int ret;
9817
9818         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
9819         if (!entry) {
9820                 rte_flow_error_set(error, ENOMEM,
9821                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9822                                    "cannot allocate resource memory");
9823                 return NULL;
9824         }
9825         entry->idx = idx;
9826         entry->tag_id = key;
9827         ret = mlx5_flow_os_create_flow_action_tag(key,
9828                                                   &entry->action);
9829         if (ret) {
9830                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
9831                 rte_flow_error_set(error, ENOMEM,
9832                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9833                                    NULL, "cannot create action");
9834                 return NULL;
9835         }
9836         return &entry->entry;
9837 }
9838
9839 int
9840 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
9841                      struct mlx5_hlist_entry *entry, uint64_t key,
9842                      void *cb_ctx __rte_unused)
9843 {
9844         struct mlx5_flow_dv_tag_resource *tag =
9845                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9846
9847         return key != tag->tag_id;
9848 }
9849
9850 /**
9851  * Find existing tag resource or create and register a new one.
9852  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tag_be24
 *   Tag value in big-endian, right-shifted by 8 bits (24-bit value).
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
9864  */
9865 static int
9866 flow_dv_tag_resource_register
9867                         (struct rte_eth_dev *dev,
9868                          uint32_t tag_be24,
9869                          struct mlx5_flow *dev_flow,
9870                          struct rte_flow_error *error)
9871 {
9872         struct mlx5_priv *priv = dev->data->dev_private;
9873         struct mlx5_flow_dv_tag_resource *cache_resource;
9874         struct mlx5_hlist_entry *entry;
9875
9876         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
9877         if (entry) {
9878                 cache_resource = container_of
9879                         (entry, struct mlx5_flow_dv_tag_resource, entry);
9880                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
9881                 dev_flow->dv.tag_resource = cache_resource;
9882                 return 0;
9883         }
9884         return -rte_errno;
9885 }
9886
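/*
 * Usage sketch (hypothetical helper): callers derive the 24-bit key from a
 * mark ID via mlx5_flow_mark_set() (big-endian, right-shifted by 8) before
 * registering, as done in the sample action translation below.
 */
static __rte_unused int
flow_dv_tag_register_example(struct rte_eth_dev *dev, uint32_t mark_id,
                             struct mlx5_flow *dev_flow,
                             struct rte_flow_error *error)
{
        uint32_t tag_be24 = mlx5_flow_mark_set(mark_id);

        return flow_dv_tag_resource_register(dev, tag_be24, dev_flow, error);
}
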
9887 void
9888 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
9889                       struct mlx5_hlist_entry *entry)
9890 {
9891         struct mlx5_dev_ctx_shared *sh = list->ctx;
9892         struct mlx5_flow_dv_tag_resource *tag =
9893                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9894
9895         MLX5_ASSERT(tag && sh && tag->action);
9896         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
9897         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
9898         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
9899 }
9900
9901 /**
9902  * Release the tag.
9903  *
9904  * @param dev
9905  *   Pointer to Ethernet device.
9906  * @param tag_idx
9907  *   Tag index.
9908  *
9909  * @return
9910  *   1 while a reference on it exists, 0 when freed.
9911  */
9912 static int
9913 flow_dv_tag_release(struct rte_eth_dev *dev,
9914                     uint32_t tag_idx)
9915 {
9916         struct mlx5_priv *priv = dev->data->dev_private;
9917         struct mlx5_flow_dv_tag_resource *tag;
9918
9919         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
9920         if (!tag)
9921                 return 0;
9922         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
9923                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
9924         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
9925 }
9926
9927 /**
9928  * Translate port ID action to vport.
9929  *
9930  * @param[in] dev
9931  *   Pointer to rte_eth_dev structure.
9932  * @param[in] action
9933  *   Pointer to the port ID action.
9934  * @param[out] dst_port_id
9935  *   The target port ID.
9936  * @param[out] error
9937  *   Pointer to the error structure.
9938  *
9939  * @return
9940  *   0 on success, a negative errno value otherwise and rte_errno is set.
9941  */
9942 static int
9943 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
9944                                  const struct rte_flow_action *action,
9945                                  uint32_t *dst_port_id,
9946                                  struct rte_flow_error *error)
9947 {
9948         uint32_t port;
9949         struct mlx5_priv *priv;
9950         const struct rte_flow_action_port_id *conf =
9951                         (const struct rte_flow_action_port_id *)action->conf;
9952
9953         port = conf->original ? dev->data->port_id : conf->id;
9954         priv = mlx5_port_to_eswitch_info(port, false);
9955         if (!priv)
9956                 return rte_flow_error_set(error, -rte_errno,
9957                                           RTE_FLOW_ERROR_TYPE_ACTION,
9958                                           NULL,
9959                                           "No eswitch info was found for port");
9960 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
9961         /*
9962          * This parameter is transferred to
9963          * mlx5dv_dr_action_create_dest_ib_port().
9964          */
9965         *dst_port_id = priv->dev_port;
9966 #else
9967         /*
         * Legacy mode: LAG configurations are not supported.
9969          * This parameter is transferred to
9970          * mlx5dv_dr_action_create_dest_vport().
9971          */
9972         *dst_port_id = priv->vport_id;
9973 #endif
9974         return 0;
9975 }
9976
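/*
 * Usage sketch with hypothetical values: translating a PORT_ID action
 * resolves the DPDK port number into the e-switch destination the DR
 * action is created with (IB dev port or vport, depending on the build).
 */
static __rte_unused int
flow_dv_port_id_example(struct rte_eth_dev *dev, uint16_t dpdk_port,
                        uint32_t *dst_port_id, struct rte_flow_error *error)
{
        const struct rte_flow_action_port_id conf = {
                .original = 0,
                .id = dpdk_port,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
                .conf = &conf,
        };

        return flow_dv_translate_action_port_id(dev, &action, dst_port_id,
                                                error);
}
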
9977 /**
9978  * Create a counter with aging configuration.
9979  *
9980  * @param[in] dev
9981  *   Pointer to rte_eth_dev structure.
9982  * @param[in] dev_flow
9983  *   Pointer to the mlx5_flow.
9984  * @param[out] count
9985  *   Pointer to the counter action configuration.
9986  * @param[in] age
9987  *   Pointer to the aging action configuration.
9988  *
9989  * @return
9990  *   Index to flow counter on success, 0 otherwise.
9991  */
9992 static uint32_t
9993 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
9994                                 struct mlx5_flow *dev_flow,
9995                                 const struct rte_flow_action_count *count,
9996                                 const struct rte_flow_action_age *age)
9997 {
9998         uint32_t counter;
9999         struct mlx5_age_param *age_param;
10000
10001         if (count && count->shared)
10002                 counter = flow_dv_counter_get_shared(dev, count->id);
10003         else
10004                 counter = flow_dv_counter_alloc(dev, !!age);
        if (!counter || !age)
10006                 return counter;
10007         age_param = flow_dv_counter_idx_get_age(dev, counter);
10008         age_param->context = age->context ? age->context :
10009                 (void *)(uintptr_t)(dev_flow->flow_idx);
10010         age_param->timeout = age->timeout;
10011         age_param->port_id = dev->data->port_id;
10012         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10013         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10014         return counter;
10015 }
10016
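/*
 * Illustrative sketch only, not the driver's aging loop: once a counter is
 * bound to aging, readers of the age parameters must pair with the relaxed
 * atomic stores above, e.g. to test expiry:
 */
static __rte_unused bool
flow_dv_age_expired_example(struct mlx5_age_param *age_param)
{
        uint32_t sec = __atomic_load_n(&age_param->sec_since_last_hit,
                                       __ATOMIC_RELAXED);

        return __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
               AGE_CANDIDATE && sec >= age_param->timeout;
}
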
10017 /**
10018  * Add Tx queue matcher
10019  *
10020  * @param[in] dev
10021  *   Pointer to the dev struct.
10022  * @param[in, out] matcher
10023  *   Flow matcher.
10024  * @param[in, out] key
10025  *   Flow matcher value.
10026  * @param[in] item
10027  *   Flow pattern to translate.
10030  */
10031 static void
10032 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10033                                 void *matcher, void *key,
10034                                 const struct rte_flow_item *item)
10035 {
10036         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10037         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10038         void *misc_m =
10039                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10040         void *misc_v =
10041                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10042         struct mlx5_txq_ctrl *txq;
10043         uint32_t queue;
10044
10046         queue_m = (const void *)item->mask;
10047         if (!queue_m)
10048                 return;
10049         queue_v = (const void *)item->spec;
10050         if (!queue_v)
10051                 return;
10052         txq = mlx5_txq_get(dev, queue_v->queue);
10053         if (!txq)
10054                 return;
10055         queue = txq->obj->sq->id;
10056         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10057         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10058                  queue & queue_m->queue);
10059         mlx5_txq_release(dev, queue_v->queue);
10060 }
10061
10062 /**
 * Set the hash fields according to the @p dev_flow information.
10064  *
10065  * @param[in] dev_flow
10066  *   Pointer to the mlx5_flow.
10067  * @param[in] rss_desc
10068  *   Pointer to the mlx5_flow_rss_desc.
10069  */
10070 static void
10071 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10072                        struct mlx5_flow_rss_desc *rss_desc)
10073 {
10074         uint64_t items = dev_flow->handle->layers;
10075         int rss_inner = 0;
10076         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10077
10078         dev_flow->hash_fields = 0;
10079 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10080         if (rss_desc->level >= 2) {
10081                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10082                 rss_inner = 1;
10083         }
10084 #endif
10085         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10086             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10087                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10088                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10089                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10090                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10091                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10092                         else
10093                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10094                 }
10095         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10096                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10097                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10098                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10099                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10100                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10101                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10102                         else
10103                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10104                 }
10105         }
10106         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10107             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10108                 if (rss_types & ETH_RSS_UDP) {
10109                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10110                                 dev_flow->hash_fields |=
10111                                                 IBV_RX_HASH_SRC_PORT_UDP;
10112                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10113                                 dev_flow->hash_fields |=
10114                                                 IBV_RX_HASH_DST_PORT_UDP;
10115                         else
10116                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10117                 }
10118         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10119                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10120                 if (rss_types & ETH_RSS_TCP) {
10121                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10122                                 dev_flow->hash_fields |=
10123                                                 IBV_RX_HASH_SRC_PORT_TCP;
10124                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10125                                 dev_flow->hash_fields |=
10126                                                 IBV_RX_HASH_DST_PORT_TCP;
10127                         else
10128                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10129                 }
10130         }
10131 }
10132
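/*
 * Sketch of the precedence used above for one layer (hypothetical helper):
 * SRC_ONLY wins over DST_ONLY, and with neither set the combined
 * source+destination hash is selected.
 */
static __rte_unused uint64_t
flow_dv_l3_hash_example(uint64_t rss_types)
{
        if (rss_types & ETH_RSS_L3_SRC_ONLY)
                return IBV_RX_HASH_SRC_IPV4;
        if (rss_types & ETH_RSS_L3_DST_ONLY)
                return IBV_RX_HASH_DST_IPV4;
        return MLX5_IPV4_IBV_RX_HASH;
}
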
10133 /**
10134  * Prepare an Rx Hash queue.
10135  *
10136  * @param dev
10137  *   Pointer to Ethernet device.
10138  * @param[in] dev_flow
10139  *   Pointer to the mlx5_flow.
10140  * @param[in] rss_desc
10141  *   Pointer to the mlx5_flow_rss_desc.
10142  * @param[out] hrxq_idx
10143  *   Hash Rx queue index.
10144  *
10145  * @return
 *   Pointer to the hash Rx queue on success, NULL otherwise and
 *   rte_errno is set.
10147  */
10148 static struct mlx5_hrxq *
10149 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10150                      struct mlx5_flow *dev_flow,
10151                      struct mlx5_flow_rss_desc *rss_desc,
10152                      uint32_t *hrxq_idx)
10153 {
10154         struct mlx5_priv *priv = dev->data->dev_private;
10155         struct mlx5_flow_handle *dh = dev_flow->handle;
10156         struct mlx5_hrxq *hrxq;
10157
10158         MLX5_ASSERT(rss_desc->queue_num);
10159         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10160         rss_desc->hash_fields = dev_flow->hash_fields;
10161         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10162         rss_desc->shared_rss = 0;
10163         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10164         if (!*hrxq_idx)
10165                 return NULL;
10166         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10167                               *hrxq_idx);
10168         return hrxq;
10169 }
10170
10171 /**
10172  * Release sample sub action resource.
10173  *
10174  * @param[in, out] dev
10175  *   Pointer to rte_eth_dev structure.
10176  * @param[in] act_res
10177  *   Pointer to sample sub action resource.
10178  */
10179 static void
10180 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10181                                    struct mlx5_flow_sub_actions_idx *act_res)
10182 {
10183         if (act_res->rix_hrxq) {
10184                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10185                 act_res->rix_hrxq = 0;
10186         }
10187         if (act_res->rix_encap_decap) {
10188                 flow_dv_encap_decap_resource_release(dev,
10189                                                      act_res->rix_encap_decap);
10190                 act_res->rix_encap_decap = 0;
10191         }
10192         if (act_res->rix_port_id_action) {
10193                 flow_dv_port_id_action_resource_release(dev,
10194                                                 act_res->rix_port_id_action);
10195                 act_res->rix_port_id_action = 0;
10196         }
10197         if (act_res->rix_tag) {
10198                 flow_dv_tag_release(dev, act_res->rix_tag);
10199                 act_res->rix_tag = 0;
10200         }
10201         if (act_res->rix_jump) {
10202                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10203                 act_res->rix_jump = 0;
10204         }
10205 }
10206
10207 int
10208 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10209                         struct mlx5_cache_entry *entry, void *cb_ctx)
10210 {
10211         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10212         struct rte_eth_dev *dev = ctx->dev;
10213         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10214         struct mlx5_flow_dv_sample_resource *cache_resource =
10215                         container_of(entry, typeof(*cache_resource), entry);
10216
10217         if (resource->ratio == cache_resource->ratio &&
10218             resource->ft_type == cache_resource->ft_type &&
10219             resource->ft_id == cache_resource->ft_id &&
10220             resource->set_action == cache_resource->set_action &&
10221             !memcmp((void *)&resource->sample_act,
10222                     (void *)&cache_resource->sample_act,
10223                     sizeof(struct mlx5_flow_sub_actions_list))) {
                /*
                 * A matching sample action already exists; release the
                 * references taken by the prepared sub-actions.
                 */
10228                 flow_dv_sample_sub_actions_release(dev,
10229                                                 &resource->sample_idx);
10230                 return 0;
10231         }
10232         return 1;
10233 }
10234
10235 struct mlx5_cache_entry *
10236 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
10237                          struct mlx5_cache_entry *entry __rte_unused,
10238                          void *cb_ctx)
10239 {
10240         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10241         struct rte_eth_dev *dev = ctx->dev;
10242         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10243         void **sample_dv_actions = resource->sub_actions;
10244         struct mlx5_flow_dv_sample_resource *cache_resource;
10245         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10246         struct mlx5_priv *priv = dev->data->dev_private;
10247         struct mlx5_dev_ctx_shared *sh = priv->sh;
10248         struct mlx5_flow_tbl_resource *tbl;
10249         uint32_t idx = 0;
10250         const uint32_t next_ft_step = 1;
10251         uint32_t next_ft_id = resource->ft_id + next_ft_step;
10252         uint8_t is_egress = 0;
10253         uint8_t is_transfer = 0;
10254         struct rte_flow_error *error = ctx->error;
10255
10256         /* Register new sample resource. */
10257         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10258         if (!cache_resource) {
10259                 rte_flow_error_set(error, ENOMEM,
10260                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10261                                           NULL,
10262                                           "cannot allocate resource memory");
10263                 return NULL;
10264         }
10265         *cache_resource = *resource;
10266         /* Create normal path table level */
10267         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10268                 is_transfer = 1;
10269         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10270                 is_egress = 1;
10271         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10272                                         is_egress, is_transfer,
10273                                         true, NULL, 0, 0, 0, error);
10274         if (!tbl) {
10275                 rte_flow_error_set(error, ENOMEM,
10276                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10277                                           NULL,
10278                                           "fail to create normal path table "
10279                                           "for sample");
10280                 goto error;
10281         }
10282         cache_resource->normal_path_tbl = tbl;
10283         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10284                 if (!sh->default_miss_action) {
10285                         rte_flow_error_set(error, ENOMEM,
10286                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10287                                                 NULL,
10288                                                 "default miss action was not "
10289                                                 "created");
10290                         goto error;
10291                 }
10292                 sample_dv_actions[resource->sample_act.actions_num++] =
10293                                                 sh->default_miss_action;
10294         }
10295         /* Create a DR sample action */
10296         sampler_attr.sample_ratio = cache_resource->ratio;
10297         sampler_attr.default_next_table = tbl->obj;
10298         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
10299         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
10300                                                         &sample_dv_actions[0];
10301         sampler_attr.action = cache_resource->set_action;
10302         if (mlx5_os_flow_dr_create_flow_action_sampler
10303                         (&sampler_attr, &cache_resource->verbs_action)) {
10304                 rte_flow_error_set(error, ENOMEM,
10305                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10306                                         NULL, "cannot create sample action");
10307                 goto error;
10308         }
10309         cache_resource->idx = idx;
10310         cache_resource->dev = dev;
10311         return &cache_resource->entry;
10312 error:
10313         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
10314                 flow_dv_sample_sub_actions_release(dev,
10315                                                    &cache_resource->sample_idx);
10316         if (cache_resource->normal_path_tbl)
10317                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10318                                 cache_resource->normal_path_tbl);
10319         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10320         return NULL;
10322 }
10323
10324 /**
10325  * Find existing sample resource or create and register a new one.
10326  *
10327  * @param[in, out] dev
10328  *   Pointer to rte_eth_dev structure.
10329  * @param[in] resource
10330  *   Pointer to sample resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
10338  */
10339 static int
10340 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10341                          struct mlx5_flow_dv_sample_resource *resource,
10342                          struct mlx5_flow *dev_flow,
10343                          struct rte_flow_error *error)
10344 {
10345         struct mlx5_flow_dv_sample_resource *cache_resource;
10346         struct mlx5_cache_entry *entry;
10347         struct mlx5_priv *priv = dev->data->dev_private;
10348         struct mlx5_flow_cb_ctx ctx = {
10349                 .dev = dev,
10350                 .error = error,
10351                 .data = resource,
10352         };
10353
10354         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10355         if (!entry)
10356                 return -rte_errno;
10357         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10358         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10359         dev_flow->dv.sample_res = cache_resource;
10360         return 0;
10361 }
10362
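/*
 * Sketch (hypothetical helper) of the mlx5_cache_list contract used here
 * and for the destination array below: the caller packs the lookup data
 * into an mlx5_flow_cb_ctx; mlx5_cache_register() returns an existing
 * entry when match_cb reports equality, otherwise it builds one through
 * create_cb.
 */
static __rte_unused struct mlx5_cache_entry *
flow_dv_cache_register_example(struct mlx5_cache_list *list,
                               struct rte_eth_dev *dev, void *resource,
                               struct rte_flow_error *error)
{
        struct mlx5_flow_cb_ctx ctx = {
                .dev = dev,
                .error = error,
                .data = resource,
        };

        return mlx5_cache_register(list, &ctx);
}
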
10363 int
10364 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10365                             struct mlx5_cache_entry *entry, void *cb_ctx)
10366 {
10367         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10368         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10369         struct rte_eth_dev *dev = ctx->dev;
10370         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10371                         container_of(entry, typeof(*cache_resource), entry);
10372         uint32_t idx = 0;
10373
10374         if (resource->num_of_dest == cache_resource->num_of_dest &&
10375             resource->ft_type == cache_resource->ft_type &&
10376             !memcmp((void *)cache_resource->sample_act,
10377                     (void *)resource->sample_act,
10378                    (resource->num_of_dest *
10379                    sizeof(struct mlx5_flow_sub_actions_list)))) {
                /*
                 * A matching destination array already exists; release the
                 * references taken by the prepared sub-actions.
                 */
10384                 for (idx = 0; idx < resource->num_of_dest; idx++)
10385                         flow_dv_sample_sub_actions_release(dev,
10386                                         &resource->sample_idx[idx]);
10387                 return 0;
10388         }
10389         return 1;
10390 }
10391
10392 struct mlx5_cache_entry *
10393 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10394                          struct mlx5_cache_entry *entry __rte_unused,
10395                          void *cb_ctx)
10396 {
10397         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10398         struct rte_eth_dev *dev = ctx->dev;
10399         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10400         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10401         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10402         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10403         struct mlx5_priv *priv = dev->data->dev_private;
10404         struct mlx5_dev_ctx_shared *sh = priv->sh;
10405         struct mlx5_flow_sub_actions_list *sample_act;
10406         struct mlx5dv_dr_domain *domain;
10407         uint32_t idx = 0, res_idx = 0;
10408         struct rte_flow_error *error = ctx->error;
10409         uint64_t action_flags;
10410         int ret;
10411
10412         /* Register new destination array resource. */
10413         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10414                                             &res_idx);
10415         if (!cache_resource) {
10416                 rte_flow_error_set(error, ENOMEM,
10417                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10418                                           NULL,
10419                                           "cannot allocate resource memory");
10420                 return NULL;
10421         }
10422         *cache_resource = *resource;
10423         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10424                 domain = sh->fdb_domain;
10425         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10426                 domain = sh->rx_domain;
10427         else
10428                 domain = sh->tx_domain;
10429         for (idx = 0; idx < resource->num_of_dest; idx++) {
10430                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10431                                  mlx5_malloc(MLX5_MEM_ZERO,
10432                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10433                                  0, SOCKET_ID_ANY);
10434                 if (!dest_attr[idx]) {
10435                         rte_flow_error_set(error, ENOMEM,
10436                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10437                                            NULL,
10438                                            "cannot allocate resource memory");
10439                         goto error;
10440                 }
10441                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10442                 sample_act = &resource->sample_act[idx];
10443                 action_flags = sample_act->action_flags;
10444                 switch (action_flags) {
10445                 case MLX5_FLOW_ACTION_QUEUE:
10446                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10447                         break;
10448                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10449                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10450                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10451                         dest_attr[idx]->dest_reformat->reformat =
10452                                         sample_act->dr_encap_action;
10453                         dest_attr[idx]->dest_reformat->dest =
10454                                         sample_act->dr_port_id_action;
10455                         break;
10456                 case MLX5_FLOW_ACTION_PORT_ID:
10457                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10458                         break;
10459                 case MLX5_FLOW_ACTION_JUMP:
10460                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10461                         break;
10462                 default:
10463                         rte_flow_error_set(error, EINVAL,
10464                                            RTE_FLOW_ERROR_TYPE_ACTION,
10465                                            NULL,
10466                                            "unsupported actions type");
10467                         goto error;
10468                 }
10469         }
        /* Create a dest array action. */
10471         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10472                                                 (domain,
10473                                                  cache_resource->num_of_dest,
10474                                                  dest_attr,
10475                                                  &cache_resource->action);
10476         if (ret) {
10477                 rte_flow_error_set(error, ENOMEM,
10478                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10479                                    NULL,
10480                                    "cannot create destination array action");
10481                 goto error;
10482         }
10483         cache_resource->idx = res_idx;
10484         cache_resource->dev = dev;
10485         for (idx = 0; idx < resource->num_of_dest; idx++)
10486                 mlx5_free(dest_attr[idx]);
10487         return &cache_resource->entry;
10488 error:
10489         for (idx = 0; idx < resource->num_of_dest; idx++) {
10490                 flow_dv_sample_sub_actions_release(dev,
10491                                 &cache_resource->sample_idx[idx]);
10492                 if (dest_attr[idx])
10493                         mlx5_free(dest_attr[idx]);
10494         }
10495
10496         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10497         return NULL;
10498 }
10499
10500 /**
10501  * Find existing destination array resource or create and register a new one.
10502  *
10503  * @param[in, out] dev
10504  *   Pointer to rte_eth_dev structure.
10505  * @param[in] resource
10506  *   Pointer to destination array resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
10514  */
10515 static int
10516 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10517                          struct mlx5_flow_dv_dest_array_resource *resource,
10518                          struct mlx5_flow *dev_flow,
10519                          struct rte_flow_error *error)
10520 {
10521         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10522         struct mlx5_priv *priv = dev->data->dev_private;
10523         struct mlx5_cache_entry *entry;
10524         struct mlx5_flow_cb_ctx ctx = {
10525                 .dev = dev,
10526                 .error = error,
10527                 .data = resource,
10528         };
10529
10530         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10531         if (!entry)
10532                 return -rte_errno;
10533         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10534         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10535         dev_flow->dv.dest_array_res = cache_resource;
10536         return 0;
10537 }
10538
10539 /**
10540  * Convert Sample action to DV specification.
10541  *
10542  * @param[in] dev
10543  *   Pointer to rte_eth_dev structure.
10544  * @param[in] action
10545  *   Pointer to sample action structure.
10546  * @param[in, out] dev_flow
10547  *   Pointer to the mlx5_flow.
10548  * @param[in] attr
10549  *   Pointer to the flow attributes.
10550  * @param[in, out] num_of_dest
 *   Pointer to the number of destinations.
10552  * @param[in, out] sample_actions
10553  *   Pointer to sample actions list.
10554  * @param[in, out] res
10555  *   Pointer to sample resource.
10556  * @param[out] error
10557  *   Pointer to the error structure.
10558  *
10559  * @return
10560  *   0 on success, a negative errno value otherwise and rte_errno is set.
10561  */
10562 static int
10563 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10564                                 const struct rte_flow_action_sample *action,
10565                                 struct mlx5_flow *dev_flow,
10566                                 const struct rte_flow_attr *attr,
10567                                 uint32_t *num_of_dest,
10568                                 void **sample_actions,
10569                                 struct mlx5_flow_dv_sample_resource *res,
10570                                 struct rte_flow_error *error)
10571 {
10572         struct mlx5_priv *priv = dev->data->dev_private;
10573         const struct rte_flow_action *sub_actions;
10574         struct mlx5_flow_sub_actions_list *sample_act;
10575         struct mlx5_flow_sub_actions_idx *sample_idx;
10576         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10577         struct rte_flow *flow = dev_flow->flow;
10578         struct mlx5_flow_rss_desc *rss_desc;
10579         uint64_t action_flags = 0;
10580
10581         MLX5_ASSERT(wks);
10582         rss_desc = &wks->rss_desc;
10583         sample_act = &res->sample_act;
10584         sample_idx = &res->sample_idx;
10585         res->ratio = action->ratio;
10586         sub_actions = action->actions;
10587         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
10588                 int type = sub_actions->type;
10589                 uint32_t pre_rix = 0;
10590                 void *pre_r;
10591                 switch (type) {
10592                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10593                 {
10594                         const struct rte_flow_action_queue *queue;
10595                         struct mlx5_hrxq *hrxq;
10596                         uint32_t hrxq_idx;
10597
10598                         queue = sub_actions->conf;
10599                         rss_desc->queue_num = 1;
10600                         rss_desc->queue[0] = queue->index;
10601                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10602                                                     rss_desc, &hrxq_idx);
10603                         if (!hrxq)
10604                                 return rte_flow_error_set
10605                                         (error, rte_errno,
10606                                          RTE_FLOW_ERROR_TYPE_ACTION,
10607                                          NULL,
10608                                          "cannot create fate queue");
10609                         sample_act->dr_queue_action = hrxq->action;
10610                         sample_idx->rix_hrxq = hrxq_idx;
10611                         sample_actions[sample_act->actions_num++] =
10612                                                 hrxq->action;
10613                         (*num_of_dest)++;
10614                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
10615                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10616                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10617                         dev_flow->handle->fate_action =
10618                                         MLX5_FLOW_FATE_QUEUE;
10619                         break;
10620                 }
10621                 case RTE_FLOW_ACTION_TYPE_RSS:
10622                 {
10623                         struct mlx5_hrxq *hrxq;
10624                         uint32_t hrxq_idx;
10625                         const struct rte_flow_action_rss *rss;
10626                         const uint8_t *rss_key;
10627
10628                         rss = sub_actions->conf;
10629                         memcpy(rss_desc->queue, rss->queue,
10630                                rss->queue_num * sizeof(uint16_t));
10631                         rss_desc->queue_num = rss->queue_num;
10632                         /* NULL RSS key indicates default RSS key. */
10633                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
10634                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
10635                         /*
                         * rss->level and rss->types should be set in advance
10637                          * when expanding items for RSS.
10638                          */
10639                         flow_dv_hashfields_set(dev_flow, rss_desc);
10640                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10641                                                     rss_desc, &hrxq_idx);
10642                         if (!hrxq)
10643                                 return rte_flow_error_set
10644                                         (error, rte_errno,
10645                                          RTE_FLOW_ERROR_TYPE_ACTION,
10646                                          NULL,
10647                                          "cannot create fate queue");
10648                         sample_act->dr_queue_action = hrxq->action;
10649                         sample_idx->rix_hrxq = hrxq_idx;
10650                         sample_actions[sample_act->actions_num++] =
10651                                                 hrxq->action;
10652                         (*num_of_dest)++;
10653                         action_flags |= MLX5_FLOW_ACTION_RSS;
10654                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10655                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10656                         dev_flow->handle->fate_action =
10657                                         MLX5_FLOW_FATE_QUEUE;
10658                         break;
10659                 }
10660                 case RTE_FLOW_ACTION_TYPE_MARK:
10661                 {
10662                         uint32_t tag_be = mlx5_flow_mark_set
10663                                 (((const struct rte_flow_action_mark *)
10664                                 (sub_actions->conf))->id);
10665
10666                         dev_flow->handle->mark = 1;
10667                         pre_rix = dev_flow->handle->dvh.rix_tag;
10668                         /* Save the mark resource before sample */
10669                         pre_r = dev_flow->dv.tag_resource;
10670                         if (flow_dv_tag_resource_register(dev, tag_be,
10671                                                   dev_flow, error))
10672                                 return -rte_errno;
10673                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10674                         sample_act->dr_tag_action =
10675                                 dev_flow->dv.tag_resource->action;
10676                         sample_idx->rix_tag =
10677                                 dev_flow->handle->dvh.rix_tag;
10678                         sample_actions[sample_act->actions_num++] =
10679                                                 sample_act->dr_tag_action;
10680                         /* Recover the mark resource after sample */
10681                         dev_flow->dv.tag_resource = pre_r;
10682                         dev_flow->handle->dvh.rix_tag = pre_rix;
10683                         action_flags |= MLX5_FLOW_ACTION_MARK;
10684                         break;
10685                 }
10686                 case RTE_FLOW_ACTION_TYPE_COUNT:
10687                 {
10688                         if (!flow->counter) {
10689                                 flow->counter =
10690                                         flow_dv_translate_create_counter(dev,
10691                                                 dev_flow, sub_actions->conf,
10692                                                 0);
10693                                 if (!flow->counter)
10694                                         return rte_flow_error_set
10695                                                 (error, rte_errno,
10696                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10697                                                 NULL,
10698                                                 "cannot create counter"
10699                                                 " object.");
10700                         }
10701                         sample_act->dr_cnt_action =
10702                                   (flow_dv_counter_get_by_idx(dev,
10703                                   flow->counter, NULL))->action;
10704                         sample_actions[sample_act->actions_num++] =
10705                                                 sample_act->dr_cnt_action;
10706                         action_flags |= MLX5_FLOW_ACTION_COUNT;
10707                         break;
10708                 }
10709                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
10710                 {
10711                         struct mlx5_flow_dv_port_id_action_resource
10712                                         port_id_resource;
10713                         uint32_t port_id = 0;
10714
10715                         memset(&port_id_resource, 0, sizeof(port_id_resource));
10716                         /* Save the port id resource before sample */
10717                         pre_rix = dev_flow->handle->rix_port_id_action;
10718                         pre_r = dev_flow->dv.port_id_action;
10719                         if (flow_dv_translate_action_port_id(dev, sub_actions,
10720                                                              &port_id, error))
10721                                 return -rte_errno;
10722                         port_id_resource.port_id = port_id;
10723                         if (flow_dv_port_id_action_resource_register
10724                             (dev, &port_id_resource, dev_flow, error))
10725                                 return -rte_errno;
10726                         sample_act->dr_port_id_action =
10727                                 dev_flow->dv.port_id_action->action;
10728                         sample_idx->rix_port_id_action =
10729                                 dev_flow->handle->rix_port_id_action;
10730                         sample_actions[sample_act->actions_num++] =
10731                                                 sample_act->dr_port_id_action;
10732                         /* Recover the port id resource after sample */
10733                         dev_flow->dv.port_id_action = pre_r;
10734                         dev_flow->handle->rix_port_id_action = pre_rix;
10735                         (*num_of_dest)++;
10736                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10737                         break;
10738                 }
10739                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
10740                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
10741                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10742                         /* Save the encap resource before sample */
10743                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
10744                         pre_r = dev_flow->dv.encap_decap;
10745                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
10746                                                            dev_flow,
10747                                                            attr->transfer,
10748                                                            error))
10749                                 return -rte_errno;
10750                         sample_act->dr_encap_action =
10751                                 dev_flow->dv.encap_decap->action;
10752                         sample_idx->rix_encap_decap =
10753                                 dev_flow->handle->dvh.rix_encap_decap;
10754                         sample_actions[sample_act->actions_num++] =
10755                                                 sample_act->dr_encap_action;
10756                         /* Recover the encap resource after sample */
10757                         dev_flow->dv.encap_decap = pre_r;
10758                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
10759                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10760                         break;
10761                 default:
10762                         return rte_flow_error_set(error, EINVAL,
10763                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10764                                 NULL,
10765                                 "action not supported in sample");
10766                 }
10767         }
10768         sample_act->action_flags = action_flags;
10769         res->ft_id = dev_flow->dv.group;
10770         if (attr->transfer) {
10771                 union {
10772                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
10773                         uint64_t set_action;
10774                 } action_ctx = { .set_action = 0 };
10775
10776                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10777                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
10778                          MLX5_MODIFICATION_TYPE_SET);
10779                 MLX5_SET(set_action_in, action_ctx.action_in, field,
10780                          MLX5_MODI_META_REG_C_0);
10781                 MLX5_SET(set_action_in, action_ctx.action_in, data,
10782                          priv->vport_meta_tag);
10783                 res->set_action = action_ctx.set_action;
10784         } else if (attr->ingress) {
10785                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10786         } else {
10787                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
10788         }
10789         return 0;
10790 }
10791
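/*
 * A minimal sketch (not driver code) of the set_action encoding used in
 * the transfer branch above: the PRM set_action_in layout packs an
 * action type, a target field and the data to write into one 64-bit
 * word. The helper name is hypothetical; the driver builds the same
 * word inline with MLX5_SET().
 */
static __rte_unused uint64_t
example_compose_set_action(uint32_t mod_field, uint32_t mod_data)
{
        union {
                uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
                uint64_t set_action;
        } ctx = { .set_action = 0 };

        /* SET writes an immediate value into the given field. */
        MLX5_SET(set_action_in, ctx.action_in, action_type,
                 MLX5_MODIFICATION_TYPE_SET);
        MLX5_SET(set_action_in, ctx.action_in, field, mod_field);
        MLX5_SET(set_action_in, ctx.action_in, data, mod_data);
        return ctx.set_action;
}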
10792 /**
10793  * Convert Sample action to DV specification.
10794  *
10795  * @param[in] dev
10796  *   Pointer to rte_eth_dev structure.
10797  * @param[in, out] dev_flow
10798  *   Pointer to the mlx5_flow.
10799  * @param[in] num_of_dest
10800  *   The number of destinations.
10801  * @param[in, out] res
10802  *   Pointer to sample resource.
10803  * @param[in, out] mdest_res
10804  *   Pointer to destination array resource.
10805  * @param[in] sample_actions
10806  *   Pointer to sample path actions list.
10807  * @param[in] action_flags
10808  *   Holds the actions detected until now.
10809  * @param[out] error
10810  *   Pointer to the error structure.
10811  *
10812  * @return
10813  *   0 on success, a negative errno value otherwise and rte_errno is set.
10814  */
10815 static int
10816 flow_dv_create_action_sample(struct rte_eth_dev *dev,
10817                              struct mlx5_flow *dev_flow,
10818                              uint32_t num_of_dest,
10819                              struct mlx5_flow_dv_sample_resource *res,
10820                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
10821                              void **sample_actions,
10822                              uint64_t action_flags,
10823                              struct rte_flow_error *error)
10824 {
10825         /* Update the normal path action resource at the last index of the array. */
10826         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
10827         struct mlx5_flow_sub_actions_list *sample_act =
10828                                         &mdest_res->sample_act[dest_index];
10829         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10830         struct mlx5_flow_rss_desc *rss_desc;
10831         uint32_t normal_idx = 0;
10832         struct mlx5_hrxq *hrxq;
10833         uint32_t hrxq_idx;
10834
10835         MLX5_ASSERT(wks);
10836         rss_desc = &wks->rss_desc;
10837         if (num_of_dest > 1) {
10838                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
10839                         /* Handle QP action for mirroring */
10840                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10841                                                     rss_desc, &hrxq_idx);
10842                         if (!hrxq)
10843                                 return rte_flow_error_set
10844                                      (error, rte_errno,
10845                                       RTE_FLOW_ERROR_TYPE_ACTION,
10846                                       NULL,
10847                                       "cannot create rx queue");
10848                         normal_idx++;
10849                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
10850                         sample_act->dr_queue_action = hrxq->action;
10851                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10852                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10853                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
10854                 }
10855                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
10856                         normal_idx++;
10857                         mdest_res->sample_idx[dest_index].rix_encap_decap =
10858                                 dev_flow->handle->dvh.rix_encap_decap;
10859                         sample_act->dr_encap_action =
10860                                 dev_flow->dv.encap_decap->action;
10861                         dev_flow->handle->dvh.rix_encap_decap = 0;
10862                 }
10863                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
10864                         normal_idx++;
10865                         mdest_res->sample_idx[dest_index].rix_port_id_action =
10866                                 dev_flow->handle->rix_port_id_action;
10867                         sample_act->dr_port_id_action =
10868                                 dev_flow->dv.port_id_action->action;
10869                         dev_flow->handle->rix_port_id_action = 0;
10870                 }
10871                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
10872                         normal_idx++;
10873                         mdest_res->sample_idx[dest_index].rix_jump =
10874                                 dev_flow->handle->rix_jump;
10875                         sample_act->dr_jump_action =
10876                                 dev_flow->dv.jump->action;
10877                         dev_flow->handle->rix_jump = 0;
10878                 }
10879                 sample_act->actions_num = normal_idx;
10880                 /* Update the sample action resource at the first index of the array. */
10881                 mdest_res->ft_type = res->ft_type;
10882                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
10883                                 sizeof(struct mlx5_flow_sub_actions_idx));
10884                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
10885                                 sizeof(struct mlx5_flow_sub_actions_list));
10886                 mdest_res->num_of_dest = num_of_dest;
10887                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
10888                                                          dev_flow, error))
10889                         return rte_flow_error_set(error, EINVAL,
10890                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10891                                                   NULL, "can't create sample "
10892                                                   "action");
10893         } else {
10894                 res->sub_actions = sample_actions;
10895                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
10896                         return rte_flow_error_set(error, EINVAL,
10897                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10898                                                   NULL,
10899                                                   "can't create sample action");
10900         }
10901         return 0;
10902 }
10903
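/*
 * An illustrative sketch (an assumption, not driver code) of the
 * destination array layout built by flow_dv_create_action_sample() for
 * mirroring: slot 0 carries the sample-path sub-actions, the last slot
 * carries the normal-path sub-actions. The example_* name is
 * hypothetical.
 */
static __rte_unused struct mlx5_flow_sub_actions_list *
example_mdest_slot(struct mlx5_flow_dv_dest_array_resource *mdest,
                   bool sample_path)
{
        return sample_path ? &mdest->sample_act[0] :
                             &mdest->sample_act[MLX5_MAX_DEST_NUM - 1];
}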
10904 /**
10905  * Remove an ASO age action from age actions list.
10906  *
10907  * @param[in] dev
10908  *   Pointer to the Ethernet device structure.
10909  * @param[in] age
10910  *   Pointer to the aso age action handler.
10911  */
10912 static void
10913 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
10914                                 struct mlx5_aso_age_action *age)
10915 {
10916         struct mlx5_age_info *age_info;
10917         struct mlx5_age_param *age_param = &age->age_params;
10918         struct mlx5_priv *priv = dev->data->dev_private;
10919         uint16_t expected = AGE_CANDIDATE;
10920
10921         age_info = GET_PORT_AGE_INFO(priv);
10922         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
10923                                          AGE_FREE, false, __ATOMIC_RELAXED,
10924                                          __ATOMIC_RELAXED)) {
10925                 /*
10926                  * We need the lock even if the age timed out,
10927                  * since the age action may still be in process.
10928                  */
10929                 rte_spinlock_lock(&age_info->aged_sl);
10930                 LIST_REMOVE(age, next);
10931                 rte_spinlock_unlock(&age_info->aged_sl);
10932                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
10933         }
10934 }
10935
10936 /**
10937  * Release an ASO age action.
10938  *
10939  * @param[in] dev
10940  *   Pointer to the Ethernet device structure.
10941  * @param[in] age_idx
10942  *   Index of ASO age action to release.
10946  *
10947  * @return
10948  *   0 when the age action was released, otherwise the remaining reference count.
10949  */
10950 static int
10951 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
10952 {
10953         struct mlx5_priv *priv = dev->data->dev_private;
10954         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10955         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
10956         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
10957
10958         if (!ret) {
10959                 flow_dv_aso_age_remove_from_age(dev, age);
10960                 rte_spinlock_lock(&mng->free_sl);
10961                 LIST_INSERT_HEAD(&mng->free, age, next);
10962                 rte_spinlock_unlock(&mng->free_sl);
10963         }
10964         return ret;
10965 }
10966
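/*
 * Usage sketch (hypothetical caller) for the release helper above: a
 * non-zero return means other flows still hold references to the age
 * action; only a zero return indicates the action went back to the
 * manager's free list.
 */
static __rte_unused void
example_age_put(struct rte_eth_dev *dev, uint32_t age_idx)
{
        if (flow_dv_aso_age_release(dev, age_idx))
                return; /* Still referenced elsewhere. */
        /* At this point the action has been recycled into mng->free. */
}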
10967 /**
10968  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
10969  *
10970  * @param[in] dev
10971  *   Pointer to the Ethernet device structure.
10972  *
10973  * @return
10974  *   0 on success, otherwise negative errno value and rte_errno is set.
10975  */
10976 static int
10977 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
10978 {
10979         struct mlx5_priv *priv = dev->data->dev_private;
10980         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10981         void *old_pools = mng->pools;
10982         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
10983         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
10984         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
10985
10986         if (!pools) {
10987                 rte_errno = ENOMEM;
10988                 return -ENOMEM;
10989         }
10990         if (old_pools) {
10991                 memcpy(pools, old_pools,
10992                        mng->n * sizeof(struct mlx5_aso_age_pool *));
10993                 mlx5_free(old_pools);
10994         } else {
10995                 /* First ASO flow hit allocation - starting ASO data-path. */
10996                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
10997
10998                 if (ret) {
10999                         mlx5_free(pools);
11000                         return ret;
11001                 }
11002         }
11003         mng->n = resize;
11004         mng->pools = pools;
11005         return 0;
11006 }
11007
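/*
 * A worked sketch of the linear growth above, assuming each resize adds
 * MLX5_CNT_CONTAINER_RESIZE pointer slots: only the pointer table is
 * reallocated and copied, the pools themselves stay in place. The
 * helper is illustrative only.
 */
static __rte_unused uint32_t
example_resized_table_bytes(uint32_t n_pools)
{
        /* One slot per pool pointer, growth is linear per resize. */
        return (n_pools + MLX5_CNT_CONTAINER_RESIZE) *
               (uint32_t)sizeof(struct mlx5_aso_age_pool *);
}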
11008 /**
11009  * Create and initialize a new ASO aging pool.
11010  *
11011  * @param[in] dev
11012  *   Pointer to the Ethernet device structure.
11013  * @param[out] age_free
11014  *   Where to put the pointer of a new age action.
11015  *
11016  * @return
11017  *   The age actions pool pointer and @p age_free is set on success,
11018  *   NULL otherwise and rte_errno is set.
11019  */
11020 static struct mlx5_aso_age_pool *
11021 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11022                         struct mlx5_aso_age_action **age_free)
11023 {
11024         struct mlx5_priv *priv = dev->data->dev_private;
11025         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11026         struct mlx5_aso_age_pool *pool = NULL;
11027         struct mlx5_devx_obj *obj = NULL;
11028         uint32_t i;
11029
11030         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11031                                                     priv->sh->pdn);
11032         if (!obj) {
11033                 rte_errno = ENODATA;
11034                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11035                 return NULL;
11036         }
11037         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11038         if (!pool) {
11039                 claim_zero(mlx5_devx_cmd_destroy(obj));
11040                 rte_errno = ENOMEM;
11041                 return NULL;
11042         }
11043         pool->flow_hit_aso_obj = obj;
11044         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11045         rte_spinlock_lock(&mng->resize_sl);
11046         pool->index = mng->next;
11047         /* Resize pools array if there is no room for the new pool in it. */
11048         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11049                 claim_zero(mlx5_devx_cmd_destroy(obj));
11050                 mlx5_free(pool);
11051                 rte_spinlock_unlock(&mng->resize_sl);
11052                 return NULL;
11053         }
11054         mng->pools[pool->index] = pool;
11055         mng->next++;
11056         rte_spinlock_unlock(&mng->resize_sl);
11057         /* Assign the first action in the new pool, the rest go to free list. */
11058         *age_free = &pool->actions[0];
11059         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11060                 pool->actions[i].offset = i;
11061                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11062         }
11063         return pool;
11064 }
11065
11066 /**
11067  * Allocate an ASO aging bit.
11068  *
11069  * @param[in] dev
11070  *   Pointer to the Ethernet device structure.
11071  * @param[out] error
11072  *   Pointer to the error structure.
11073  *
11074  * @return
11075  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11076  */
11077 static uint32_t
11078 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11079 {
11080         struct mlx5_priv *priv = dev->data->dev_private;
11081         const struct mlx5_aso_age_pool *pool;
11082         struct mlx5_aso_age_action *age_free = NULL;
11083         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11084
11085         MLX5_ASSERT(mng);
11086         /* Try to get the next free age action bit. */
11087         rte_spinlock_lock(&mng->free_sl);
11088         age_free = LIST_FIRST(&mng->free);
11089         if (age_free) {
11090                 LIST_REMOVE(age_free, next);
11091         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11092                 rte_spinlock_unlock(&mng->free_sl);
11093                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11094                                    NULL, "failed to create ASO age pool");
11095                 return 0; /* 0 is an error. */
11096         }
11097         rte_spinlock_unlock(&mng->free_sl);
11098         pool = container_of
11099           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11100                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11101                                                                        actions);
11102         if (!age_free->dr_action) {
11103                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11104                                                  error);
11105
11106                 if (reg_c < 0) {
11107                         rte_flow_error_set(error, rte_errno,
11108                                            RTE_FLOW_ERROR_TYPE_ACTION,
11109                                            NULL, "failed to get reg_c "
11110                                            "for ASO flow hit");
11111                         return 0; /* 0 is an error. */
11112                 }
11113 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11114                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11115                                 (priv->sh->rx_domain,
11116                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11117                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11118                                  (reg_c - REG_C_0));
11119 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11120                 if (!age_free->dr_action) {
11121                         rte_errno = errno;
11122                         rte_spinlock_lock(&mng->free_sl);
11123                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11124                         rte_spinlock_unlock(&mng->free_sl);
11125                         rte_flow_error_set(error, rte_errno,
11126                                            RTE_FLOW_ERROR_TYPE_ACTION,
11127                                            NULL, "failed to create ASO "
11128                                            "flow hit action");
11129                         return 0; /* 0 is an error. */
11130                 }
11131         }
11132         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11133         return pool->index | ((age_free->offset + 1) << 16);
11134 }
11135
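/*
 * A decoding sketch for the index returned above, assuming the
 * complementary lookup (flow_aso_age_get_by_idx()) splits it the same
 * way: the low 16 bits select the pool and the high 16 bits hold
 * offset + 1, so a return value of 0 can unambiguously signal failure.
 * The example_* name is hypothetical.
 */
static __rte_unused struct mlx5_aso_age_action *
example_age_idx_decode(struct mlx5_aso_age_mng *mng, uint32_t age_idx)
{
        uint16_t pool_idx = age_idx & UINT16_MAX;
        uint16_t offset = (age_idx >> 16) - 1;

        MLX5_ASSERT(age_idx); /* 0 is an error, never a valid index. */
        return &mng->pools[pool_idx]->actions[offset];
}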
11136 /**
11137  * Create an age action using the ASO mechanism.
11138  *
11139  * @param[in] dev
11140  *   Pointer to rte_eth_dev structure.
11141  * @param[in] age
11142  *   Pointer to the aging action configuration.
11143  * @param[out] error
11144  *   Pointer to the error structure.
11145  *
11146  * @return
11147  *   Index to the ASO age action on success, 0 otherwise and rte_errno is set.
11148  */
11149 static uint32_t
11150 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
11151                                  const struct rte_flow_action_age *age,
11152                                  struct rte_flow_error *error)
11153 {
11154         uint32_t age_idx = 0;
11155         struct mlx5_aso_age_action *aso_age;
11156
11157         age_idx = flow_dv_aso_age_alloc(dev, error);
11158         if (!age_idx)
11159                 return 0;
11160         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11161         aso_age->age_params.context = age->context;
11162         aso_age->age_params.timeout = age->timeout;
11163         aso_age->age_params.port_id = dev->data->port_id;
11164         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11165                          __ATOMIC_RELAXED);
11166         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11167                          __ATOMIC_RELAXED);
11168         return age_idx;
11169 }
11170
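/*
 * Usage sketch (parameters are illustrative assumptions): translating a
 * 10-second AGE action allocates an ASO age slot and returns its
 * encoded index, with the aging parameters stored on the action.
 */
static __rte_unused uint32_t
example_create_age(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        const struct rte_flow_action_age age = {
                .timeout = 10, /* Seconds without a hit before aging out. */
                .context = NULL, /* Optional user context, none here. */
        };

        return flow_dv_translate_create_aso_age(dev, &age, error);
}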
11171 /**
11172  * Prepare a DV flow counter with aging configuration.
11173  * Get the existing counter by index if one exists, create a new one otherwise.
11174  *
11175  * @param[in] dev
11176  *   Pointer to rte_eth_dev structure.
11177  * @param[in] dev_flow
11178  *   Pointer to the mlx5_flow.
11179  * @param[in, out] flow
11180  *   Pointer to the flow structure.
11181  * @param[in] count
11182  *   Pointer to the counter action configuration.
11183  * @param[in] age
11184  *   Pointer to the aging action configuration.
11185  * @param[out] error
11186  *   Pointer to the error structure.
11187  *
11188  * @return
11189  *   Pointer to the counter on success, NULL otherwise and rte_errno is set.
11190  */
11191 static struct mlx5_flow_counter *
11192 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11193                         struct mlx5_flow *dev_flow,
11194                         struct rte_flow *flow,
11195                         const struct rte_flow_action_count *count,
11196                         const struct rte_flow_action_age *age,
11197                         struct rte_flow_error *error)
11198 {
11199         if (!flow->counter) {
11200                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11201                                                                  count, age);
11202                 if (!flow->counter) {
11203                         rte_flow_error_set(error, rte_errno,
11204                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11205                                            "cannot create counter object.");
11206                         return NULL;
11207                 }
11208         }
11209         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11210 }
11211
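/*
 * Behavioral sketch (an assumption based on the code above, not driver
 * code): repeated calls for the same rte_flow reuse flow->counter, so
 * all sub flows of one flow share a single counter object.
 */
static __rte_unused void
example_counter_reuse(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
                      struct rte_flow *flow, struct rte_flow_error *error)
{
        struct mlx5_flow_counter *c1, *c2;

        c1 = flow_dv_prepare_counter(dev, dev_flow, flow, NULL, NULL, error);
        c2 = flow_dv_prepare_counter(dev, dev_flow, flow, NULL, NULL, error);
        /* The second call returns the counter created by the first one. */
        MLX5_ASSERT(c1 == NULL || c1 == c2);
}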
11212 /**
11213  * Fill the flow with DV spec, lock free
11214  * (the mutex should be acquired by the caller).
11215  *
11216  * @param[in] dev
11217  *   Pointer to rte_eth_dev structure.
11218  * @param[in, out] dev_flow
11219  *   Pointer to the sub flow.
11220  * @param[in] attr
11221  *   Pointer to the flow attributes.
11222  * @param[in] items
11223  *   Pointer to the list of items.
11224  * @param[in] actions
11225  *   Pointer to the list of actions.
11226  * @param[out] error
11227  *   Pointer to the error structure.
11228  *
11229  * @return
11230  *   0 on success, a negative errno value otherwise and rte_errno is set.
11231  */
11232 static int
11233 flow_dv_translate(struct rte_eth_dev *dev,
11234                   struct mlx5_flow *dev_flow,
11235                   const struct rte_flow_attr *attr,
11236                   const struct rte_flow_item items[],
11237                   const struct rte_flow_action actions[],
11238                   struct rte_flow_error *error)
11239 {
11240         struct mlx5_priv *priv = dev->data->dev_private;
11241         struct mlx5_dev_config *dev_conf = &priv->config;
11242         struct rte_flow *flow = dev_flow->flow;
11243         struct mlx5_flow_handle *handle = dev_flow->handle;
11244         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11245         struct mlx5_flow_rss_desc *rss_desc;
11246         uint64_t item_flags = 0;
11247         uint64_t last_item = 0;
11248         uint64_t action_flags = 0;
11249         struct mlx5_flow_dv_matcher matcher = {
11250                 .mask = {
11251                         .size = sizeof(matcher.mask.buf) -
11252                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
11253                 },
11254         };
11255         int actions_n = 0;
11256         bool actions_end = false;
11257         union {
11258                 struct mlx5_flow_dv_modify_hdr_resource res;
11259                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
11260                             sizeof(struct mlx5_modification_cmd) *
11261                             (MLX5_MAX_MODIFY_NUM + 1)];
11262         } mhdr_dummy;
11263         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
11264         const struct rte_flow_action_count *count = NULL;
11265         const struct rte_flow_action_age *non_shared_age = NULL;
11266         union flow_dv_attr flow_attr = { .attr = 0 };
11267         uint32_t tag_be;
11268         union mlx5_flow_tbl_key tbl_key;
11269         uint32_t modify_action_position = UINT32_MAX;
11270         void *match_mask = matcher.mask.buf;
11271         void *match_value = dev_flow->dv.value.buf;
11272         uint8_t next_protocol = 0xff;
11273         struct rte_vlan_hdr vlan = { 0 };
11274         struct mlx5_flow_dv_dest_array_resource mdest_res;
11275         struct mlx5_flow_dv_sample_resource sample_res;
11276         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11277         const struct rte_flow_action_sample *sample = NULL;
11278         struct mlx5_flow_sub_actions_list *sample_act;
11279         uint32_t sample_act_pos = UINT32_MAX;
11280         uint32_t age_act_pos = UINT32_MAX;
11281         uint32_t num_of_dest = 0;
11282         int tmp_actions_n = 0;
11283         uint32_t table;
11284         int ret = 0;
11285         const struct mlx5_flow_tunnel *tunnel;
11286         struct flow_grp_info grp_info = {
11287                 .external = !!dev_flow->external,
11288                 .transfer = !!attr->transfer,
11289                 .fdb_def_rule = !!priv->fdb_def_rule,
11290                 .skip_scale = dev_flow->skip_scale &
11291                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
11292         };
11293
11294         if (!wks)
11295                 return rte_flow_error_set(error, ENOMEM,
11296                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11297                                           NULL,
11298                                           "failed to push flow workspace");
11299         rss_desc = &wks->rss_desc;
11300         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
11301         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
11302         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
11303                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11304         /* Update the normal path action resource at the last index of the array. */
11305         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
11306         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
11307                  flow_items_to_tunnel(items) :
11308                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
11309                  flow_actions_to_tunnel(actions) :
11310                  dev_flow->tunnel;
11313         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
11314                                 (dev, tunnel, attr, items, actions);
11315         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
11316                                        &grp_info, error);
11317         if (ret)
11318                 return ret;
11319         dev_flow->dv.group = table;
11320         if (attr->transfer)
11321                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11322         /* The number of actions must be reset to 0 in case of a dirty stack. */
11323         mhdr_res->actions_num = 0;
11324         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
11325                 /*
11326                  * Do not add a decap action if the match rule drops packets:
11327                  * HW rejects rules that combine decap & drop.
11328                  *
11329                  * If a tunnel match rule was inserted before the matching
11330                  * tunnel set rule, the flow table used in the match rule
11331                  * must be registered. The current implementation handles
11332                  * that in flow_dv_match_register() at the end of the function.
11333                  */
11334                 bool add_decap = true;
11335                 const struct rte_flow_action *ptr = actions;
11336
11337                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
11338                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
11339                                 add_decap = false;
11340                                 break;
11341                         }
11342                 }
11343                 if (add_decap) {
11344                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11345                                                            attr->transfer,
11346                                                            error))
11347                                 return -rte_errno;
11348                         dev_flow->dv.actions[actions_n++] =
11349                                         dev_flow->dv.encap_decap->action;
11350                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11351                 }
11352         }
11353         for (; !actions_end ; actions++) {
11354                 const struct rte_flow_action_queue *queue;
11355                 const struct rte_flow_action_rss *rss;
11356                 const struct rte_flow_action *action = actions;
11357                 const uint8_t *rss_key;
11358                 struct mlx5_flow_tbl_resource *tbl;
11359                 struct mlx5_aso_age_action *age_act;
11360                 struct mlx5_flow_counter *cnt_act;
11361                 uint32_t port_id = 0;
11362                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
11363                 int action_type = actions->type;
11364                 const struct rte_flow_action *found_action = NULL;
11365                 uint32_t jump_group = 0;
11366
11367                 if (!mlx5_flow_os_action_supported(action_type))
11368                         return rte_flow_error_set(error, ENOTSUP,
11369                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11370                                                   actions,
11371                                                   "action not supported");
11372                 switch (action_type) {
11373                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
11374                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
11375                         break;
11376                 case RTE_FLOW_ACTION_TYPE_VOID:
11377                         break;
11378                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11379                         if (flow_dv_translate_action_port_id(dev, action,
11380                                                              &port_id, error))
11381                                 return -rte_errno;
11382                         port_id_resource.port_id = port_id;
11383                         MLX5_ASSERT(!handle->rix_port_id_action);
11384                         if (flow_dv_port_id_action_resource_register
11385                             (dev, &port_id_resource, dev_flow, error))
11386                                 return -rte_errno;
11387                         dev_flow->dv.actions[actions_n++] =
11388                                         dev_flow->dv.port_id_action->action;
11389                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11390                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
11391                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11392                         num_of_dest++;
11393                         break;
11394                 case RTE_FLOW_ACTION_TYPE_FLAG:
11395                         action_flags |= MLX5_FLOW_ACTION_FLAG;
11396                         dev_flow->handle->mark = 1;
11397                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11398                                 struct rte_flow_action_mark mark = {
11399                                         .id = MLX5_FLOW_MARK_DEFAULT,
11400                                 };
11401
11402                                 if (flow_dv_convert_action_mark(dev, &mark,
11403                                                                 mhdr_res,
11404                                                                 error))
11405                                         return -rte_errno;
11406                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11407                                 break;
11408                         }
11409                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
11410                         /*
11411                          * Only one FLAG or MARK is supported per device flow
11412                          * right now. So the pointer to the tag resource must be
11413                          * zero before the register process.
11414                          */
11415                         MLX5_ASSERT(!handle->dvh.rix_tag);
11416                         if (flow_dv_tag_resource_register(dev, tag_be,
11417                                                           dev_flow, error))
11418                                 return -rte_errno;
11419                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11420                         dev_flow->dv.actions[actions_n++] =
11421                                         dev_flow->dv.tag_resource->action;
11422                         break;
11423                 case RTE_FLOW_ACTION_TYPE_MARK:
11424                         action_flags |= MLX5_FLOW_ACTION_MARK;
11425                         dev_flow->handle->mark = 1;
11426                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11427                                 const struct rte_flow_action_mark *mark =
11428                                         (const struct rte_flow_action_mark *)
11429                                                 actions->conf;
11430
11431                                 if (flow_dv_convert_action_mark(dev, mark,
11432                                                                 mhdr_res,
11433                                                                 error))
11434                                         return -rte_errno;
11435                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11436                                 break;
11437                         }
11438                         /* Fall-through */
11439                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
11440                         /* Legacy (non-extensive) MARK action. */
11441                         tag_be = mlx5_flow_mark_set
11442                               (((const struct rte_flow_action_mark *)
11443                                (actions->conf))->id);
11444                         MLX5_ASSERT(!handle->dvh.rix_tag);
11445                         if (flow_dv_tag_resource_register(dev, tag_be,
11446                                                           dev_flow, error))
11447                                 return -rte_errno;
11448                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11449                         dev_flow->dv.actions[actions_n++] =
11450                                         dev_flow->dv.tag_resource->action;
11451                         break;
11452                 case RTE_FLOW_ACTION_TYPE_SET_META:
11453                         if (flow_dv_convert_action_set_meta
11454                                 (dev, mhdr_res, attr,
11455                                  (const struct rte_flow_action_set_meta *)
11456                                   actions->conf, error))
11457                                 return -rte_errno;
11458                         action_flags |= MLX5_FLOW_ACTION_SET_META;
11459                         break;
11460                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
11461                         if (flow_dv_convert_action_set_tag
11462                                 (dev, mhdr_res,
11463                                  (const struct rte_flow_action_set_tag *)
11464                                   actions->conf, error))
11465                                 return -rte_errno;
11466                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11467                         break;
11468                 case RTE_FLOW_ACTION_TYPE_DROP:
11469                         action_flags |= MLX5_FLOW_ACTION_DROP;
11470                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
11471                         break;
11472                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11473                         queue = actions->conf;
11474                         rss_desc->queue_num = 1;
11475                         rss_desc->queue[0] = queue->index;
11476                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11477                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11478                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
11479                         num_of_dest++;
11480                         break;
11481                 case RTE_FLOW_ACTION_TYPE_RSS:
11482                         rss = actions->conf;
11483                         memcpy(rss_desc->queue, rss->queue,
11484                                rss->queue_num * sizeof(uint16_t));
11485                         rss_desc->queue_num = rss->queue_num;
11486                         /* NULL RSS key indicates default RSS key. */
11487                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11488                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11489                         /*
11490                          * rss->level and rss->types should be set in advance
11491                          * when expanding items for RSS.
11492                          */
11493                         action_flags |= MLX5_FLOW_ACTION_RSS;
11494                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
11495                                 MLX5_FLOW_FATE_SHARED_RSS :
11496                                 MLX5_FLOW_FATE_QUEUE;
11497                         break;
11498                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
11499                         flow->age = (uint32_t)(uintptr_t)(action->conf);
11500                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
11501                         __atomic_fetch_add(&age_act->refcnt, 1,
11502                                            __ATOMIC_RELAXED);
11503                         age_act_pos = actions_n++;
11504                         action_flags |= MLX5_FLOW_ACTION_AGE;
11505                         break;
11506                 case RTE_FLOW_ACTION_TYPE_AGE:
11507                         non_shared_age = action->conf;
11508                         age_act_pos = actions_n++;
11509                         action_flags |= MLX5_FLOW_ACTION_AGE;
11510                         break;
11511                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
11512                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
11513                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
11514                                                              NULL);
11515                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
11516                                            __ATOMIC_RELAXED);
11517                         /* Save the information first; it will be applied later. */
11518                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11519                         break;
11520                 case RTE_FLOW_ACTION_TYPE_COUNT:
11521                         if (!dev_conf->devx) {
11522                                 return rte_flow_error_set
11523                                               (error, ENOTSUP,
11524                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11525                                                NULL,
11526                                                "count action not supported");
11527                         }
11528                         /* Save the information first; it will be applied later. */
11529                         count = action->conf;
11530                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11531                         break;
11532                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
11533                         dev_flow->dv.actions[actions_n++] =
11534                                                 priv->sh->pop_vlan_action;
11535                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
11536                         break;
11537                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
11538                         if (!(action_flags &
11539                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
11540                                 flow_dev_get_vlan_info_from_items(items, &vlan);
11541                         vlan.eth_proto = rte_be_to_cpu_16
11542                              ((((const struct rte_flow_action_of_push_vlan *)
11543                                                    actions->conf)->ethertype));
11544                         found_action = mlx5_flow_find_action
11545                                         (actions + 1,
11546                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
11547                         if (found_action)
11548                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11549                         found_action = mlx5_flow_find_action
11550                                         (actions + 1,
11551                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
11552                         if (found_action)
11553                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11554                         if (flow_dv_create_action_push_vlan
11555                                             (dev, attr, &vlan, dev_flow, error))
11556                                 return -rte_errno;
11557                         dev_flow->dv.actions[actions_n++] =
11558                                         dev_flow->dv.push_vlan_res->action;
11559                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
11560                         break;
11561                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
11562                         /* The OF_PUSH_VLAN action has already handled this one. */
11563                         MLX5_ASSERT(action_flags &
11564                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
11565                         break;
11566                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
11567                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
11568                                 break;
11569                         flow_dev_get_vlan_info_from_items(items, &vlan);
11570                         mlx5_update_vlan_vid_pcp(actions, &vlan);
11571                         /* Without a VLAN push, this is a modify header action. */
11572                         if (flow_dv_convert_action_modify_vlan_vid
11573                                                 (mhdr_res, actions, error))
11574                                 return -rte_errno;
11575                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
11576                         break;
11577                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11578                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11579                         if (flow_dv_create_action_l2_encap(dev, actions,
11580                                                            dev_flow,
11581                                                            attr->transfer,
11582                                                            error))
11583                                 return -rte_errno;
11584                         dev_flow->dv.actions[actions_n++] =
11585                                         dev_flow->dv.encap_decap->action;
11586                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11587                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11588                                 sample_act->action_flags |=
11589                                                         MLX5_FLOW_ACTION_ENCAP;
11590                         break;
11591                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
11592                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
11593                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11594                                                            attr->transfer,
11595                                                            error))
11596                                 return -rte_errno;
11597                         dev_flow->dv.actions[actions_n++] =
11598                                         dev_flow->dv.encap_decap->action;
11599                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11600                         break;
11601                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11602                         /* Handle encap with preceding decap. */
11603                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
11604                                 if (flow_dv_create_action_raw_encap
11605                                         (dev, actions, dev_flow, attr, error))
11606                                         return -rte_errno;
11607                                 dev_flow->dv.actions[actions_n++] =
11608                                         dev_flow->dv.encap_decap->action;
11609                         } else {
11610                                 /* Handle encap without preceding decap. */
11611                                 if (flow_dv_create_action_l2_encap
11612                                     (dev, actions, dev_flow, attr->transfer,
11613                                      error))
11614                                         return -rte_errno;
11615                                 dev_flow->dv.actions[actions_n++] =
11616                                         dev_flow->dv.encap_decap->action;
11617                         }
11618                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11619                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11620                                 sample_act->action_flags |=
11621                                                         MLX5_FLOW_ACTION_ENCAP;
11622                         break;
11623                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
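                        /* Skip VOID actions to check whether an encap follows. */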
11624                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
11625                                 ;
11626                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
11627                                 if (flow_dv_create_action_l2_decap
11628                                     (dev, dev_flow, attr->transfer, error))
11629                                         return -rte_errno;
11630                                 dev_flow->dv.actions[actions_n++] =
11631                                         dev_flow->dv.encap_decap->action;
11632                         }
11633                         /* If decap is followed by encap, handle it at encap. */
11634                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11635                         break;
11636                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
11637                         dev_flow->dv.actions[actions_n++] =
11638                                 (void *)(uintptr_t)action->conf;
11639                         action_flags |= MLX5_FLOW_ACTION_JUMP;
11640                         break;
11641                 case RTE_FLOW_ACTION_TYPE_JUMP:
11642                         jump_group = ((const struct rte_flow_action_jump *)
11643                                                         action->conf)->group;
11644                         grp_info.std_tbl_fix = 0;
11645                         if (dev_flow->skip_scale &
11646                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
11647                                 grp_info.skip_scale = 1;
11648                         else
11649                                 grp_info.skip_scale = 0;
11650                         ret = mlx5_flow_group_to_table(dev, tunnel,
11651                                                        jump_group,
11652                                                        &table,
11653                                                        &grp_info, error);
11654                         if (ret)
11655                                 return ret;
11656                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
11657                                                        attr->transfer,
11658                                                        !!dev_flow->external,
11659                                                        tunnel, jump_group, 0,
11660                                                        0, error);
11661                         if (!tbl)
11662                                 return rte_flow_error_set
11663                                                 (error, errno,
11664                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11665                                                  NULL,
11666                                                  "cannot create jump action.");
11667                         if (flow_dv_jump_tbl_resource_register
11668                             (dev, tbl, dev_flow, error)) {
11669                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11670                                 return rte_flow_error_set
11671                                                 (error, errno,
11672                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11673                                                  NULL,
11674                                                  "cannot create jump action.");
11675                         }
11676                         dev_flow->dv.actions[actions_n++] =
11677                                         dev_flow->dv.jump->action;
11678                         action_flags |= MLX5_FLOW_ACTION_JUMP;
11679                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
11680                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
11681                         num_of_dest++;
11682                         break;
11683                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
11684                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
11685                         if (flow_dv_convert_action_modify_mac
11686                                         (mhdr_res, actions, error))
11687                                 return -rte_errno;
11688                         action_flags |= actions->type ==
11689                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
11690                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
11691                                         MLX5_FLOW_ACTION_SET_MAC_DST;
11692                         break;
11693                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
11694                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
11695                         if (flow_dv_convert_action_modify_ipv4
11696                                         (mhdr_res, actions, error))
11697                                 return -rte_errno;
11698                         action_flags |= actions->type ==
11699                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
11700                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
11701                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
11702                         break;
11703                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
11704                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
11705                         if (flow_dv_convert_action_modify_ipv6
11706                                         (mhdr_res, actions, error))
11707                                 return -rte_errno;
11708                         action_flags |= actions->type ==
11709                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
11710                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
11711                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
11712                         break;
11713                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
11714                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
11715                         if (flow_dv_convert_action_modify_tp
11716                                         (mhdr_res, actions, items,
11717                                          &flow_attr, dev_flow, !!(action_flags &
11718                                          MLX5_FLOW_ACTION_DECAP), error))
11719                                 return -rte_errno;
11720                         action_flags |= actions->type ==
11721                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
11722                                         MLX5_FLOW_ACTION_SET_TP_SRC :
11723                                         MLX5_FLOW_ACTION_SET_TP_DST;
11724                         break;
11725                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
11726                         if (flow_dv_convert_action_modify_dec_ttl
11727                                         (mhdr_res, items, &flow_attr, dev_flow,
11728                                          !!(action_flags &
11729                                          MLX5_FLOW_ACTION_DECAP), error))
11730                                 return -rte_errno;
11731                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
11732                         break;
11733                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
11734                         if (flow_dv_convert_action_modify_ttl
11735                                         (mhdr_res, actions, items, &flow_attr,
11736                                          dev_flow, !!(action_flags &
11737                                          MLX5_FLOW_ACTION_DECAP), error))
11738                                 return -rte_errno;
11739                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
11740                         break;
11741                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
11742                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
11743                         if (flow_dv_convert_action_modify_tcp_seq
11744                                         (mhdr_res, actions, error))
11745                                 return -rte_errno;
11746                         action_flags |= actions->type ==
11747                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
11748                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
11749                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
11750                         break;
11751
11752                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
11753                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
11754                         if (flow_dv_convert_action_modify_tcp_ack
11755                                         (mhdr_res, actions, error))
11756                                 return -rte_errno;
11757                         action_flags |= actions->type ==
11758                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
11759                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
11760                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
11761                         break;
11762                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
11763                         if (flow_dv_convert_action_set_reg
11764                                         (mhdr_res, actions, error))
11765                                 return -rte_errno;
11766                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11767                         break;
11768                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
11769                         if (flow_dv_convert_action_copy_mreg
11770                                         (dev, mhdr_res, actions, error))
11771                                 return -rte_errno;
11772                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11773                         break;
11774                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
11775                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
11776                         dev_flow->handle->fate_action =
11777                                         MLX5_FLOW_FATE_DEFAULT_MISS;
11778                         break;
11779                 case RTE_FLOW_ACTION_TYPE_METER:
11780                         if (!wks->fm)
11781                                 return rte_flow_error_set(error, rte_errno,
11782                                         RTE_FLOW_ERROR_TYPE_ACTION,
11783                                         NULL, "Failed to get meter in flow.");
11784                         /* Set the meter action. */
11785                         dev_flow->dv.actions[actions_n++] =
11786                                 wks->fm->meter_action;
11787                         action_flags |= MLX5_FLOW_ACTION_METER;
11788                         break;
11789                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
11790                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
11791                                                               actions, error))
11792                                 return -rte_errno;
11793                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
11794                         break;
11795                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
11796                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
11797                                                               actions, error))
11798                                 return -rte_errno;
11799                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
11800                         break;
11801                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
11802                         sample_act_pos = actions_n;
11803                         sample = (const struct rte_flow_action_sample *)
11804                                  action->conf;
11805                         actions_n++;
11806                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
                        /* Put encap action into the group when used with port ID. */
11808                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
11809                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
11810                                 sample_act->action_flags |=
11811                                                         MLX5_FLOW_ACTION_ENCAP;
11812                         break;
11813                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
11814                         if (flow_dv_convert_action_modify_field
11815                                         (dev, mhdr_res, actions, attr, error))
11816                                 return -rte_errno;
11817                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
11818                         break;
11819                 case RTE_FLOW_ACTION_TYPE_END:
11820                         actions_end = true;
11821                         if (mhdr_res->actions_num) {
11822                                 /* create modify action if needed. */
11823                                 if (flow_dv_modify_hdr_resource_register
11824                                         (dev, mhdr_res, dev_flow, error))
11825                                         return -rte_errno;
11826                                 dev_flow->dv.actions[modify_action_position] =
11827                                         handle->dvh.modify_hdr->action;
11828                         }
11829                         /*
                         * Handle the AGE and COUNT actions by a single HW
                         * counter when they are not shared.
11832                          */
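                        /*
                         * Preference, matching the checks below: create
                         * aging by counter when a non-shared COUNT action
                         * exists or when ASO flow hit aging is unavailable
                         * (not enabled, or root group); otherwise create
                         * or reuse an ASO age action.
                         */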
11833                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
11834                                 if ((non_shared_age &&
11835                                      count && !count->shared) ||
11836                                     !(priv->sh->flow_hit_aso_en &&
11837                                       attr->group)) {
                                        /* Create aging by counter. */
11839                                         cnt_act = flow_dv_prepare_counter
11840                                                                 (dev, dev_flow,
11841                                                                  flow, count,
11842                                                                  non_shared_age,
11843                                                                  error);
11844                                         if (!cnt_act)
11845                                                 return -rte_errno;
11846                                         dev_flow->dv.actions[age_act_pos] =
11847                                                                 cnt_act->action;
11848                                         break;
11849                                 }
11850                                 if (!flow->age && non_shared_age) {
11851                                         flow->age =
11852                                                 flow_dv_translate_create_aso_age
11853                                                                 (dev,
11854                                                                  non_shared_age,
11855                                                                  error);
11856                                         if (!flow->age)
11857                                                 return rte_flow_error_set
11858                                                     (error, rte_errno,
11859                                                      RTE_FLOW_ERROR_TYPE_ACTION,
11860                                                      NULL,
11861                                                      "can't create ASO age action");
11862                                 }
11863                                 age_act = flow_aso_age_get_by_idx(dev,
11864                                                                   flow->age);
11865                                 dev_flow->dv.actions[age_act_pos] =
11866                                                              age_act->dr_action;
11867                         }
11868                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
11869                                 /*
11870                                  * Create one count action, to be used
11871                                  * by all sub-flows.
11872                                  */
11873                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
11874                                                                   flow, count,
11875                                                                   NULL, error);
11876                                 if (!cnt_act)
11877                                         return -rte_errno;
11878                                 dev_flow->dv.actions[actions_n++] =
11879                                                                 cnt_act->action;
11880                         }
11881                 default:
11882                         break;
11883                 }
11884                 if (mhdr_res->actions_num &&
11885                     modify_action_position == UINT32_MAX)
11886                         modify_action_position = actions_n++;
11887         }
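        /* Translate the pattern items into the matcher mask and value. */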
11888         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
11889                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
11890                 int item_type = items->type;
11891
11892                 if (!mlx5_flow_os_item_supported(item_type))
11893                         return rte_flow_error_set(error, ENOTSUP,
11894                                                   RTE_FLOW_ERROR_TYPE_ITEM,
11895                                                   NULL, "item not supported");
11896                 switch (item_type) {
11897                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
11898                         flow_dv_translate_item_port_id
11899                                 (dev, match_mask, match_value, items, attr);
11900                         last_item = MLX5_FLOW_ITEM_PORT_ID;
11901                         break;
11902                 case RTE_FLOW_ITEM_TYPE_ETH:
11903                         flow_dv_translate_item_eth(match_mask, match_value,
11904                                                    items, tunnel,
11905                                                    dev_flow->dv.group);
11906                         matcher.priority = action_flags &
11907                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
11908                                         !dev_flow->external ?
11909                                         MLX5_PRIORITY_MAP_L3 :
11910                                         MLX5_PRIORITY_MAP_L2;
11911                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
11912                                              MLX5_FLOW_LAYER_OUTER_L2;
11913                         break;
11914                 case RTE_FLOW_ITEM_TYPE_VLAN:
11915                         flow_dv_translate_item_vlan(dev_flow,
11916                                                     match_mask, match_value,
11917                                                     items, tunnel,
11918                                                     dev_flow->dv.group);
11919                         matcher.priority = MLX5_PRIORITY_MAP_L2;
11920                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
11921                                               MLX5_FLOW_LAYER_INNER_VLAN) :
11922                                              (MLX5_FLOW_LAYER_OUTER_L2 |
11923                                               MLX5_FLOW_LAYER_OUTER_VLAN);
11924                         break;
11925                 case RTE_FLOW_ITEM_TYPE_IPV4:
11926                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11927                                                   &item_flags, &tunnel);
11928                         flow_dv_translate_item_ipv4(match_mask, match_value,
11929                                                     items, tunnel,
11930                                                     dev_flow->dv.group);
11931                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11932                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
11933                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
11934                         if (items->mask != NULL &&
11935                             ((const struct rte_flow_item_ipv4 *)
11936                              items->mask)->hdr.next_proto_id) {
11937                                 next_protocol =
11938                                         ((const struct rte_flow_item_ipv4 *)
11939                                          (items->spec))->hdr.next_proto_id;
11940                                 next_protocol &=
11941                                         ((const struct rte_flow_item_ipv4 *)
11942                                          (items->mask))->hdr.next_proto_id;
11943                         } else {
11944                                 /* Reset for inner layer. */
11945                                 next_protocol = 0xff;
11946                         }
11947                         break;
11948                 case RTE_FLOW_ITEM_TYPE_IPV6:
11949                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11950                                                   &item_flags, &tunnel);
11951                         flow_dv_translate_item_ipv6(match_mask, match_value,
11952                                                     items, tunnel,
11953                                                     dev_flow->dv.group);
11954                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11955                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
11956                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
11957                         if (items->mask != NULL &&
11958                             ((const struct rte_flow_item_ipv6 *)
11959                              items->mask)->hdr.proto) {
11960                                 next_protocol =
11961                                         ((const struct rte_flow_item_ipv6 *)
11962                                          items->spec)->hdr.proto;
11963                                 next_protocol &=
11964                                         ((const struct rte_flow_item_ipv6 *)
11965                                          items->mask)->hdr.proto;
11966                         } else {
11967                                 /* Reset for inner layer. */
11968                                 next_protocol = 0xff;
11969                         }
11970                         break;
11971                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
11972                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
11973                                                              match_value,
11974                                                              items, tunnel);
11975                         last_item = tunnel ?
11976                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
11977                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
11978                         if (items->mask != NULL &&
11979                             ((const struct rte_flow_item_ipv6_frag_ext *)
11980                              items->mask)->hdr.next_header) {
11981                                 next_protocol =
11982                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11983                                  items->spec)->hdr.next_header;
11984                                 next_protocol &=
11985                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11986                                  items->mask)->hdr.next_header;
11987                         } else {
11988                                 /* Reset for inner layer. */
11989                                 next_protocol = 0xff;
11990                         }
11991                         break;
11992                 case RTE_FLOW_ITEM_TYPE_TCP:
11993                         flow_dv_translate_item_tcp(match_mask, match_value,
11994                                                    items, tunnel);
11995                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11996                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
11997                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
11998                         break;
11999                 case RTE_FLOW_ITEM_TYPE_UDP:
12000                         flow_dv_translate_item_udp(match_mask, match_value,
12001                                                    items, tunnel);
12002                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12003                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
12004                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
12005                         break;
12006                 case RTE_FLOW_ITEM_TYPE_GRE:
12007                         flow_dv_translate_item_gre(match_mask, match_value,
12008                                                    items, tunnel);
12009                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12010                         last_item = MLX5_FLOW_LAYER_GRE;
12011                         break;
12012                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
12013                         flow_dv_translate_item_gre_key(match_mask,
12014                                                        match_value, items);
12015                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
12016                         break;
12017                 case RTE_FLOW_ITEM_TYPE_NVGRE:
12018                         flow_dv_translate_item_nvgre(match_mask, match_value,
12019                                                      items, tunnel);
12020                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12021                         last_item = MLX5_FLOW_LAYER_GRE;
12022                         break;
12023                 case RTE_FLOW_ITEM_TYPE_VXLAN:
12024                         flow_dv_translate_item_vxlan(match_mask, match_value,
12025                                                      items, tunnel);
12026                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12027                         last_item = MLX5_FLOW_LAYER_VXLAN;
12028                         break;
12029                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
12030                         flow_dv_translate_item_vxlan_gpe(match_mask,
12031                                                          match_value, items,
12032                                                          tunnel);
12033                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12034                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
12035                         break;
12036                 case RTE_FLOW_ITEM_TYPE_GENEVE:
12037                         flow_dv_translate_item_geneve(match_mask, match_value,
12038                                                       items, tunnel);
12039                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12040                         last_item = MLX5_FLOW_LAYER_GENEVE;
12041                         break;
12042                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
12043                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
12044                                                           match_value,
12045                                                           items, error);
12046                         if (ret)
12047                                 return rte_flow_error_set(error, -ret,
12048                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12049                                         "cannot create GENEVE TLV option");
12050                         flow->geneve_tlv_option = 1;
12051                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
12052                         break;
12053                 case RTE_FLOW_ITEM_TYPE_MPLS:
12054                         flow_dv_translate_item_mpls(match_mask, match_value,
12055                                                     items, last_item, tunnel);
12056                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12057                         last_item = MLX5_FLOW_LAYER_MPLS;
12058                         break;
12059                 case RTE_FLOW_ITEM_TYPE_MARK:
12060                         flow_dv_translate_item_mark(dev, match_mask,
12061                                                     match_value, items);
12062                         last_item = MLX5_FLOW_ITEM_MARK;
12063                         break;
12064                 case RTE_FLOW_ITEM_TYPE_META:
12065                         flow_dv_translate_item_meta(dev, match_mask,
12066                                                     match_value, attr, items);
12067                         last_item = MLX5_FLOW_ITEM_METADATA;
12068                         break;
12069                 case RTE_FLOW_ITEM_TYPE_ICMP:
12070                         flow_dv_translate_item_icmp(match_mask, match_value,
12071                                                     items, tunnel);
12072                         last_item = MLX5_FLOW_LAYER_ICMP;
12073                         break;
12074                 case RTE_FLOW_ITEM_TYPE_ICMP6:
12075                         flow_dv_translate_item_icmp6(match_mask, match_value,
12076                                                       items, tunnel);
12077                         last_item = MLX5_FLOW_LAYER_ICMP6;
12078                         break;
12079                 case RTE_FLOW_ITEM_TYPE_TAG:
12080                         flow_dv_translate_item_tag(dev, match_mask,
12081                                                    match_value, items);
12082                         last_item = MLX5_FLOW_ITEM_TAG;
12083                         break;
12084                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
12085                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
12086                                                         match_value, items);
12087                         last_item = MLX5_FLOW_ITEM_TAG;
12088                         break;
12089                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
12090                         flow_dv_translate_item_tx_queue(dev, match_mask,
12091                                                         match_value,
12092                                                         items);
12093                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
12094                         break;
12095                 case RTE_FLOW_ITEM_TYPE_GTP:
12096                         flow_dv_translate_item_gtp(match_mask, match_value,
12097                                                    items, tunnel);
12098                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12099                         last_item = MLX5_FLOW_LAYER_GTP;
12100                         break;
12101                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12102                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12103                                                           match_value,
12104                                                           items);
12105                         if (ret)
12106                                 return rte_flow_error_set(error, -ret,
12107                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12108                                         "cannot create GTP PSC item");
12109                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12110                         break;
12111                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12112                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
                                /* Create the parser only on first use. */
12114                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12115                                 if (ret)
12116                                         return rte_flow_error_set
12117                                                 (error, -ret,
12118                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12119                                                 NULL,
12120                                                 "cannot create eCPRI parser");
12121                         }
                        /* Adjust the matcher and device flow value sizes. */
12123                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12124                         dev_flow->dv.value.size =
12125                                         MLX5_ST_SZ_BYTES(fte_match_param);
12126                         flow_dv_translate_item_ecpri(dev, match_mask,
12127                                                      match_value, items);
12128                         /* No other protocol should follow eCPRI layer. */
12129                         last_item = MLX5_FLOW_LAYER_ECPRI;
12130                         break;
12131                 default:
12132                         break;
12133                 }
12134                 item_flags |= last_item;
12135         }
        /*
         * When E-Switch mode is enabled, there are two cases where the
         * source port must be set manually: a NIC steering rule, and an
         * E-Switch rule in which no port_id item was found. In both cases
         * the source port is set according to the port currently in use.
         */
12143         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
12144             (priv->representor || priv->master)) {
12145                 if (flow_dv_translate_item_port_id(dev, match_mask,
12146                                                    match_value, NULL, attr))
12147                         return -rte_errno;
12148         }
12149 #ifdef RTE_LIBRTE_MLX5_DEBUG
12150         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
12151                                               dev_flow->dv.value.buf));
12152 #endif
12153         /*
12154          * Layers may be already initialized from prefix flow if this dev_flow
12155          * is the suffix flow.
12156          */
12157         handle->layers |= item_flags;
12158         if (action_flags & MLX5_FLOW_ACTION_RSS)
12159                 flow_dv_hashfields_set(dev_flow, rss_desc);
        /* If the sample action contains an RSS action, the sample/mirror
         * resource should be registered after the hash fields are updated.
         */
12163         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
12164                 ret = flow_dv_translate_action_sample(dev,
12165                                                       sample,
12166                                                       dev_flow, attr,
12167                                                       &num_of_dest,
12168                                                       sample_actions,
12169                                                       &sample_res,
12170                                                       error);
12171                 if (ret < 0)
12172                         return ret;
12173                 ret = flow_dv_create_action_sample(dev,
12174                                                    dev_flow,
12175                                                    num_of_dest,
12176                                                    &sample_res,
12177                                                    &mdest_res,
12178                                                    sample_actions,
12179                                                    action_flags,
12180                                                    error);
12181                 if (ret < 0)
12182                         return rte_flow_error_set
12183                                                 (error, rte_errno,
12184                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12185                                                 NULL,
12186                                                 "cannot create sample action");
12187                 if (num_of_dest > 1) {
12188                         dev_flow->dv.actions[sample_act_pos] =
12189                         dev_flow->dv.dest_array_res->action;
12190                 } else {
12191                         dev_flow->dv.actions[sample_act_pos] =
12192                         dev_flow->dv.sample_res->verbs_action;
12193                 }
12194         }
        /*
         * For multiple destinations (sample action with ratio=1), the encap
         * action and the port ID action are combined into a group action,
         * so the original actions must be removed from the flow and only
         * the sample action used instead.
         */
12201         if (num_of_dest > 1 &&
12202             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
12203                 int i;
12204                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12205
12206                 for (i = 0; i < actions_n; i++) {
12207                         if ((sample_act->dr_encap_action &&
12208                                 sample_act->dr_encap_action ==
12209                                 dev_flow->dv.actions[i]) ||
12210                                 (sample_act->dr_port_id_action &&
12211                                 sample_act->dr_port_id_action ==
12212                                 dev_flow->dv.actions[i]) ||
12213                                 (sample_act->dr_jump_action &&
12214                                 sample_act->dr_jump_action ==
12215                                 dev_flow->dv.actions[i]))
12216                                 continue;
12217                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
12218                 }
12219                 memcpy((void *)dev_flow->dv.actions,
12220                                 (void *)temp_actions,
12221                                 tmp_actions_n * sizeof(void *));
12222                 actions_n = tmp_actions_n;
12223         }
12224         dev_flow->dv.actions_n = actions_n;
12225         dev_flow->act_flags = action_flags;
12226         if (wks->skip_matcher_reg)
12227                 return 0;
12228         /* Register matcher. */
12229         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
12230                                     matcher.mask.size);
12231         matcher.priority = mlx5_get_matcher_priority(dev, attr,
12232                                         matcher.priority);
        /* The reserved field does not need to be set to 0 here. */
12234         tbl_key.is_fdb = attr->transfer;
12235         tbl_key.is_egress = attr->egress;
12236         tbl_key.level = dev_flow->dv.group;
12237         tbl_key.id = dev_flow->dv.table_id;
12238         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
12239                                      tunnel, attr->group, error))
12240                 return -rte_errno;
12241         return 0;
12242 }
12243
/**
 * Set a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
 * The IBV_RX_HASH_INNER (tunnel) bit is masked out when selecting a slot.
 *
 * @param[in, out] action
 *   Shared RSS action holding hash RX queue objects.
 * @param[in] hash_fields
 *   Defines combination of packet fields to participate in RX hash.
 * @param[in] hrxq_idx
 *   Hash RX queue index to set.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
12260 static int
12261 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
12262                               const uint64_t hash_fields,
12263                               uint32_t hrxq_idx)
12264 {
12265         uint32_t *hrxqs = action->hrxq;
12266
12267         switch (hash_fields & ~IBV_RX_HASH_INNER) {
12268         case MLX5_RSS_HASH_IPV4:
12269                 /* fall-through. */
12270         case MLX5_RSS_HASH_IPV4_DST_ONLY:
12271                 /* fall-through. */
12272         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
12273                 hrxqs[0] = hrxq_idx;
12274                 return 0;
12275         case MLX5_RSS_HASH_IPV4_TCP:
12276                 /* fall-through. */
12277         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
12278                 /* fall-through. */
12279         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
12280                 hrxqs[1] = hrxq_idx;
12281                 return 0;
12282         case MLX5_RSS_HASH_IPV4_UDP:
12283                 /* fall-through. */
12284         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
12285                 /* fall-through. */
12286         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
12287                 hrxqs[2] = hrxq_idx;
12288                 return 0;
12289         case MLX5_RSS_HASH_IPV6:
12290                 /* fall-through. */
12291         case MLX5_RSS_HASH_IPV6_DST_ONLY:
12292                 /* fall-through. */
12293         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
12294                 hrxqs[3] = hrxq_idx;
12295                 return 0;
12296         case MLX5_RSS_HASH_IPV6_TCP:
12297                 /* fall-through. */
12298         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
12299                 /* fall-through. */
12300         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
12301                 hrxqs[4] = hrxq_idx;
12302                 return 0;
12303         case MLX5_RSS_HASH_IPV6_UDP:
12304                 /* fall-through. */
12305         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
12306                 /* fall-through. */
12307         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
12308                 hrxqs[5] = hrxq_idx;
12309                 return 0;
12310         case MLX5_RSS_HASH_NONE:
12311                 hrxqs[6] = hrxq_idx;
12312                 return 0;
12313         default:
12314                 return -1;
12315         }
12316 }
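
/*
 * Slot layout of the shared action's hrxq[] array, as implied by the
 * switch statements in the set/lookup helpers (a summary, not an API):
 *
 *   hrxqs[0] - IPv4 (including the SRC_ONLY/DST_ONLY variants)
 *   hrxqs[1] - IPv4 TCP
 *   hrxqs[2] - IPv4 UDP
 *   hrxqs[3] - IPv6
 *   hrxqs[4] - IPv6 TCP
 *   hrxqs[5] - IPv6 UDP
 *   hrxqs[6] - no hash (MLX5_RSS_HASH_NONE)
 */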
12317
/**
 * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
 * The IBV_RX_HASH_INNER (tunnel) bit is masked out when selecting a slot.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   Shared RSS action ID holding hash RX queue objects.
 * @param[in] hash_fields
 *   Defines combination of packet fields to participate in RX hash.
 *
 * @return
 *   Valid hash RX queue index, otherwise 0.
 */
12334 static uint32_t
12335 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
12336                                  const uint64_t hash_fields)
12337 {
12338         struct mlx5_priv *priv = dev->data->dev_private;
12339         struct mlx5_shared_action_rss *shared_rss =
12340             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12341         const uint32_t *hrxqs = shared_rss->hrxq;
12342
12343         switch (hash_fields & ~IBV_RX_HASH_INNER) {
12344         case MLX5_RSS_HASH_IPV4:
12345                 /* fall-through. */
12346         case MLX5_RSS_HASH_IPV4_DST_ONLY:
12347                 /* fall-through. */
12348         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
12349                 return hrxqs[0];
12350         case MLX5_RSS_HASH_IPV4_TCP:
12351                 /* fall-through. */
12352         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
12353                 /* fall-through. */
12354         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
12355                 return hrxqs[1];
12356         case MLX5_RSS_HASH_IPV4_UDP:
12357                 /* fall-through. */
12358         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
12359                 /* fall-through. */
12360         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
12361                 return hrxqs[2];
12362         case MLX5_RSS_HASH_IPV6:
12363                 /* fall-through. */
12364         case MLX5_RSS_HASH_IPV6_DST_ONLY:
12365                 /* fall-through. */
12366         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
12367                 return hrxqs[3];
12368         case MLX5_RSS_HASH_IPV6_TCP:
12369                 /* fall-through. */
12370         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
12371                 /* fall-through. */
12372         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
12373                 return hrxqs[4];
12374         case MLX5_RSS_HASH_IPV6_UDP:
12375                 /* fall-through. */
12376         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
12377                 /* fall-through. */
12378         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
12379                 return hrxqs[5];
12380         case MLX5_RSS_HASH_NONE:
12381                 return hrxqs[6];
12382         default:
12383                 return 0;
12384         }
12386 }
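
/*
 * Minimal usage sketch for the set/lookup pair above (illustrative only;
 * "shared_rss", "srss_idx" and "new_hrxq_idx" are hypothetical locals and
 * error handling is omitted):
 *
 *   uint32_t idx;
 *
 *   __flow_dv_action_rss_hrxq_set(shared_rss, MLX5_RSS_HASH_IPV4_TCP,
 *                                 new_hrxq_idx);
 *   idx = __flow_dv_action_rss_hrxq_lookup(dev, srss_idx,
 *                                          MLX5_RSS_HASH_IPV4_TCP);
 */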
12387
12388 /**
 * Apply the flow to the NIC, lock free
 * (the mutex should be acquired by the caller).
12391  *
12392  * @param[in] dev
12393  *   Pointer to the Ethernet device structure.
12394  * @param[in, out] flow
12395  *   Pointer to flow structure.
12396  * @param[out] error
12397  *   Pointer to error structure.
12398  *
12399  * @return
12400  *   0 on success, a negative errno value otherwise and rte_errno is set.
12401  */
12402 static int
12403 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
12404               struct rte_flow_error *error)
12405 {
12406         struct mlx5_flow_dv_workspace *dv;
12407         struct mlx5_flow_handle *dh;
12408         struct mlx5_flow_handle_dv *dv_h;
12409         struct mlx5_flow *dev_flow;
12410         struct mlx5_priv *priv = dev->data->dev_private;
12411         uint32_t handle_idx;
12412         int n;
12413         int err;
12414         int idx;
12415         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12416         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
12417
12418         MLX5_ASSERT(wks);
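        /* Walk the sub-flows created in this workspace, newest first. */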
12419         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
12420                 dev_flow = &wks->flows[idx];
12421                 dv = &dev_flow->dv;
12422                 dh = dev_flow->handle;
12423                 dv_h = &dh->dvh;
12424                 n = dv->actions_n;
12425                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
12426                         if (dv->transfer) {
12427                                 MLX5_ASSERT(priv->sh->dr_drop_action);
12428                                 dv->actions[n++] = priv->sh->dr_drop_action;
12429                         } else {
12430 #ifdef HAVE_MLX5DV_DR
12431                                 /* DR supports drop action placeholder. */
12432                                 MLX5_ASSERT(priv->sh->dr_drop_action);
12433                                 dv->actions[n++] = priv->sh->dr_drop_action;
12434 #else
12435                                 /* For DV we use the explicit drop queue. */
12436                                 MLX5_ASSERT(priv->drop_queue.hrxq);
12437                                 dv->actions[n++] =
12438                                                 priv->drop_queue.hrxq->action;
12439 #endif
12440                         }
12441                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
12442                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
12443                         struct mlx5_hrxq *hrxq;
12444                         uint32_t hrxq_idx;
12445
12446                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
12447                                                     &hrxq_idx);
12448                         if (!hrxq) {
12449                                 rte_flow_error_set
12450                                         (error, rte_errno,
12451                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12452                                          "cannot get hash queue");
12453                                 goto error;
12454                         }
12455                         dh->rix_hrxq = hrxq_idx;
12456                         dv->actions[n++] = hrxq->action;
12457                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12458                         struct mlx5_hrxq *hrxq = NULL;
12459                         uint32_t hrxq_idx;
12460
12461                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
12462                                                 rss_desc->shared_rss,
12463                                                 dev_flow->hash_fields);
12464                         if (hrxq_idx)
12465                                 hrxq = mlx5_ipool_get
12466                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
12467                                          hrxq_idx);
12468                         if (!hrxq) {
12469                                 rte_flow_error_set
12470                                         (error, rte_errno,
12471                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12472                                          "cannot get hash queue");
12473                                 goto error;
12474                         }
12475                         dh->rix_srss = rss_desc->shared_rss;
12476                         dv->actions[n++] = hrxq->action;
12477                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
12478                         if (!priv->sh->default_miss_action) {
12479                                 rte_flow_error_set
12480                                         (error, rte_errno,
12481                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "default miss action not created.");
12483                                 goto error;
12484                         }
12485                         dv->actions[n++] = priv->sh->default_miss_action;
12486                 }
12487                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
12488                                                (void *)&dv->value, n,
12489                                                dv->actions, &dh->drv_flow);
12490                 if (err) {
12491                         rte_flow_error_set(error, errno,
12492                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12493                                            NULL,
12494                                            "hardware refuses to create flow");
12495                         goto error;
12496                 }
12497                 if (priv->vmwa_context &&
12498                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
12499                         /*
12500                          * The rule contains the VLAN pattern.
                         * For a VF, create a VLAN interface so the
                         * hypervisor sets the correct e-Switch vport
                         * context.
12504                          */
12505                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
12506                 }
12507         }
12508         return 0;
12509 error:
12510         err = rte_errno; /* Save rte_errno before cleanup. */
12511         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
12512                        handle_idx, dh, next) {
12513                 /* hrxq is union, don't clear it if the flag is not set. */
12514                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
12515                         mlx5_hrxq_release(dev, dh->rix_hrxq);
12516                         dh->rix_hrxq = 0;
12517                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12518                         dh->rix_srss = 0;
12519                 }
12520                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12521                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12522         }
12523         rte_errno = err; /* Restore rte_errno. */
12524         return -rte_errno;
12525 }
12526
12527 void
12528 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
12529                           struct mlx5_cache_entry *entry)
12530 {
12531         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
12532                                                           entry);
12533
12534         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
12535         mlx5_free(cache);
12536 }
12537
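/*
 * The *_release() helpers below share one shape: resolve the index into
 * the pooled resource, then drop one reference on the cache/hash list
 * entry; the matching *_remove_cb() frees the object once the last
 * reference is gone.
 */
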
12538 /**
12539  * Release the flow matcher.
12540  *
12541  * @param dev
12542  *   Pointer to Ethernet device.
 * @param handle
 *   Pointer to mlx5_flow_handle holding the matcher.
12545  *
12546  * @return
12547  *   1 while a reference on it exists, 0 when freed.
12548  */
12549 static int
12550 flow_dv_matcher_release(struct rte_eth_dev *dev,
12551                         struct mlx5_flow_handle *handle)
12552 {
12553         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
12554         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
12555                                                             typeof(*tbl), tbl);
12556         int ret;
12557
12558         MLX5_ASSERT(matcher->matcher_object);
12559         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
12560         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
12561         return ret;
12562 }
12563
12564 /**
12565  * Release encap_decap resource.
12566  *
12567  * @param list
12568  *   Pointer to the hash list.
12569  * @param entry
 *   Pointer to the existing resource entry object.
12571  */
12572 void
12573 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
12574                               struct mlx5_hlist_entry *entry)
12575 {
12576         struct mlx5_dev_ctx_shared *sh = list->ctx;
12577         struct mlx5_flow_dv_encap_decap_resource *res =
12578                 container_of(entry, typeof(*res), entry);
12579
12580         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12581         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
12582 }
12583
12584 /**
12585  * Release an encap/decap resource.
12586  *
12587  * @param dev
12588  *   Pointer to Ethernet device.
12589  * @param encap_decap_idx
12590  *   Index of encap decap resource.
12591  *
12592  * @return
12593  *   1 while a reference on it exists, 0 when freed.
12594  */
12595 static int
12596 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
12597                                      uint32_t encap_decap_idx)
12598 {
12599         struct mlx5_priv *priv = dev->data->dev_private;
12600         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
12601
12602         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
12603                                         encap_decap_idx);
12604         if (!cache_resource)
12605                 return 0;
12606         MLX5_ASSERT(cache_resource->action);
12607         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
12608                                      &cache_resource->entry);
12609 }
12610
12611 /**
 * Release a jump to table action resource.
12613  *
12614  * @param dev
12615  *   Pointer to Ethernet device.
12616  * @param rix_jump
12617  *   Index to the jump action resource.
12618  *
12619  * @return
12620  *   1 while a reference on it exists, 0 when freed.
12621  */
12622 static int
12623 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
12624                                   uint32_t rix_jump)
12625 {
12626         struct mlx5_priv *priv = dev->data->dev_private;
12627         struct mlx5_flow_tbl_data_entry *tbl_data;
12628
12629         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
12630                                   rix_jump);
12631         if (!tbl_data)
12632                 return 0;
12633         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
12634 }
12635
12636 void
12637 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
12638                          struct mlx5_hlist_entry *entry)
12639 {
12640         struct mlx5_flow_dv_modify_hdr_resource *res =
12641                 container_of(entry, typeof(*res), entry);
12642
12643         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12644         mlx5_free(entry);
12645 }
12646
12647 /**
12648  * Release a modify-header resource.
12649  *
12650  * @param dev
12651  *   Pointer to Ethernet device.
12652  * @param handle
12653  *   Pointer to mlx5_flow_handle.
12654  *
12655  * @return
12656  *   1 while a reference on it exists, 0 when freed.
12657  */
12658 static int
12659 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
12660                                     struct mlx5_flow_handle *handle)
12661 {
12662         struct mlx5_priv *priv = dev->data->dev_private;
12663         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
12664
12665         MLX5_ASSERT(entry->action);
12666         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
12667 }
12668
12669 void
12670 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
12671                           struct mlx5_cache_entry *entry)
12672 {
12673         struct mlx5_dev_ctx_shared *sh = list->ctx;
12674         struct mlx5_flow_dv_port_id_action_resource *cache =
12675                         container_of(entry, typeof(*cache), entry);
12676
12677         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12678         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
12679 }
12680
12681 /**
12682  * Release port ID action resource.
12683  *
12684  * @param dev
12685  *   Pointer to Ethernet device.
12686  * @param handle
12687  *   Pointer to mlx5_flow_handle.
12688  *
12689  * @return
12690  *   1 while a reference on it exists, 0 when freed.
12691  */
12692 static int
12693 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
12694                                         uint32_t port_id)
12695 {
12696         struct mlx5_priv *priv = dev->data->dev_private;
12697         struct mlx5_flow_dv_port_id_action_resource *cache;
12698
12699         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
12700         if (!cache)
12701                 return 0;
12702         MLX5_ASSERT(cache->action);
12703         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
12704                                      &cache->entry);
12705 }
12706
12707 /**
12708  * Release shared RSS action resource.
12709  *
12710  * @param dev
12711  *   Pointer to Ethernet device.
12712  * @param srss
12713  *   Shared RSS action index.
12714  */
12715 static void
12716 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
12717 {
12718         struct mlx5_priv *priv = dev->data->dev_private;
12719         struct mlx5_shared_action_rss *shared_rss;
12720
12721         shared_rss = mlx5_ipool_get
12722                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
12723         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
12724 }
12725
12726 void
12727 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
12728                             struct mlx5_cache_entry *entry)
12729 {
12730         struct mlx5_dev_ctx_shared *sh = list->ctx;
12731         struct mlx5_flow_dv_push_vlan_action_resource *cache =
12732                         container_of(entry, typeof(*cache), entry);
12733
12734         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12735         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
12736 }
12737
12738 /**
12739  * Release push vlan action resource.
12740  *
12741  * @param dev
12742  *   Pointer to Ethernet device.
12743  * @param handle
12744  *   Pointer to mlx5_flow_handle.
12745  *
12746  * @return
12747  *   1 while a reference on it exists, 0 when freed.
12748  */
12749 static int
12750 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
12751                                           struct mlx5_flow_handle *handle)
12752 {
12753         struct mlx5_priv *priv = dev->data->dev_private;
12754         struct mlx5_flow_dv_push_vlan_action_resource *cache;
12755         uint32_t idx = handle->dvh.rix_push_vlan;
12756
12757         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
12758         if (!cache)
12759                 return 0;
12760         MLX5_ASSERT(cache->action);
12761         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
12762                                      &cache->entry);
12763 }
12764
12765 /**
12766  * Release the fate resource.
12767  *
12768  * @param dev
12769  *   Pointer to Ethernet device.
12770  * @param handle
12771  *   Pointer to mlx5_flow_handle.
12772  */
12773 static void
12774 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
12775                                struct mlx5_flow_handle *handle)
12776 {
12777         if (!handle->rix_fate)
12778                 return;
12779         switch (handle->fate_action) {
12780         case MLX5_FLOW_FATE_QUEUE:
12781                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
12782                         mlx5_hrxq_release(dev, handle->rix_hrxq);
12783                 break;
12784         case MLX5_FLOW_FATE_JUMP:
12785                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
12786                 break;
12787         case MLX5_FLOW_FATE_PORT_ID:
12788                 flow_dv_port_id_action_resource_release(dev,
12789                                 handle->rix_port_id_action);
12790                 break;
12791         default:
12792                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
12793                 break;
12794         }
12795         handle->rix_fate = 0;
12796 }
12797
12798 void
12799 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
12800                          struct mlx5_cache_entry *entry)
12801 {
12802         struct mlx5_flow_dv_sample_resource *cache_resource =
12803                         container_of(entry, typeof(*cache_resource), entry);
12804         struct rte_eth_dev *dev = cache_resource->dev;
12805         struct mlx5_priv *priv = dev->data->dev_private;
12806
12807         if (cache_resource->verbs_action)
12808                 claim_zero(mlx5_flow_os_destroy_flow_action
12809                                 (cache_resource->verbs_action));
12810         if (cache_resource->normal_path_tbl)
12811                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12812                         cache_resource->normal_path_tbl);
12813         flow_dv_sample_sub_actions_release(dev,
12814                                 &cache_resource->sample_idx);
12815         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12816                         cache_resource->idx);
12817         DRV_LOG(DEBUG, "sample resource %p: removed",
12818                 (void *)cache_resource);
12819 }
12820
12821 /**
 * Release a sample resource.
12823  *
12824  * @param dev
12825  *   Pointer to Ethernet device.
12826  * @param handle
12827  *   Pointer to mlx5_flow_handle.
12828  *
12829  * @return
12830  *   1 while a reference on it exists, 0 when freed.
12831  */
12832 static int
12833 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
12834                                      struct mlx5_flow_handle *handle)
12835 {
12836         struct mlx5_priv *priv = dev->data->dev_private;
12837         struct mlx5_flow_dv_sample_resource *cache_resource;
12838
12839         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12840                          handle->dvh.rix_sample);
12841         if (!cache_resource)
12842                 return 0;
12843         MLX5_ASSERT(cache_resource->verbs_action);
12844         return mlx5_cache_unregister(&priv->sh->sample_action_list,
12845                                      &cache_resource->entry);
12846 }
12847
12848 void
12849 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
12850                              struct mlx5_cache_entry *entry)
12851 {
12852         struct mlx5_flow_dv_dest_array_resource *cache_resource =
12853                         container_of(entry, typeof(*cache_resource), entry);
12854         struct rte_eth_dev *dev = cache_resource->dev;
12855         struct mlx5_priv *priv = dev->data->dev_private;
12856         uint32_t i = 0;
12857
12858         MLX5_ASSERT(cache_resource->action);
12859         if (cache_resource->action)
12860                 claim_zero(mlx5_flow_os_destroy_flow_action
12861                                         (cache_resource->action));
12862         for (; i < cache_resource->num_of_dest; i++)
12863                 flow_dv_sample_sub_actions_release(dev,
12864                                 &cache_resource->sample_idx[i]);
12865         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12866                         cache_resource->idx);
12867         DRV_LOG(DEBUG, "destination array resource %p: removed",
12868                 (void *)cache_resource);
12869 }
12870
12871 /**
12872  * Release a destination array resource.
12873  *
12874  * @param dev
12875  *   Pointer to Ethernet device.
12876  * @param handle
12877  *   Pointer to mlx5_flow_handle.
12878  *
12879  * @return
12880  *   1 while a reference on it exists, 0 when freed.
12881  */
12882 static int
12883 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
12884                                     struct mlx5_flow_handle *handle)
12885 {
12886         struct mlx5_priv *priv = dev->data->dev_private;
12887         struct mlx5_flow_dv_dest_array_resource *cache;
12888
12889         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12890                                handle->dvh.rix_dest_array);
12891         if (!cache)
12892                 return 0;
12893         MLX5_ASSERT(cache->action);
12894         return mlx5_cache_unregister(&priv->sh->dest_array_list,
12895                                      &cache->entry);
12896 }
12897
12898 static void
12899 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
12900 {
12901         struct mlx5_priv *priv = dev->data->dev_private;
12902         struct mlx5_dev_ctx_shared *sh = priv->sh;
12903         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
12904                                 sh->geneve_tlv_option_resource;
12905         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
12906         if (geneve_opt_resource) {
12907                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
12908                                          __ATOMIC_RELAXED))) {
12909                         claim_zero(mlx5_devx_cmd_destroy
12910                                         (geneve_opt_resource->obj));
12911                         mlx5_free(sh->geneve_tlv_option_resource);
12912                         sh->geneve_tlv_option_resource = NULL;
12913                 }
12914         }
12915         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
12916 }
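/*
 * The release above follows the usual shared-resource teardown pattern in
 * this file: drop one reference under the lock and destroy the object only
 * when the count reaches zero. A minimal, standalone sketch of that pattern
 * follows; MLX5_FLOW_DV_DOC_SKETCH is a hypothetical guard that is never
 * defined, so the sketch is not compiled.
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static bool
doc_refcnt_release(uint32_t *refcnt)
{
	/* True when the last reference was dropped and freeing is due. */
	return __atomic_sub_fetch(refcnt, 1, __ATOMIC_RELAXED) == 0;
}
#endif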
12917
12918 /**
12919  * Remove the flow from the NIC but keep it in memory.
12920  * Lock free (mutex should be acquired by the caller).
12921  *
12922  * @param[in] dev
12923  *   Pointer to Ethernet device.
12924  * @param[in, out] flow
12925  *   Pointer to flow structure.
12926  */
12927 static void
12928 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12929 {
12930         struct mlx5_flow_handle *dh;
12931         uint32_t handle_idx;
12932         struct mlx5_priv *priv = dev->data->dev_private;
12933
12934         if (!flow)
12935                 return;
12936         handle_idx = flow->dev_handles;
12937         while (handle_idx) {
12938                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12939                                     handle_idx);
12940                 if (!dh)
12941                         return;
12942                 if (dh->drv_flow) {
12943                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
12944                         dh->drv_flow = NULL;
12945                 }
12946                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
12947                         flow_dv_fate_resource_release(dev, dh);
12948                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12949                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12950                 handle_idx = dh->next.next;
12951         }
12952 }
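/*
 * A flow's device handles form a singly linked list threaded through ipool
 * indices, so each node must be resolved with mlx5_ipool_get() before use,
 * exactly as flow_dv_remove() above does. A hypothetical walking helper
 * under the never-defined MLX5_FLOW_DV_DOC_SKETCH guard, for illustration
 * only:
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static uint32_t
doc_count_dev_handles(struct rte_eth_dev *dev, const struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx = flow->dev_handles;
	uint32_t n = 0;

	while (handle_idx) {
		struct mlx5_flow_handle *dh =
			mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				       handle_idx);

		if (!dh)
			break;
		n++;
		handle_idx = dh->next.next;
	}
	return n;
}
#endif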
12953
12954 /**
12955  * Remove the flow from the NIC and from memory.
12956  * Lock free (mutex should be acquired by the caller).
12957  *
12958  * @param[in] dev
12959  *   Pointer to the Ethernet device structure.
12960  * @param[in, out] flow
12961  *   Pointer to flow structure.
12962  */
12963 static void
12964 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
12965 {
12966         struct mlx5_flow_handle *dev_handle;
12967         struct mlx5_priv *priv = dev->data->dev_private;
12968         struct mlx5_flow_meter_info *fm = NULL;
12969         uint32_t srss = 0;
12970
12971         if (!flow)
12972                 return;
12973         flow_dv_remove(dev, flow);
12974         if (flow->counter) {
12975                 flow_dv_counter_free(dev, flow->counter);
12976                 flow->counter = 0;
12977         }
12978         if (flow->meter) {
12979                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
12980                 if (fm)
12981                         mlx5_flow_meter_detach(priv, fm);
12982                 flow->meter = 0;
12983         }
12984         if (flow->age)
12985                 flow_dv_aso_age_release(dev, flow->age);
12986         if (flow->geneve_tlv_option) {
12987                 flow_dv_geneve_tlv_option_resource_release(dev);
12988                 flow->geneve_tlv_option = 0;
12989         }
12990         while (flow->dev_handles) {
12991                 uint32_t tmp_idx = flow->dev_handles;
12992
12993                 dev_handle = mlx5_ipool_get(priv->sh->ipool
12994                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
12995                 if (!dev_handle)
12996                         return;
12997                 flow->dev_handles = dev_handle->next.next;
12998                 if (dev_handle->dvh.matcher)
12999                         flow_dv_matcher_release(dev, dev_handle);
13000                 if (dev_handle->dvh.rix_sample)
13001                         flow_dv_sample_resource_release(dev, dev_handle);
13002                 if (dev_handle->dvh.rix_dest_array)
13003                         flow_dv_dest_array_resource_release(dev, dev_handle);
13004                 if (dev_handle->dvh.rix_encap_decap)
13005                         flow_dv_encap_decap_resource_release(dev,
13006                                 dev_handle->dvh.rix_encap_decap);
13007                 if (dev_handle->dvh.modify_hdr)
13008                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
13009                 if (dev_handle->dvh.rix_push_vlan)
13010                         flow_dv_push_vlan_action_resource_release(dev,
13011                                                                   dev_handle);
13012                 if (dev_handle->dvh.rix_tag)
13013                         flow_dv_tag_release(dev,
13014                                             dev_handle->dvh.rix_tag);
13015                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
13016                         flow_dv_fate_resource_release(dev, dev_handle);
13017                 else if (!srss)
13018                         srss = dev_handle->rix_srss;
13019                 if (fm && dev_handle->is_meter_flow_id &&
13020                     dev_handle->split_flow_id)
13021                         mlx5_ipool_free(fm->flow_ipool,
13022                                         dev_handle->split_flow_id);
13023                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13024                            tmp_idx);
13025         }
13026         if (srss)
13027                 flow_dv_shared_rss_action_release(dev, srss);
13028 }
13029
13030 /**
13031  * Release array of hash RX queue objects.
13032  * Helper function.
13033  *
13034  * @param[in] dev
13035  *   Pointer to the Ethernet device structure.
13036  * @param[in, out] hrxqs
13037  *   Array of hash RX queue objects.
13038  *
13039  * @return
13040  *   Total number of references to hash RX queue objects in *hrxqs* array
13041  *   after this operation.
13042  */
13043 static int
13044 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
13045                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
13046 {
13047         size_t i;
13048         int remaining = 0;
13049
13050         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
13051                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
13052
13053                 if (!ret)
13054                         (*hrxqs)[i] = 0;
13055                 remaining += ret;
13056         }
13057         return remaining;
13058 }
13059
13060 /**
13061  * Release all hash RX queue objects representing shared RSS action.
13062  *
13063  * @param[in] dev
13064  *   Pointer to the Ethernet device structure.
13065  * @param[in, out] action
13066  *   Shared RSS action to remove hash RX queue objects from.
13067  *
13068  * @return
13069  *   Total number of references to hash RX queue objects stored in *action*
13070  *   after this operation.
13071  *   Expected to be 0 if no external references are held.
13072  */
13073 static int
13074 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
13075                                  struct mlx5_shared_action_rss *shared_rss)
13076 {
13077         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
13078 }
13079
13080 /**
13081  * Adjust the L3/L4 hash value of a pre-created shared RSS hrxq according
13082  * to the user input.
13083  *
13084  * Only one hash value is available for each L3+L4 combination.
13085  * For example,
13086  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY and
13087  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
13088  * the same slot in mlx5_rss_hash_fields.
13089  *
13090  * @param[in] rss
13091  *   Pointer to the shared action RSS conf.
13092  * @param[in, out] hash_field
13093  *   The hash_field value to be adjusted.
13094  *
13095  * @return
13096  *   void
13097  */
13098 static void
13099 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
13100                                      uint64_t *hash_field)
13101 {
13102         uint64_t rss_types = rss->origin.types;
13103
13104         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13105         case MLX5_RSS_HASH_IPV4:
13106                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13107                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13108                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13109                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13110                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13111                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13112                         else
13113                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13114                 }
13115                 return;
13116         case MLX5_RSS_HASH_IPV6:
13117                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13118                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13119                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13120                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13121                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13122                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13123                         else
13124                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13125                 }
13126                 return;
13127         case MLX5_RSS_HASH_IPV4_UDP:
13128                 /* fall-through. */
13129         case MLX5_RSS_HASH_IPV6_UDP:
13130                 if (rss_types & ETH_RSS_UDP) {
13131                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13132                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13133                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13134                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13135                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13136                         else
13137                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
13138                 }
13139                 return;
13140         case MLX5_RSS_HASH_IPV4_TCP:
13141                 /* fall-through. */
13142         case MLX5_RSS_HASH_IPV6_TCP:
13143                 if (rss_types & ETH_RSS_TCP) {
13144                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
13145                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13146                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
13147                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13148                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
13149                         else
13150                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
13151                 }
13152                 return;
13153         default:
13154                 return;
13155         }
13156 }
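/*
 * Worked example of the adjustment above, assuming the shared RSS action
 * was created with types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY: the
 * pre-created IPv4 slot is narrowed from MLX5_RSS_HASH_IPV4 to source-only
 * hashing. Guarded by the never-defined MLX5_FLOW_DV_DOC_SKETCH macro,
 * illustration only:
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static void
doc_hash_adjust_example(struct mlx5_shared_action_rss *rss)
{
	uint64_t hash_fields = MLX5_RSS_HASH_IPV4;

	rss->origin.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY;
	__flow_dv_action_rss_l34_hash_adjust(rss, &hash_fields);
	/* Only the source IPv4 bit remains selected. */
	MLX5_ASSERT(hash_fields == IBV_RX_HASH_SRC_IPV4);
}
#endif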
13157
13158 /**
13159  * Set up the shared RSS action.
13160  * Prepare a set of hash RX queue objects sufficient to handle all valid
13161  * hash_fields combinations (see enum ibv_rx_hash_fields).
13162  *
13163  * @param[in] dev
13164  *   Pointer to the Ethernet device structure.
13165  * @param[in] action_idx
13166  *   Shared RSS action ipool index.
13167  * @param[in, out] action
13168  *   Partially initialized shared RSS action.
13169  * @param[out] error
13170  *   Perform verbose error reporting if not NULL. Initialized in case of
13171  *   error only.
13172  *
13173  * @return
13174  *   0 on success, otherwise negative errno value.
13175  */
13176 static int
13177 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
13178                            uint32_t action_idx,
13179                            struct mlx5_shared_action_rss *shared_rss,
13180                            struct rte_flow_error *error)
13181 {
13182         struct mlx5_flow_rss_desc rss_desc = { 0 };
13183         size_t i;
13184         int err;
13185
13186         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
13187                 return rte_flow_error_set(error, rte_errno,
13188                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13189                                           "cannot setup indirection table");
13190         }
13191         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
13192         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
13193         rss_desc.const_q = shared_rss->origin.queue;
13194         rss_desc.queue_num = shared_rss->origin.queue_num;
13195         /* Set non-zero value to indicate a shared RSS. */
13196         rss_desc.shared_rss = action_idx;
13197         rss_desc.ind_tbl = shared_rss->ind_tbl;
13198         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
13199                 uint32_t hrxq_idx;
13200                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
13201                 int tunnel = 0;
13202
13203                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
13204                 if (shared_rss->origin.level > 1) {
13205                         hash_fields |= IBV_RX_HASH_INNER;
13206                         tunnel = 1;
13207                 }
13208                 rss_desc.tunnel = tunnel;
13209                 rss_desc.hash_fields = hash_fields;
13210                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
13211                 if (!hrxq_idx) {
13212                         rte_flow_error_set
13213                                 (error, rte_errno,
13214                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13215                                  "cannot get hash queue");
13216                         goto error_hrxq_new;
13217                 }
13218                 err = __flow_dv_action_rss_hrxq_set
13219                         (shared_rss, hash_fields, hrxq_idx);
13220                 MLX5_ASSERT(!err);
13221         }
13222         return 0;
13223 error_hrxq_new:
13224         err = rte_errno;
13225         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
13226         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
13227                 shared_rss->ind_tbl = NULL;
13228         rte_errno = err;
13229         return -rte_errno;
13230 }
13231
13232 /**
13233  * Create shared RSS action.
13234  *
13235  * @param[in] dev
13236  *   Pointer to the Ethernet device structure.
13237  * @param[in] conf
13238  *   Shared action configuration.
13239  * @param[in] rss
13240  *   RSS action specification used to create shared action.
13241  * @param[out] error
13242  *   Perform verbose error reporting if not NULL. Initialized in case of
13243  *   error only.
13244  *
13245  * @return
13246  *   A valid shared action ID in case of success, 0 otherwise and
13247  *   rte_errno is set.
13248  */
13249 static uint32_t
13250 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
13251                             const struct rte_flow_indir_action_conf *conf,
13252                             const struct rte_flow_action_rss *rss,
13253                             struct rte_flow_error *error)
13254 {
13255         struct mlx5_priv *priv = dev->data->dev_private;
13256         struct mlx5_shared_action_rss *shared_rss = NULL;
13257         void *queue = NULL;
13258         struct rte_flow_action_rss *origin;
13259         const uint8_t *rss_key;
13260         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
13261         uint32_t idx;
13262
13263         RTE_SET_USED(conf);
13264         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
13265                             0, SOCKET_ID_ANY);
13266         shared_rss = mlx5_ipool_zmalloc
13267                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
13268         if (!shared_rss || !queue) {
13269                 rte_flow_error_set(error, ENOMEM,
13270                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13271                                    "cannot allocate resource memory");
13272                 goto error_rss_init;
13273         }
13274         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
13275                 rte_flow_error_set(error, E2BIG,
13276                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13277                                    "rss action number out of range");
13278                 goto error_rss_init;
13279         }
13280         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
13281                                           sizeof(*shared_rss->ind_tbl),
13282                                           0, SOCKET_ID_ANY);
13283         if (!shared_rss->ind_tbl) {
13284                 rte_flow_error_set(error, ENOMEM,
13285                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13286                                    "cannot allocate resource memory");
13287                 goto error_rss_init;
13288         }
13289         memcpy(queue, rss->queue, queue_size);
13290         shared_rss->ind_tbl->queues = queue;
13291         shared_rss->ind_tbl->queues_n = rss->queue_num;
13292         origin = &shared_rss->origin;
13293         origin->func = rss->func;
13294         origin->level = rss->level;
13295         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
13296         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
13297         /* NULL RSS key indicates default RSS key. */
13298         rss_key = !rss->key ? rss_hash_default_key : rss->key;
13299         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
13300         origin->key = &shared_rss->key[0];
13301         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
13302         origin->queue = queue;
13303         origin->queue_num = rss->queue_num;
13304         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
13305                 goto error_rss_init;
13306         rte_spinlock_init(&shared_rss->action_rss_sl);
13307         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13308         rte_spinlock_lock(&priv->shared_act_sl);
13309         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13310                      &priv->rss_shared_actions, idx, shared_rss, next);
13311         rte_spinlock_unlock(&priv->shared_act_sl);
13312         return idx;
13313 error_rss_init:
13314         if (shared_rss) {
13315                 if (shared_rss->ind_tbl)
13316                         mlx5_free(shared_rss->ind_tbl);
13317                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13318                                 idx);
13319         }
13320         if (queue)
13321                 mlx5_free(queue);
13322         return 0;
13323 }
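/*
 * Usage sketch: how an application reaches __flow_dv_action_rss_create()
 * through the public rte_flow API. The port id and queue ids below are made
 * up for illustration; the never-defined MLX5_FLOW_DV_DOC_SKETCH guard
 * keeps the sketch out of the build.
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static struct rte_flow_action_handle *
doc_create_shared_rss(uint16_t port_id, struct rte_flow_error *error)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss rss_conf = {
		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
		.level = 0,
		.types = ETH_RSS_IP,	/* 0 would also select ETH_RSS_IP. */
		.key = NULL,		/* NULL selects the default RSS key. */
		.key_len = 0,
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss_conf,
	};
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };

	return rte_flow_action_handle_create(port_id, &conf, &action, error);
}
#endif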
13324
13325 /**
13326  * Destroy the shared RSS action.
13327  * Release related hash RX queue objects.
13328  *
13329  * @param[in] dev
13330  *   Pointer to the Ethernet device structure.
13331  * @param[in] idx
13332  *   The shared RSS action object ID to be removed.
13333  * @param[out] error
13334  *   Perform verbose error reporting if not NULL. Initialized in case of
13335  *   error only.
13336  *
13337  * @return
13338  *   0 on success, otherwise negative errno value.
13339  */
13340 static int
13341 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
13342                              struct rte_flow_error *error)
13343 {
13344         struct mlx5_priv *priv = dev->data->dev_private;
13345         struct mlx5_shared_action_rss *shared_rss =
13346             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13347         uint32_t old_refcnt = 1;
13348         int remaining;
13349         uint16_t *queue = NULL;
13350
13351         if (!shared_rss)
13352                 return rte_flow_error_set(error, EINVAL,
13353                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13354                                           "invalid shared action");
13355         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
13356         if (remaining)
13357                 return rte_flow_error_set(error, EBUSY,
13358                                           RTE_FLOW_ERROR_TYPE_ACTION,
13359                                           NULL,
13360                                           "shared rss hrxq has references");
13361         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
13362                                          0, 0, __ATOMIC_ACQUIRE,
13363                                          __ATOMIC_RELAXED))
13364                 return rte_flow_error_set(error, EBUSY,
13365                                           RTE_FLOW_ERROR_TYPE_ACTION,
13366                                           NULL,
13367                                           "shared rss has references");
13368         queue = shared_rss->ind_tbl->queues;
13369         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
13370         if (remaining)
13371                 return rte_flow_error_set(error, EBUSY,
13372                                           RTE_FLOW_ERROR_TYPE_ACTION,
13373                                           NULL,
13374                                           "shared rss indirection table has"
13375                                           " references");
13376         mlx5_free(queue);
13377         rte_spinlock_lock(&priv->shared_act_sl);
13378         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13379                      &priv->rss_shared_actions, idx, shared_rss, next);
13380         rte_spinlock_unlock(&priv->shared_act_sl);
13381         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13382                         idx);
13383         return 0;
13384 }
13385
13386 /**
13387  * Create an indirect action, lock free
13388  * (mutex should be acquired by the caller).
13389  * Dispatcher for the action-type-specific call.
13390  *
13391  * @param[in] dev
13392  *   Pointer to the Ethernet device structure.
13393  * @param[in] conf
13394  *   Shared action configuration.
13395  * @param[in] action
13396  *   Action specification used to create indirect action.
13397  * @param[out] error
13398  *   Perform verbose error reporting if not NULL. Initialized in case of
13399  *   error only.
13400  *
13401  * @return
13402  *   A valid shared action handle in case of success, NULL otherwise and
13403  *   rte_errno is set.
13404  */
13405 static struct rte_flow_action_handle *
13406 flow_dv_action_create(struct rte_eth_dev *dev,
13407                       const struct rte_flow_indir_action_conf *conf,
13408                       const struct rte_flow_action *action,
13409                       struct rte_flow_error *err)
13410 {
13411         uint32_t idx = 0;
13412         uint32_t ret = 0;
13413
13414         switch (action->type) {
13415         case RTE_FLOW_ACTION_TYPE_RSS:
13416                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
13417                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
13418                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13419                 break;
13420         case RTE_FLOW_ACTION_TYPE_AGE:
13421                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
13422                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
13423                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13424                 if (ret) {
13425                         struct mlx5_aso_age_action *aso_age =
13426                                               flow_aso_age_get_by_idx(dev, ret);
13427
13428                         if (!aso_age->age_params.context)
13429                                 aso_age->age_params.context =
13430                                                          (void *)(uintptr_t)idx;
13431                 }
13432                 break;
13433         case RTE_FLOW_ACTION_TYPE_COUNT:
13434                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
13435                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
13436                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13437                 break;
13438         default:
13439                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
13440                                    NULL, "action type not supported");
13441                 break;
13442         }
13443         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
13444 }
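/*
 * The returned handle is not a pointer but a packed 32-bit value: the
 * action type sits in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and
 * the per-type object index below them. A decoding sketch mirroring
 * flow_dv_action_destroy() below, under the never-defined
 * MLX5_FLOW_DV_DOC_SKETCH guard:
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static void
doc_decode_action_handle(const struct rte_flow_action_handle *handle,
			 uint32_t *type, uint32_t *obj_idx)
{
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;

	*type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	*obj_idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
}
#endif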
13445
13446 /**
13447  * Destroy the indirect action.
13448  * Release the action-related resources on the NIC and in memory.
13449  * Lock free (mutex should be acquired by the caller).
13450  * Dispatcher for the action-type-specific call.
13451  *
13452  * @param[in] dev
13453  *   Pointer to the Ethernet device structure.
13454  * @param[in] handle
13455  *   The indirect action object handle to be removed.
13456  * @param[out] error
13457  *   Perform verbose error reporting if not NULL. Initialized in case of
13458  *   error only.
13459  *
13460  * @return
13461  *   0 on success, otherwise negative errno value.
13462  */
13463 static int
13464 flow_dv_action_destroy(struct rte_eth_dev *dev,
13465                        struct rte_flow_action_handle *handle,
13466                        struct rte_flow_error *error)
13467 {
13468         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13469         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13470         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13471         struct mlx5_flow_counter *cnt;
13472         uint32_t no_flow_refcnt = 1;
13473         int ret;
13474
13475         switch (type) {
13476         case MLX5_INDIRECT_ACTION_TYPE_RSS:
13477                 return __flow_dv_action_rss_release(dev, idx, error);
13478         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
13479                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
13480                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
13481                                                  &no_flow_refcnt, 1, false,
13482                                                  __ATOMIC_ACQUIRE,
13483                                                  __ATOMIC_RELAXED))
13484                         return rte_flow_error_set(error, EBUSY,
13485                                                   RTE_FLOW_ERROR_TYPE_ACTION,
13486                                                   NULL,
13487                                                   "Indirect count action has references");
13488                 flow_dv_counter_free(dev, idx);
13489                 return 0;
13490         case MLX5_INDIRECT_ACTION_TYPE_AGE:
13491                 ret = flow_dv_aso_age_release(dev, idx);
13492                 if (ret)
13493                         /*
13494                          * In this case, the last flow holding a reference
13495                          * will actually release the age action.
13496                          */
13497                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
13498                                 " released with references %d.", idx, ret);
13499                 return 0;
13500         default:
13501                 return rte_flow_error_set(error, ENOTSUP,
13502                                           RTE_FLOW_ERROR_TYPE_ACTION,
13503                                           NULL,
13504                                           "action type not supported");
13505         }
13506 }
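/*
 * Usage sketch for the destroy path (hypothetical helper, never-defined
 * MLX5_FLOW_DV_DOC_SKETCH guard): the call may legitimately fail with
 * EBUSY while flows still reference the indirect action, as the RSS and
 * COUNT branches above show.
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static int
doc_destroy_indirect_action(uint16_t port_id,
			    struct rte_flow_action_handle *handle)
{
	struct rte_flow_error error;

	return rte_flow_action_handle_destroy(port_id, handle, &error);
}
#endif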
13507
13508 /**
13509  * Update the shared RSS action configuration in place.
13510  *
13511  * @param[in] dev
13512  *   Pointer to the Ethernet device structure.
13513  * @param[in] idx
13514  *   The shared RSS action object ID to be updated.
13515  * @param[in] action_conf
13516  *   RSS action specification used to modify *shared_rss*.
13517  * @param[out] error
13518  *   Perform verbose error reporting if not NULL. Initialized in case of
13519  *   error only.
13520  *
13521  * @return
13522  *   0 on success, otherwise negative errno value.
13523  * @note Currently only updating the RSS queues is supported.
13524  */
13525 static int
13526 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
13527                             const struct rte_flow_action_rss *action_conf,
13528                             struct rte_flow_error *error)
13529 {
13530         struct mlx5_priv *priv = dev->data->dev_private;
13531         struct mlx5_shared_action_rss *shared_rss =
13532             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13533         int ret = 0;
13534         void *queue = NULL;
13535         uint16_t *queue_old = NULL;
13536         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
13537
13538         if (!shared_rss)
13539                 return rte_flow_error_set(error, EINVAL,
13540                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13541                                           "invalid shared action to update");
13542         if (priv->obj_ops.ind_table_modify == NULL)
13543                 return rte_flow_error_set(error, ENOTSUP,
13544                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13545                                           "cannot modify indirection table");
13546         queue = mlx5_malloc(MLX5_MEM_ZERO,
13547                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
13548                             0, SOCKET_ID_ANY);
13549         if (!queue)
13550                 return rte_flow_error_set(error, ENOMEM,
13551                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13552                                           NULL,
13553                                           "cannot allocate resource memory");
13554         memcpy(queue, action_conf->queue, queue_size);
13555         MLX5_ASSERT(shared_rss->ind_tbl);
13556         rte_spinlock_lock(&shared_rss->action_rss_sl);
13557         queue_old = shared_rss->ind_tbl->queues;
13558         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
13559                                         queue, action_conf->queue_num, true);
13560         if (ret) {
13561                 mlx5_free(queue);
13562                 ret = rte_flow_error_set(error, rte_errno,
13563                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13564                                           "cannot update indirection table");
13565         } else {
13566                 mlx5_free(queue_old);
13567                 shared_rss->origin.queue = queue;
13568                 shared_rss->origin.queue_num = action_conf->queue_num;
13569         }
13570         rte_spinlock_unlock(&shared_rss->action_rss_sl);
13571         return ret;
13572 }
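/*
 * Usage sketch: updating the queue set of a shared RSS action. As
 * flow_dv_action_update() below shows, this PMD expects *update* to be a
 * complete rte_flow_action whose conf carries the new RSS configuration;
 * only the queues are honored (see the note above). The queue ids are
 * hypothetical and the MLX5_FLOW_DV_DOC_SKETCH guard is never defined.
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static int
doc_update_shared_rss_queues(uint16_t port_id,
			     struct rte_flow_action_handle *handle,
			     struct rte_flow_error *error)
{
	static const uint16_t new_queues[] = { 4, 5 };
	const struct rte_flow_action_rss rss_conf = {
		.queue = new_queues,
		.queue_num = RTE_DIM(new_queues),
	};
	const struct rte_flow_action update = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss_conf,
	};

	return rte_flow_action_handle_update(port_id, handle, &update, error);
}
#endif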
13573
13574 /**
13575  * Update the shared action configuration in place, lock free
13576  * (mutex should be acquired by the caller).
13577  *
13578  * @param[in] dev
13579  *   Pointer to the Ethernet device structure.
13580  * @param[in] handle
13581  *   The indirect action object handle to be updated.
13582  * @param[in] update
13583  *   Action specification used to modify the action pointed to by *handle*.
13584  *   *update* can be of the same type as the action pointed to by the
13585  *   *handle* argument, or some other structure, like a wrapper, depending
13586  *   on the indirect action type.
13587  * @param[out] error
13588  *   Perform verbose error reporting if not NULL. Initialized in case of
13589  *   error only.
13590  *
13591  * @return
13592  *   0 on success, otherwise negative errno value.
13593  */
13594 static int
13595 flow_dv_action_update(struct rte_eth_dev *dev,
13596                         struct rte_flow_action_handle *handle,
13597                         const void *update,
13598                         struct rte_flow_error *err)
13599 {
13600         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13601         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13602         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13603         const void *action_conf;
13604
13605         switch (type) {
13606         case MLX5_INDIRECT_ACTION_TYPE_RSS:
13607                 action_conf = ((const struct rte_flow_action *)update)->conf;
13608                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
13609         default:
13610                 return rte_flow_error_set(err, ENOTSUP,
13611                                           RTE_FLOW_ERROR_TYPE_ACTION,
13612                                           NULL,
13613                                           "action type update not supported");
13614         }
13615 }
13616
13617 /**
13618  * Destroy the meter sub policy table rules.
13619  * Lock free (mutex should be acquired by the caller).
13620  *
13621  * @param[in] dev
13622  *   Pointer to Ethernet device.
13623  * @param[in] sub_policy
13624  *   Pointer to meter sub policy table.
13625  */
13626 static void
13627 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
13628                              struct mlx5_flow_meter_sub_policy *sub_policy)
13629 {
13630         struct mlx5_flow_tbl_data_entry *tbl;
13631         int i;
13632
13633         for (i = 0; i < RTE_COLORS; i++) {
13634                 if (sub_policy->color_rule[i]) {
13635                         claim_zero(mlx5_flow_os_destroy_flow
13636                                 (sub_policy->color_rule[i]));
13637                         sub_policy->color_rule[i] = NULL;
13638                 }
13639                 if (sub_policy->color_matcher[i]) {
13640                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
13641                                 typeof(*tbl), tbl);
13642                         mlx5_cache_unregister(&tbl->matchers,
13643                                       &sub_policy->color_matcher[i]->entry);
13644                         sub_policy->color_matcher[i] = NULL;
13645                 }
13646         }
13647         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
13648                 if (sub_policy->rix_hrxq[i]) {
13649                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
13650                         sub_policy->rix_hrxq[i] = 0;
13651                 }
13652                 if (sub_policy->jump_tbl[i]) {
13653                         flow_dv_tbl_resource_release(MLX5_SH(dev),
13654                         sub_policy->jump_tbl[i]);
13655                         sub_policy->jump_tbl[i] = NULL;
13656                 }
13657         }
13658         if (sub_policy->tbl_rsc) {
13659                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13660                         sub_policy->tbl_rsc);
13661                 sub_policy->tbl_rsc = NULL;
13662         }
13663 }
13664
13665 /**
13666  * Destroy the policy rules, lock free
13667  * (mutex should be acquired by the caller).
13668  * Iterates all meter domains and their sub-policy tables.
13669  *
13670  * @param[in] dev
13671  *   Pointer to the Ethernet device structure.
13672  * @param[in] mtr_policy
13673  *   Meter policy struct.
13674  */
13675 static void
13676 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
13677                       struct mlx5_flow_meter_policy *mtr_policy)
13678 {
13679         uint32_t i, j;
13680         struct mlx5_flow_meter_sub_policy *sub_policy;
13681         uint16_t sub_policy_num;
13682
13683         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
13684                 sub_policy_num = (mtr_policy->sub_policy_num >>
13685                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
13686                         MLX5_MTR_SUB_POLICY_NUM_MASK;
13687                 for (j = 0; j < sub_policy_num; j++) {
13688                         sub_policy = mtr_policy->sub_policys[i][j];
13689                         if (sub_policy)
13690                                 __flow_dv_destroy_sub_policy_rules
13691                                                 (dev, sub_policy);
13692                 }
13693         }
13694 }
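/*
 * The per-domain sub-policy counts iterated above are packed into the
 * single 32-bit mtr_policy->sub_policy_num word, with
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per domain. An unpacking sketch under
 * the never-defined MLX5_FLOW_DV_DOC_SKETCH guard:
 */
#ifdef MLX5_FLOW_DV_DOC_SKETCH
static uint16_t
doc_sub_policy_num(const struct mlx5_flow_meter_policy *mtr_policy,
		   enum mlx5_meter_domain domain)
{
	return (mtr_policy->sub_policy_num >>
		(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
		MLX5_MTR_SUB_POLICY_NUM_MASK;
}
#endif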
13695
13696 /**
13697  * Destroy the policy actions, lock free
13698  * (mutex should be acquired by the caller).
13699  * Dispatcher for the fate-action-specific release.
13700  *
13701  * @param[in] dev
13702  *   Pointer to the Ethernet device structure.
13703  * @param[in] mtr_policy
13704  *   Meter policy struct.
13705  */
13706 static void
13707 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
13708                       struct mlx5_flow_meter_policy *mtr_policy)
13709 {
13710         struct rte_flow_action *rss_action;
13711         struct mlx5_flow_handle dev_handle;
13712         uint32_t i, j;
13713
13714         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
13715                 if (mtr_policy->act_cnt[i].rix_mark) {
13716                         flow_dv_tag_release(dev,
13717                                 mtr_policy->act_cnt[i].rix_mark);
13718                         mtr_policy->act_cnt[i].rix_mark = 0;
13719                 }
13720                 if (mtr_policy->act_cnt[i].modify_hdr) {
13721                         dev_handle.dvh.modify_hdr =
13722                                 mtr_policy->act_cnt[i].modify_hdr;
13723                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
13724                 }
13725                 switch (mtr_policy->act_cnt[i].fate_action) {
13726                 case MLX5_FLOW_FATE_SHARED_RSS:
13727                         rss_action = mtr_policy->act_cnt[i].rss;
13728                         mlx5_free(rss_action);
13729                         break;
13730                 case MLX5_FLOW_FATE_PORT_ID:
13731                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
13732                                 flow_dv_port_id_action_resource_release(dev,
13733                                 mtr_policy->act_cnt[i].rix_port_id_action);
13734                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
13735                         }
13736                         break;
13737                 case MLX5_FLOW_FATE_DROP:
13738                 case MLX5_FLOW_FATE_JUMP:
13739                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
13740                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
13741                                                 NULL;
13742                         break;
13743                 default:
13744                         /* Queue action needs nothing to be done. */
13745                         break;
13746                 }
13747         }
13748         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
13749                 mtr_policy->dr_drop_action[j] = NULL;
13750 }
13751
13752 /**
13753  * Create the policy actions per domain, lock free
13754  * (mutex should be acquired by the caller).
13755  * Dispatcher for the action-type-specific call.
13756  *
13757  * @param[in] dev
13758  *   Pointer to the Ethernet device structure.
13759  * @param[in] mtr_policy
13760  *   Meter policy struct.
13761  * @param[in] action
13762  *   Action specification used to create meter actions.
13763  * @param[out] error
13764  *   Perform verbose error reporting if not NULL. Initialized in case of
13765  *   error only.
13766  *
13767  * @return
13768  *   0 on success, otherwise negative errno value.
13769  */
13770 static int
13771 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
13772                         struct mlx5_flow_meter_policy *mtr_policy,
13773                         const struct rte_flow_action *actions[RTE_COLORS],
13774                         enum mlx5_meter_domain domain,
13775                         struct rte_mtr_error *error)
13776 {
13777         struct mlx5_priv *priv = dev->data->dev_private;
13778         struct rte_flow_error flow_err;
13779         const struct rte_flow_action *act;
13780         uint64_t action_flags = 0;
13781         struct mlx5_flow_handle dh;
13782         struct mlx5_flow dev_flow;
13783         struct mlx5_flow_dv_port_id_action_resource port_id_action;
13784         int i, ret;
13785         uint8_t egress, transfer;
13786         struct mlx5_meter_policy_action_container *act_cnt = NULL;
13787         union {
13788                 struct mlx5_flow_dv_modify_hdr_resource res;
13789                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13790                             sizeof(struct mlx5_modification_cmd) *
13791                             (MLX5_MAX_MODIFY_NUM + 1)];
13792         } mhdr_dummy;
13793
13794         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
13795         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
13796         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
13797         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
13798         memset(&port_id_action, 0,
13799                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
13800         dev_flow.handle = &dh;
13801         dev_flow.dv.port_id_action = &port_id_action;
13802         dev_flow.external = true;
13803         for (i = 0; i < RTE_COLORS; i++) {
13804                 if (i < MLX5_MTR_RTE_COLORS)
13805                         act_cnt = &mtr_policy->act_cnt[i];
13806                 for (act = actions[i];
13807                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
13808                         act++) {
13809                         switch (act->type) {
13810                         case RTE_FLOW_ACTION_TYPE_MARK:
13811                         {
13812                                 uint32_t tag_be = mlx5_flow_mark_set
13813                                         (((const struct rte_flow_action_mark *)
13814                                         (act->conf))->id);
13815
13816                                 if (i >= MLX5_MTR_RTE_COLORS)
13817                                         return -rte_mtr_error_set(error,
13818                                           ENOTSUP,
13819                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13820                                           NULL,
13821                                           "cannot create policy "
13822                                           "mark action for this color");
13823                                 dev_flow.handle->mark = 1;
13824                                 if (flow_dv_tag_resource_register(dev, tag_be,
13825                                                   &dev_flow, &flow_err))
13826                                         return -rte_mtr_error_set(error,
13827                                         ENOTSUP,
13828                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13829                                         NULL,
13830                                         "cannot setup policy mark action");
13831                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
13832                                 act_cnt->rix_mark =
13833                                         dev_flow.handle->dvh.rix_tag;
13834                                 if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
13835                                         dev_flow.handle->rix_hrxq =
13836                         mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
13837                                         flow_drv_rxq_flags_set(dev,
13838                                                 dev_flow.handle);
13839                                 }
13840                                 action_flags |= MLX5_FLOW_ACTION_MARK;
13841                                 break;
13842                         }
13843                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
13844                         {
13845                                 struct mlx5_flow_dv_modify_hdr_resource
13846                                         *mhdr_res = &mhdr_dummy.res;
13847
13848                                 if (i >= MLX5_MTR_RTE_COLORS)
13849                                         return -rte_mtr_error_set(error,
13850                                           ENOTSUP,
13851                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13852                                           NULL,
13853                                           "cannot create policy "
13854                                           "set tag action for this color");
13855                                 memset(mhdr_res, 0, sizeof(*mhdr_res));
13856                                 mhdr_res->ft_type = transfer ?
13857                                         MLX5DV_FLOW_TABLE_TYPE_FDB :
13858                                         egress ?
13859                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
13860                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
13861                                 if (flow_dv_convert_action_set_tag
13862                                 (dev, mhdr_res,
13863                                 (const struct rte_flow_action_set_tag *)
13864                                 act->conf,  &flow_err))
13865                                         return -rte_mtr_error_set(error,
13866                                         ENOTSUP,
13867                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13868                                         NULL, "cannot convert policy "
13869                                         "set tag action");
13870                                 if (!mhdr_res->actions_num)
13871                                         return -rte_mtr_error_set(error,
13872                                         ENOTSUP,
13873                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13874                                         NULL, "cannot find policy "
13875                                         "set tag action");
13876                                 /* Create the modify action if needed. */
13877                                 dev_flow.dv.group = 1;
13878                                 if (flow_dv_modify_hdr_resource_register
13879                                         (dev, mhdr_res, &dev_flow, &flow_err))
13880                                         return -rte_mtr_error_set(error,
13881                                         ENOTSUP,
13882                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13883                                         NULL, "cannot register policy "
13884                                         "set tag action");
13885                                 act_cnt->modify_hdr =
13886                                 dev_flow.handle->dvh.modify_hdr;
13887                                 if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
13888                                         dev_flow.handle->rix_hrxq =
13889                                 mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
13890                                         flow_drv_rxq_flags_set(dev,
13891                                                 dev_flow.handle);
13892                                 }
13893                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13894                                 break;
13895                         }
13896                         case RTE_FLOW_ACTION_TYPE_DROP:
13897                         {
13898                                 struct mlx5_flow_mtr_mng *mtrmng =
13899                                                 priv->sh->mtrmng;
13900                                 struct mlx5_flow_tbl_data_entry *tbl_data;
13901
13902                                 /*
13903                                  * Create the drop table with
13904                                  * METER DROP level.
13905                                  */
13906                                 if (!mtrmng->drop_tbl[domain]) {
13907                                         mtrmng->drop_tbl[domain] =
13908                                         flow_dv_tbl_resource_get(dev,
13909                                         MLX5_FLOW_TABLE_LEVEL_METER,
13910                                         egress, transfer, false, NULL, 0,
13911                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
13912                                         if (!mtrmng->drop_tbl[domain])
13913                                                 return -rte_mtr_error_set
13914                                         (error, ENOTSUP,
13915                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13916                                         NULL,
13917                                         "Failed to create meter drop table");
13918                                 }
13919                                 tbl_data = container_of
13920                                 (mtrmng->drop_tbl[domain],
13921                                 struct mlx5_flow_tbl_data_entry, tbl);
13922                                 if (i < MLX5_MTR_RTE_COLORS) {
13923                                         act_cnt->dr_jump_action[domain] =
13924                                                 tbl_data->jump.action;
13925                                         act_cnt->fate_action =
13926                                                 MLX5_FLOW_FATE_DROP;
13927                                 }
13928                                 if (i == RTE_COLOR_RED)
13929                                         mtr_policy->dr_drop_action[domain] =
13930                                                 tbl_data->jump.action;
13931                                 action_flags |= MLX5_FLOW_ACTION_DROP;
13932                                 break;
13933                         }
13934                         case RTE_FLOW_ACTION_TYPE_QUEUE:
13935                         {
13936                                 struct mlx5_hrxq *hrxq;
13937                                 uint32_t hrxq_idx;
13938                                 struct mlx5_flow_rss_desc rss_desc;
13939                                 struct mlx5_flow_meter_sub_policy *sub_policy =
13940                                 mtr_policy->sub_policys[domain][0];
13941
13942                                 if (i >= MLX5_MTR_RTE_COLORS)
13943                                         return -rte_mtr_error_set(error,
13944                                         ENOTSUP,
13945                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13946                                         NULL, "cannot create policy "
13947                                         "fate queue for this color");
13948                                 memset(&rss_desc, 0,
13949                                         sizeof(struct mlx5_flow_rss_desc));
13950                                 rss_desc.queue_num = 1;
13951                                 rss_desc.const_q = act->conf;
13952                                 hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
13953                                                     &rss_desc, &hrxq_idx);
13954                                 if (!hrxq)
13955                                         return -rte_mtr_error_set(error,
13956                                         ENOTSUP,
13957                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13958                                         NULL,
13959                                         "cannot create policy fate queue");
13960                                 sub_policy->rix_hrxq[i] = hrxq_idx;
13961                                 act_cnt->fate_action =
13962                                         MLX5_FLOW_FATE_QUEUE;
13963                                 dev_flow.handle->fate_action =
13964                                         MLX5_FLOW_FATE_QUEUE;
13965                                 if (action_flags & MLX5_FLOW_ACTION_MARK ||
13966                                     action_flags & MLX5_FLOW_ACTION_SET_TAG) {
13967                                         dev_flow.handle->rix_hrxq = hrxq_idx;
13968                                         flow_drv_rxq_flags_set(dev,
13969                                                 dev_flow.handle);
13970                                 }
13971                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
13972                                 break;
13973                         }
13974                         case RTE_FLOW_ACTION_TYPE_RSS:
13975                         {
13976                                 int rss_size;
13977
13978                                 if (i >= MLX5_MTR_RTE_COLORS)
13979                                         return -rte_mtr_error_set(error,
13980                                           ENOTSUP,
13981                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13982                                           NULL,
13983                                           "cannot create policy "
13984                                           "rss action for this color");
13985                                 /*
13986                                  * Save the RSS conf into the policy struct
13987                                  * for the translate stage.
13988                                  */
13989                                 rss_size = (int)rte_flow_conv
13990                                         (RTE_FLOW_CONV_OP_ACTION,
13991                                         NULL, 0, act, &flow_err);
13992                                 if (rss_size <= 0)
13993                                         return -rte_mtr_error_set(error,
13994                                           ENOTSUP,
13995                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13996                                           NULL, "Get the wrong "
13997                                           "rss action struct size");
13998                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
13999                                                 rss_size, 0, SOCKET_ID_ANY);
14000                                 if (!act_cnt->rss)
14001                                         return -rte_mtr_error_set(error,
14002                                           ENOTSUP,
14003                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14004                                           NULL,
14005                                           "Fail to malloc rss action memory");
14006                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
14007                                         act_cnt->rss, rss_size,
14008                                         act, &flow_err);
14009                                 if (ret < 0)
14010                                         return -rte_mtr_error_set(error,
14011                                           ENOTSUP,
14012                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14013                                           NULL, "Fail to save "
14014                                           "rss action into policy struct");
14015                                 act_cnt->fate_action =
14016                                         MLX5_FLOW_FATE_SHARED_RSS;
14017                                 action_flags |= MLX5_FLOW_ACTION_RSS;
14018                                 break;
14019                         }
14020                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
14021                         {
14022                                 struct mlx5_flow_dv_port_id_action_resource
14023                                         port_id_resource;
14024                                 uint32_t port_id = 0;
14025
14026                                 if (i >= MLX5_MTR_RTE_COLORS)
14027                                         return -rte_mtr_error_set(error,
14028                                         ENOTSUP,
14029                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14030                                         NULL, "cannot create policy "
14031                                         "port action for this color");
14032                                 memset(&port_id_resource, 0,
14033                                         sizeof(port_id_resource));
14034                                 if (flow_dv_translate_action_port_id(dev, act,
14035                                                 &port_id, &flow_err))
14036                                         return -rte_mtr_error_set(error,
14037                                         ENOTSUP,
14038                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14039                                         NULL, "cannot translate "
14040                                         "policy port action");
14041                                 port_id_resource.port_id = port_id;
14042                                 if (flow_dv_port_id_action_resource_register
14043                                         (dev, &port_id_resource,
14044                                         &dev_flow, &flow_err))
14045                                         return -rte_mtr_error_set(error,
14046                                         ENOTSUP,
14047                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14048                                         NULL, "cannot setup "
14049                                         "policy port action");
14050                                 act_cnt->rix_port_id_action =
14051                                         dev_flow.handle->rix_port_id_action;
14052                                 act_cnt->fate_action =
14053                                         MLX5_FLOW_FATE_PORT_ID;
14054                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
14055                                 break;
14056                         }
14057                         case RTE_FLOW_ACTION_TYPE_JUMP:
14058                         {
14059                                 uint32_t jump_group = 0;
14060                                 uint32_t table = 0;
14061                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14062                                 struct flow_grp_info grp_info = {
14063                                         .external = !!dev_flow.external,
14064                                         .transfer = !!transfer,
14065                                         .fdb_def_rule = !!priv->fdb_def_rule,
14066                                         .std_tbl_fix = 0,
14067                                         .skip_scale = dev_flow.skip_scale &
14068                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
14069                                 };
14070                                 struct mlx5_flow_meter_sub_policy *sub_policy =
14071                                 mtr_policy->sub_policys[domain][0];
14072
14073                                 if (i >= MLX5_MTR_RTE_COLORS)
14074                                         return -rte_mtr_error_set(error,
14075                                           ENOTSUP,
14076                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14077                                           NULL,
14078                                           "cannot create policy "
14079                                           "jump action for this color");
14080                                 jump_group =
14081                                 ((const struct rte_flow_action_jump *)
14082                                                         act->conf)->group;
14083                                 if (mlx5_flow_group_to_table(dev, NULL,
14084                                                        jump_group,
14085                                                        &table,
14086                                                        &grp_info, &flow_err))
14087                                         return -rte_mtr_error_set(error,
14088                                         ENOTSUP,
14089                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14090                                         NULL, "cannot setup "
14091                                         "policy jump action");
14092                                 sub_policy->jump_tbl[i] =
14093                                 flow_dv_tbl_resource_get(dev,
14094                                         table, egress,
14095                                         transfer,
14096                                         !!dev_flow.external,
14097                                         NULL, jump_group, 0,
14098                                         0, &flow_err);
14099                                 if (!sub_policy->jump_tbl[i])
14100                                         return -rte_mtr_error_set(error,
14101                                         ENOTSUP,
14102                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14103                                         NULL,
14104                                         "cannot create jump action");
14105                                 tbl_data = container_of
14106                                 (sub_policy->jump_tbl[i],
14107                                 struct mlx5_flow_tbl_data_entry, tbl);
14108                                 act_cnt->dr_jump_action[domain] =
14109                                         tbl_data->jump.action;
14110                                 act_cnt->fate_action =
14111                                         MLX5_FLOW_FATE_JUMP;
14112                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
14113                                 break;
14114                         }
14115                         default:
14116                                 return -rte_mtr_error_set(error, ENOTSUP,
14117                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14118                                           NULL, "action type not supported");
14119                         }
14120                 }
14121         }
14122         return 0;
14123 }
14124
14125 /**
14126  * Create policy actions per domain, lock free
14127  * (mutex should be acquired by caller).
14128  * Dispatches the per-domain action creation.
14129  *
14130  * @param[in] dev
14131  *   Pointer to the Ethernet device structure.
14132  * @param[in] mtr_policy
14133  *   Meter policy struct.
14134  * @param[in] actions
14135  *   Action specifications used to create meter actions, one list per color.
14136  * @param[out] error
14137  *   Perform verbose error reporting if not NULL. Initialized in case of
14138  *   error only.
14139  *
14140  * @return
14141  *   0 on success, otherwise negative errno value.
14142  */
14143 static int
14144 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
14145                       struct mlx5_flow_meter_policy *mtr_policy,
14146                       const struct rte_flow_action *actions[RTE_COLORS],
14147                       struct rte_mtr_error *error)
14148 {
14149         int ret, i;
14150         uint16_t sub_policy_num;
14151
14152         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14153                 sub_policy_num = (mtr_policy->sub_policy_num >>
14154                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14155                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14156                 if (sub_policy_num) {
14157                         ret = __flow_dv_create_domain_policy_acts(dev,
14158                                 mtr_policy, actions,
14159                                 (enum mlx5_meter_domain)i, error);
14160                         if (ret)
14161                                 return ret;
14162                 }
14163         }
14164         return 0;
14165 }
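/*
 * Illustrative sketch (not driver code): mtr_policy->sub_policy_num packs one
 * sub-policy count per meter domain into a single word,
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits apart. Decoding the count of one domain
 * looks like:
 *
 * @code
 * uint16_t n = (mtr_policy->sub_policy_num >>
 *               (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 *              MLX5_MTR_SUB_POLICY_NUM_MASK;
 * @endcode
 */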
14166
14167 /**
14168  * Query a DV flow rule for its statistics via DevX.
14169  *
14170  * @param[in] dev
14171  *   Pointer to Ethernet device.
14172  * @param[in] cnt_idx
14173  *   Index to the flow counter.
14174  * @param[out] data
14175  *   Data retrieved by the query.
14176  * @param[out] error
14177  *   Perform verbose error reporting if not NULL.
14178  *
14179  * @return
14180  *   0 on success, a negative errno value otherwise and rte_errno is set.
14181  */
14182 static int
14183 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
14184                     struct rte_flow_error *error)
14185 {
14186         struct mlx5_priv *priv = dev->data->dev_private;
14187         struct rte_flow_query_count *qc = data;
14188
14189         if (!priv->config.devx)
14190                 return rte_flow_error_set(error, ENOTSUP,
14191                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14192                                           NULL,
14193                                           "counters are not supported");
14194         if (cnt_idx) {
14195                 uint64_t pkts, bytes;
14196                 struct mlx5_flow_counter *cnt;
14197                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
14198
14199                 if (err)
14200                         return rte_flow_error_set(error, -err,
14201                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14202                                         NULL, "cannot read counters");
14203                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
14204                 qc->hits_set = 1;
14205                 qc->bytes_set = 1;
14206                 qc->hits = pkts - cnt->hits;
14207                 qc->bytes = bytes - cnt->bytes;
14208                 if (qc->reset) {
14209                         cnt->hits = pkts;
14210                         cnt->bytes = bytes;
14211                 }
14212                 return 0;
14213         }
14214         return rte_flow_error_set(error, EINVAL,
14215                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14216                                   NULL,
14217                                   "counters are not available");
14218 }
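/*
 * Illustrative sketch (not driver code): an application reaches
 * flow_dv_query_count() through the generic rte_flow_query() API with a
 * COUNT action, e.g.:
 *
 * @code
 * struct rte_flow_query_count qc = { .reset = 1 };
 * struct rte_flow_action count_act = {
 *         .type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 * struct rte_flow_error err;
 *
 * if (rte_flow_query(port_id, flow, &count_act, &qc, &err) == 0)
 *         printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                qc.hits, qc.bytes);
 * @endcode
 *
 * The returned values are deltas since the last reset: the driver keeps the
 * raw hardware readings in the counter and subtracts the saved baseline.
 */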
14219
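/**
 * Query an indirect (shared) action.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] handle
 *   Handle of the indirect action, encoding the action type and index.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */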
14220 static int
14221 flow_dv_action_query(struct rte_eth_dev *dev,
14222                      const struct rte_flow_action_handle *handle, void *data,
14223                      struct rte_flow_error *error)
14224 {
14225         struct mlx5_age_param *age_param;
14226         struct rte_flow_query_age *resp;
14227         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14228         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14229         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14230
14231         switch (type) {
14232         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14233                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
14234                 resp = data;
14235                 resp->aged = __atomic_load_n(&age_param->state,
14236                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
14237                                                                           1 : 0;
14238                 resp->sec_since_last_hit_valid = !resp->aged;
14239                 if (resp->sec_since_last_hit_valid)
14240                         resp->sec_since_last_hit = __atomic_load_n
14241                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
14242                 return 0;
14243         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14244                 return flow_dv_query_count(dev, idx, data, error);
14245         default:
14246                 return rte_flow_error_set(error, ENOTSUP,
14247                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14248                                           "action type query not supported");
14249         }
14250 }
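/*
 * Illustrative sketch (not driver code): an application queries an indirect
 * AGE action through rte_flow_action_handle_query(), which lands in
 * flow_dv_action_query() above:
 *
 * @code
 * struct rte_flow_query_age age = { 0 };
 * struct rte_flow_error err;
 *
 * if (rte_flow_action_handle_query(port_id, handle, &age, &err) == 0 &&
 *     age.sec_since_last_hit_valid)
 *         printf("idle for %u seconds\n", age.sec_since_last_hit);
 * @endcode
 */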
14251
14252 /**
14253  * Query a flow rule AGE action for aging information.
14254  *
14255  * @param[in] dev
14256  *   Pointer to Ethernet device.
14257  * @param[in] flow
14258  *   Pointer to the sub flow.
14259  * @param[out] data
14260  *   Data retrieved by the query.
14261  * @param[out] error
14262  *   Perform verbose error reporting if not NULL.
14263  *
14264  * @return
14265  *   0 on success, a negative errno value otherwise and rte_errno is set.
14266  */
14267 static int
14268 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
14269                   void *data, struct rte_flow_error *error)
14270 {
14271         struct rte_flow_query_age *resp = data;
14272         struct mlx5_age_param *age_param;
14273
14274         if (flow->age) {
14275                 struct mlx5_aso_age_action *act =
14276                                      flow_aso_age_get_by_idx(dev, flow->age);
14277
14278                 age_param = &act->age_params;
14279         } else if (flow->counter) {
14280                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
14281
14282                 if (!age_param || !age_param->timeout)
14283                         return rte_flow_error_set
14284                                         (error, EINVAL,
14285                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14286                                          NULL, "cannot read age data");
14287         } else {
14288                 return rte_flow_error_set(error, EINVAL,
14289                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14290                                           NULL, "age data not available");
14291         }
14292         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
14293                                      AGE_TMOUT ? 1 : 0;
14294         resp->sec_since_last_hit_valid = !resp->aged;
14295         if (resp->sec_since_last_hit_valid)
14296                 resp->sec_since_last_hit = __atomic_load_n
14297                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
14298         return 0;
14299 }
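/*
 * Illustrative sketch (not driver code): rule-level age queries go through
 * rte_flow_query() with an AGE action. Note the fallback above: when the
 * rule has no dedicated ASO age action, the aging information is taken from
 * the flow counter (aging by counter):
 *
 * @code
 * struct rte_flow_query_age age = { 0 };
 * struct rte_flow_action age_act = { .type = RTE_FLOW_ACTION_TYPE_AGE };
 * struct rte_flow_error err;
 *
 * rte_flow_query(port_id, flow, &age_act, &age, &err);
 * @endcode
 */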
14300
14301 /**
14302  * Query a flow.
14303  *
14304  * @see rte_flow_query()
14305  * @see rte_flow_ops
14306  */
14307 static int
14308 flow_dv_query(struct rte_eth_dev *dev,
14309               struct rte_flow *flow,
14310               const struct rte_flow_action *actions,
14311               void *data,
14312               struct rte_flow_error *error)
14313 {
14314         int ret = -EINVAL;
14315
14316         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
14317                 switch (actions->type) {
14318                 case RTE_FLOW_ACTION_TYPE_VOID:
14319                         break;
14320                 case RTE_FLOW_ACTION_TYPE_COUNT:
14321                         ret = flow_dv_query_count(dev, flow->counter, data,
14322                                                   error);
14323                         break;
14324                 case RTE_FLOW_ACTION_TYPE_AGE:
14325                         ret = flow_dv_query_age(dev, flow, data, error);
14326                         break;
14327                 default:
14328                         return rte_flow_error_set(error, ENOTSUP,
14329                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14330                                                   actions,
14331                                                   "action not supported");
14332                 }
14333         }
14334         return ret;
14335 }
14336
14337 /**
14338  * Destroy the meter table set.
14339  * Lock free (mutex should be acquired by caller).
14340  *
14341  * @param[in] dev
14342  *   Pointer to Ethernet device.
14343  * @param[in] fm
14344  *   Meter information table.
14345  */
14346 static void
14347 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
14348                         struct mlx5_flow_meter_info *fm)
14349 {
14350         struct mlx5_priv *priv = dev->data->dev_private;
14351         int i;
14352
14353         if (!fm || !priv->config.dv_flow_en)
14354                 return;
14355         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14356                 if (fm->drop_rule[i]) {
14357                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
14358                         fm->drop_rule[i] = NULL;
14359                 }
14360         }
14361 }
14362
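/**
 * Destroy the global meter drop tables, rules and matchers.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */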
14363 static void
14364 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
14365 {
14366         struct mlx5_priv *priv = dev->data->dev_private;
14367         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14368         struct mlx5_flow_tbl_data_entry *tbl;
14369         int i, j;
14370
14371         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14372                 if (mtrmng->def_rule[i]) {
14373                         claim_zero(mlx5_flow_os_destroy_flow
14374                                         (mtrmng->def_rule[i]));
14375                         mtrmng->def_rule[i] = NULL;
14376                 }
14377                 if (mtrmng->def_matcher[i]) {
14378                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
14379                                 struct mlx5_flow_tbl_data_entry, tbl);
14380                         mlx5_cache_unregister(&tbl->matchers,
14381                                       &mtrmng->def_matcher[i]->entry);
14382                         mtrmng->def_matcher[i] = NULL;
14383                 }
14384                 for (j = 0; j < MLX5_REG_BITS; j++) {
14385                         if (mtrmng->drop_matcher[i][j]) {
14386                                 tbl =
14387                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
14388                                              struct mlx5_flow_tbl_data_entry,
14389                                              tbl);
14390                                 mlx5_cache_unregister(&tbl->matchers,
14391                                         &mtrmng->drop_matcher[i][j]->entry);
14392                                 mtrmng->drop_matcher[i][j] = NULL;
14393                         }
14394                 }
14395                 if (mtrmng->drop_tbl[i]) {
14396                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14397                                 mtrmng->drop_tbl[i]);
14398                         mtrmng->drop_tbl[i] = NULL;
14399                 }
14400         }
14401 }
14402
14403 /* Number of meter flow actions: count and jump, or count and drop. */
14404 #define METER_ACTIONS 2
14405
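/**
 * Destroy the default policy of a single meter domain.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain whose default policy is destroyed.
 */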
14406 static void
14407 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
14408                               enum mlx5_meter_domain domain)
14409 {
14410         struct mlx5_priv *priv = dev->data->dev_private;
14411         struct mlx5_flow_meter_def_policy *def_policy =
14412                         priv->sh->mtrmng->def_policy[domain];
14413
14414         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
14415         mlx5_free(def_policy);
14416         priv->sh->mtrmng->def_policy[domain] = NULL;
14417 }
14418
14419 /**
14420  * Destroy the default policy table set.
14421  *
14422  * @param[in] dev
14423  *   Pointer to Ethernet device.
14424  */
14425 static void
14426 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
14427 {
14428         struct mlx5_priv *priv = dev->data->dev_private;
14429         int i;
14430
14431         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
14432                 if (priv->sh->mtrmng->def_policy[i])
14433                         __flow_dv_destroy_domain_def_policy(dev,
14434                                         (enum mlx5_meter_domain)i);
14435         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
14436 }
14437
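/**
 * Create a meter policy flow matching a specific color value.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the REG_C register carrying the color.
 * @param[in] color
 *   Color value to match.
 * @param[in] matcher_object
 *   Matcher to attach the flow to.
 * @param[in] actions_n
 *   Number of actions.
 * @param[in] actions
 *   Pointer to the action array.
 * @param[in] is_default_policy
 *   True for the default policy, false otherwise.
 * @param[out] rule
 *   Created flow rule on success.
 * @param[in] attr
 *   Pointer to the flow attributes.
 *
 * @return
 *   0 on success, -1 otherwise.
 */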
14438 static int
14439 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
14440                         uint32_t color_reg_c_idx,
14441                         enum rte_color color, void *matcher_object,
14442                         int actions_n, void *actions,
14443                         bool is_default_policy, void **rule,
14444                         const struct rte_flow_attr *attr)
14445 {
14446         int ret;
14447         struct mlx5_flow_dv_match_params value = {
14448                 .size = sizeof(value.buf) -
14449                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14450         };
14451         struct mlx5_flow_dv_match_params matcher = {
14452                 .size = sizeof(matcher.buf) -
14453                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14454         };
14455         struct mlx5_priv *priv = dev->data->dev_private;
14456
14457         if (!is_default_policy && (priv->representor || priv->master)) {
14458                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
14459                                                    value.buf, NULL, attr)) {
14460                         DRV_LOG(ERR,
14461                         "Failed to create meter policy flow with port.");
14462                         return -1;
14463                 }
14464         }
14465         flow_dv_match_meta_reg(matcher.buf, value.buf,
14466                                 (enum modify_reg)color_reg_c_idx,
14467                                 rte_col_2_mlx5_col(color),
14468                                 UINT32_MAX);
14469         ret = mlx5_flow_os_create_flow(matcher_object,
14470                         (void *)&value, actions_n, actions, rule);
14471         if (ret) {
14472                 DRV_LOG(ERR, "Failed to create meter policy flow.");
14473                 return -1;
14474         }
14475         return 0;
14476 }
14477
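/**
 * Register a color matcher in the policy table.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the REG_C register carrying the color.
 * @param[in] priority
 *   Matcher priority, one per color.
 * @param[in] sub_policy
 *   Pointer to the sub policy table.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] is_default_policy
 *   True for the default policy, false otherwise.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, -1 otherwise.
 */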
14478 static int
14479 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
14480                         uint32_t color_reg_c_idx,
14481                         uint16_t priority,
14482                         struct mlx5_flow_meter_sub_policy *sub_policy,
14483                         const struct rte_flow_attr *attr,
14484                         bool is_default_policy,
14485                         struct rte_flow_error *error)
14486 {
14487         struct mlx5_cache_entry *entry;
14488         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
14489         struct mlx5_flow_dv_matcher matcher = {
14490                 .mask = {
14491                         .size = sizeof(matcher.mask.buf) -
14492                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14493                 },
14494                 .tbl = tbl_rsc,
14495         };
14496         struct mlx5_flow_dv_match_params value = {
14497                 .size = sizeof(value.buf) -
14498                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14499         };
14500         struct mlx5_flow_cb_ctx ctx = {
14501                 .error = error,
14502                 .data = &matcher,
14503         };
14504         struct mlx5_flow_tbl_data_entry *tbl_data;
14505         struct mlx5_priv *priv = dev->data->dev_private;
14506         uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
14507
14508         if (!is_default_policy && (priv->representor || priv->master)) {
14509                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
14510                                                    value.buf, NULL, attr)) {
14511                         DRV_LOG(ERR,
14512                         "Failed to register meter drop matcher with port.");
14513                         return -1;
14514                 }
14515         }
14516         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
14517         if (priority < RTE_COLOR_RED)
14518                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14519                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
14520         matcher.priority = priority;
14521         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
14522                                         matcher.mask.size);
14523         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14524         if (!entry) {
14525                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
14526                 return -1;
14527         }
14528         sub_policy->color_matcher[priority] =
14529                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
14530         return 0;
14531 }
14532
14533 /**
14534  * Create the policy rules per domain.
14535  *
14536  * @param[in] dev
14537  *   Pointer to Ethernet device.
14538  * @param[in] sub_policy
14539  *   Pointer to the sub policy table.
14540  * @param[in] egress
14541  *   Direction of the table.
14542  * @param[in] transfer
14543  *   E-Switch or NIC flow.
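 * @param[in] is_default_policy
 *   True for the default policy, false otherwise.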
14544  * @param[in] acts
14545  *   Pointer to policy action list per color.
14546  *
14547  * @return
14548  *   0 on success, -1 otherwise.
14549  */
14550 static int
14551 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
14552                 struct mlx5_flow_meter_sub_policy *sub_policy,
14553                 uint8_t egress, uint8_t transfer, bool is_default_policy,
14554                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
14555 {
14556         struct rte_flow_error flow_err;
14557         uint32_t color_reg_c_idx;
14558         struct rte_flow_attr attr = {
14559                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
14560                 .priority = 0,
14561                 .ingress = 0,
14562                 .egress = !!egress,
14563                 .transfer = !!transfer,
14564                 .reserved = 0,
14565         };
14566         int i;
14567         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
14568
14569         if (ret < 0)
14570                 return -1;
14571         /* Create policy table with POLICY level. */
14572         if (!sub_policy->tbl_rsc)
14573                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
14574                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
14575                                 egress, transfer, false, NULL, 0, 0,
14576                                 sub_policy->idx, &flow_err);
14577         if (!sub_policy->tbl_rsc) {
14578                 DRV_LOG(ERR,
14579                         "Failed to create meter sub policy table.");
14580                 return -1;
14581         }
14582         /* Prepare matchers. */
14583         color_reg_c_idx = ret;
14584         for (i = 0; i < RTE_COLORS; i++) {
14585                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
14586                         continue;
14587                 attr.priority = i;
14588                 if (!sub_policy->color_matcher[i]) {
14589                         /* Create matchers for Color. */
14590                         if (__flow_dv_create_policy_matcher(dev,
14591                                 color_reg_c_idx, i, sub_policy,
14592                                 &attr, is_default_policy, &flow_err))
14593                                 return -1;
14594                 }
14595                 /* Create flow, matching color. */
14596                 if (acts[i].actions_n)
14597                         if (__flow_dv_create_policy_flow(dev,
14598                                 color_reg_c_idx, (enum rte_color)i,
14599                                 sub_policy->color_matcher[i]->matcher_object,
14600                                 acts[i].actions_n,
14601                                 acts[i].dv_actions,
14602                                 is_default_policy,
14603                                 &sub_policy->color_rule[i],
14604                                 &attr))
14605                                 return -1;
14606         }
14607         return 0;
14608 }
14609
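/**
 * Prepare the per-color action lists and create the policy rules
 * for one domain of a sub policy.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Pointer to meter policy table.
 * @param[in] sub_policy
 *   Pointer to the sub policy table.
 * @param[in] domain
 *   Meter domain.
 *
 * @return
 *   0 on success, -1 otherwise.
 */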
14610 static int
14611 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
14612                         struct mlx5_flow_meter_policy *mtr_policy,
14613                         struct mlx5_flow_meter_sub_policy *sub_policy,
14614                         uint32_t domain)
14615 {
14616         struct mlx5_priv *priv = dev->data->dev_private;
14617         struct mlx5_meter_policy_acts acts[RTE_COLORS];
14618         struct mlx5_flow_dv_tag_resource *tag;
14619         struct mlx5_flow_dv_port_id_action_resource *port_action;
14620         struct mlx5_hrxq *hrxq;
14621         uint8_t egress, transfer;
14622         int i;
14623
14624         for (i = 0; i < RTE_COLORS; i++) {
14625                 acts[i].actions_n = 0;
14626                 if (i == RTE_COLOR_YELLOW)
14627                         continue;
14628                 if (i == RTE_COLOR_RED) {
14629                         /* Only support drop on red. */
14630                         acts[i].dv_actions[0] =
14631                         mtr_policy->dr_drop_action[domain];
14632                         acts[i].actions_n = 1;
14633                         continue;
14634                 }
14635                 if (mtr_policy->act_cnt[i].rix_mark) {
14636                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
14637                                         mtr_policy->act_cnt[i].rix_mark);
14638                         if (!tag) {
14639                                 DRV_LOG(ERR, "Failed to find "
14640                                 "mark action for policy.");
14641                                 return -1;
14642                         }
14643                         acts[i].dv_actions[acts[i].actions_n] =
14644                                                 tag->action;
14645                         acts[i].actions_n++;
14646                 }
14647                 if (mtr_policy->act_cnt[i].modify_hdr) {
14648                         acts[i].dv_actions[acts[i].actions_n] =
14649                         mtr_policy->act_cnt[i].modify_hdr->action;
14650                         acts[i].actions_n++;
14651                 }
14652                 if (mtr_policy->act_cnt[i].fate_action) {
14653                         switch (mtr_policy->act_cnt[i].fate_action) {
14654                         case MLX5_FLOW_FATE_PORT_ID:
14655                                 port_action = mlx5_ipool_get
14656                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
14657                                 mtr_policy->act_cnt[i].rix_port_id_action);
14658                                 if (!port_action) {
14659                                         DRV_LOG(ERR, "Failed to find "
14660                                                 "port action for policy.");
14661                                         return -1;
14662                                 }
14663                                 acts[i].dv_actions[acts[i].actions_n] =
14664                                 port_action->action;
14665                                 acts[i].actions_n++;
14666                                 break;
14667                         case MLX5_FLOW_FATE_DROP:
14668                         case MLX5_FLOW_FATE_JUMP:
14669                                 acts[i].dv_actions[acts[i].actions_n] =
14670                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
14671                                 acts[i].actions_n++;
14672                                 break;
14673                         case MLX5_FLOW_FATE_SHARED_RSS:
14674                         case MLX5_FLOW_FATE_QUEUE:
14675                                 hrxq = mlx5_ipool_get
14676                                 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
14677                                 sub_policy->rix_hrxq[i]);
14678                                 if (!hrxq) {
14679                                         DRV_LOG(ERR, "Failed to find "
14680                                                 "queue action for policy.");
14681                                         return -1;
14682                                 }
14683                                 acts[i].dv_actions[acts[i].actions_n] =
14684                                 hrxq->action;
14685                                 acts[i].actions_n++;
14686                                 break;
14687                         default:
14688                                 /* Other fate actions: do nothing. */
14689                                 break;
14690                         }
14691                 }
14692         }
14693         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14694         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14695         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
14696                                 egress, transfer, false, acts)) {
14697                 DRV_LOG(ERR,
14698                 "Failed to create policy rules per domain.");
14699                 return -1;
14700         }
14701         return 0;
14702 }
14703
14704 /**
14705  * Create the policy rules.
14706  *
14707  * @param[in] dev
14708  *   Pointer to Ethernet device.
14709  * @param[in,out] mtr_policy
14710  *   Pointer to meter policy table.
14711  *
14712  * @return
14713  *   0 on success, -1 otherwise.
14714  */
14715 static int
14716 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
14717                              struct mlx5_flow_meter_policy *mtr_policy)
14718 {
14719         int i;
14720         uint16_t sub_policy_num;
14721
14722         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14723                 sub_policy_num = (mtr_policy->sub_policy_num >>
14724                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14725                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14726                 if (!sub_policy_num)
14727                         continue;
14728                 /* Prepare actions list and create policy rules. */
14729                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
14730                         mtr_policy->sub_policys[i][0], i)) {
14731                         DRV_LOG(ERR,
14732                         "Failed to create policy action list per domain.");
14733                         return -1;
14734                 }
14735         }
14736         return 0;
14737 }
14738
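/**
 * Create the default policy table set for a single meter domain:
 * green traffic jumps to the meter suffix table, red traffic jumps
 * to the drop table.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain.
 *
 * @return
 *   0 on success, -1 otherwise.
 */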
14739 static int
14740 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
14741 {
14742         struct mlx5_priv *priv = dev->data->dev_private;
14743         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14744         struct mlx5_flow_meter_def_policy *def_policy;
14745         struct mlx5_flow_tbl_resource *jump_tbl;
14746         struct mlx5_flow_tbl_data_entry *tbl_data;
14747         uint8_t egress, transfer;
14748         struct rte_flow_error error;
14749         struct mlx5_meter_policy_acts acts[RTE_COLORS];
14750         int ret;
14751
14752         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14753         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14754         def_policy = mtrmng->def_policy[domain];
14755         if (!def_policy) {
14756                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
14757                         sizeof(struct mlx5_flow_meter_def_policy),
14758                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
14759                 if (!def_policy) {
14760                         DRV_LOG(ERR, "Failed to alloc "
14761                                         "default policy table.");
14762                         goto def_policy_error;
14763                 }
14764                 mtrmng->def_policy[domain] = def_policy;
14765                 /* Create the meter suffix table with SUFFIX level. */
14766                 jump_tbl = flow_dv_tbl_resource_get(dev,
14767                                 MLX5_FLOW_TABLE_LEVEL_METER,
14768                                 egress, transfer, false, NULL, 0,
14769                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
14770                 if (!jump_tbl) {
14771                         DRV_LOG(ERR,
14772                                 "Failed to create meter suffix table.");
14773                         goto def_policy_error;
14774                 }
14775                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
14776                 tbl_data = container_of(jump_tbl,
14777                                 struct mlx5_flow_tbl_data_entry, tbl);
14778                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
14779                                                 tbl_data->jump.action;
14780                 acts[RTE_COLOR_GREEN].dv_actions[0] =
14781                                                 tbl_data->jump.action;
14782                 acts[RTE_COLOR_GREEN].actions_n = 1;
14783                 /* Create jump action to the drop table. */
14784                 if (!mtrmng->drop_tbl[domain]) {
14785                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
14786                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
14787                                 egress, transfer, false, NULL, 0,
14788                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
14789                         if (!mtrmng->drop_tbl[domain]) {
14790                                 DRV_LOG(ERR, "Failed to create "
14791                                 "meter drop table for default policy.");
14792                                 goto def_policy_error;
14793                         }
14794                 }
14795                 tbl_data = container_of(mtrmng->drop_tbl[domain],
14796                                 struct mlx5_flow_tbl_data_entry, tbl);
14797                 def_policy->dr_jump_action[RTE_COLOR_RED] =
14798                                                 tbl_data->jump.action;
14799                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
14800                 acts[RTE_COLOR_RED].actions_n = 1;
14801                 /* Create default policy rules. */
14802                 ret = __flow_dv_create_domain_policy_rules(dev,
14803                                         &def_policy->sub_policy,
14804                                         egress, transfer, true, acts);
14805                 if (ret) {
14806                         DRV_LOG(ERR, "Failed to create "
14807                                 "default policy rules.");
14808                         goto def_policy_error;
14809                 }
14810         }
14811         return 0;
14812 def_policy_error:
14813         __flow_dv_destroy_domain_def_policy(dev,
14814                         (enum mlx5_meter_domain)domain);
14815         return -1;
14816 }
14817
14818 /**
14819  * Create the default policy table set.
14820  *
14821  * @param[in] dev
14822  *   Pointer to Ethernet device.
14823  * @return
14824  *   0 on success, -1 otherwise.
14825  */
14826 static int
14827 flow_dv_create_def_policy(struct rte_eth_dev *dev)
14828 {
14829         struct mlx5_priv *priv = dev->data->dev_private;
14830         int i;
14831
14832         /* Non-termination policy table. */
14833         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14834                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
14835                         continue;
14836                 if (__flow_dv_create_domain_def_policy(dev, i)) {
14837                         DRV_LOG(ERR,
14838                         "Failed to create default policy");
14839                         return -1;
14840                 }
14841         }
14842         return 0;
14843 }
14844
14845 /**
14846  * Create the needed meter tables.
14847  * Lock free (mutex should be acquired by caller).
14848  *
14849  * @param[in] dev
14850  *   Pointer to Ethernet device.
14851  * @param[in] fm
14852  *   Meter information table.
14853  * @param[in] mtr_idx
14854  *   Meter index.
14855  * @param[in] domain_bitmap
14856  *   Domain bitmap.
14857  * @return
14858  *   0 on success, -1 otherwise.
14859  */
14860 static int
14861 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
14862                         struct mlx5_flow_meter_info *fm,
14863                         uint32_t mtr_idx,
14864                         uint8_t domain_bitmap)
14865 {
14866         struct mlx5_priv *priv = dev->data->dev_private;
14867         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14868         struct rte_flow_error error;
14869         struct mlx5_flow_tbl_data_entry *tbl_data;
14870         uint8_t egress, transfer;
14871         void *actions[METER_ACTIONS];
14872         int domain, ret, i;
14873         struct mlx5_flow_counter *cnt;
14874         struct mlx5_flow_dv_match_params value = {
14875                 .size = sizeof(value.buf) -
14876                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14877         };
14878         struct mlx5_flow_dv_match_params matcher_para = {
14879                 .size = sizeof(matcher_para.buf) -
14880                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14881         };
14882         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
14883                                                      0, &error);
14884         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
14885         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
14886         struct mlx5_cache_entry *entry;
14887         struct mlx5_flow_dv_matcher matcher = {
14888                 .mask = {
14889                         .size = sizeof(matcher.mask.buf) -
14890                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14891                 },
14892         };
14893         struct mlx5_flow_dv_matcher *drop_matcher;
14894         struct mlx5_flow_cb_ctx ctx = {
14895                 .error = &error,
14896                 .data = &matcher,
14897         };
14898
14899         if (!priv->mtr_en || mtr_id_reg_c < 0) {
14900                 rte_errno = ENOTSUP;
14901                 return -1;
14902         }
14903         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
14904                 if (!(domain_bitmap & (1 << domain)) ||
14905                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
14906                         continue;
14907                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14908                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14909                 /* Create the drop table with METER DROP level. */
14910                 if (!mtrmng->drop_tbl[domain]) {
14911                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
14912                                         MLX5_FLOW_TABLE_LEVEL_METER,
14913                                         egress, transfer, false, NULL, 0,
14914                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
14915                         if (!mtrmng->drop_tbl[domain]) {
14916                                 DRV_LOG(ERR, "Failed to create meter drop table.");
14917                                 goto policy_error;
14918                         }
14919                 }
14920                 /* Create default matcher in drop table. */
14921                 matcher.tbl = mtrmng->drop_tbl[domain];
14922                 tbl_data = container_of(mtrmng->drop_tbl[domain],
14923                                 struct mlx5_flow_tbl_data_entry, tbl);
14924                 if (!mtrmng->def_matcher[domain]) {
14925                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14926                                        (enum modify_reg)mtr_id_reg_c,
14927                                        0, 0);
14928                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
14929                         matcher.crc = rte_raw_cksum
14930                                         ((const void *)matcher.mask.buf,
14931                                         matcher.mask.size);
14932                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14933                         if (!entry) {
14934                                 DRV_LOG(ERR, "Failed to register meter "
14935                                 "drop default matcher.");
14936                                 goto policy_error;
14937                         }
14938                         mtrmng->def_matcher[domain] = container_of(entry,
14939                         struct mlx5_flow_dv_matcher, entry);
14940                 }
14941                 /* Create default rule in drop table. */
14942                 if (!mtrmng->def_rule[domain]) {
14943                         i = 0;
14944                         actions[i++] = priv->sh->dr_drop_action;
14945                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
14946                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
14947                         ret = mlx5_flow_os_create_flow
14948                                 (mtrmng->def_matcher[domain]->matcher_object,
14949                                 (void *)&value, i, actions,
14950                                 &mtrmng->def_rule[domain]);
14951                         if (ret) {
14952                                 DRV_LOG(ERR, "Failed to create meter "
14953                                 "default drop rule for drop table.");
14954                                 goto policy_error;
14955                         }
14956                 }
14957                 if (!fm->drop_cnt)
14958                         continue;
14959                 MLX5_ASSERT(mtrmng->max_mtr_bits);
14960                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
14961                         /* Create matchers for Drop. */
14962                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14963                                         (enum modify_reg)mtr_id_reg_c, 0,
14964                                         (mtr_id_mask << mtr_id_offset));
14965                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
14966                         matcher.crc = rte_raw_cksum
14967                                         ((const void *)matcher.mask.buf,
14968                                         matcher.mask.size);
14969                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14970                         if (!entry) {
14971                                 DRV_LOG(ERR,
14972                                 "Failed to register meter drop matcher.");
14973                                 goto policy_error;
14974                         }
14975                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
14976                                 container_of(entry, struct mlx5_flow_dv_matcher,
14977                                              entry);
14978                 }
14979                 drop_matcher =
14980                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
14981                 /* Create drop rule, matching meter_id only. */
14982                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
14983                                 (enum modify_reg)mtr_id_reg_c,
14984                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
14985                 i = 0;
14986                 cnt = flow_dv_counter_get_by_idx(dev,
14987                                         fm->drop_cnt, NULL);
14988                 actions[i++] = cnt->action;
14989                 actions[i++] = priv->sh->dr_drop_action;
14990                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
14991                                                (void *)&value, i, actions,
14992                                                &fm->drop_rule[domain]);
14993                 if (ret) {
14994                         DRV_LOG(ERR, "Failed to create meter "
14995                                 "drop rule for drop table.");
14996                         goto policy_error;
14997                 }
14998         }
14999         return 0;
15000 policy_error:
15001         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15002                 if (fm->drop_rule[i]) {
15003                         claim_zero(mlx5_flow_os_destroy_flow
15004                                 (fm->drop_rule[i]));
15005                         fm->drop_rule[i] = NULL;
15006                 }
15007         }
15008         return -1;
15009 }
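/*
 * Illustrative sketch (not driver code): in the drop table above, the meter
 * ID is matched in a REG_C register. When the register is shared with the
 * color (mtr_reg_share), the low MLX5_MTR_COLOR_BITS carry the color and the
 * meter ID sits above them:
 *
 * @code
 * uint8_t  off  = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
 * uint32_t mask = ((UINT32_C(1) << mtrmng->max_mtr_bits) - 1) << off;
 * uint32_t val  = mtr_idx << off;
 * @endcode
 *
 * The drop rule then matches (reg_c & mask) == val for its meter only.
 */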
15010
15011 /**
15012  * Find the policy table for prefix table with RSS.
15013  *
15014  * @param[in] dev
15015  *   Pointer to Ethernet device.
15016  * @param[in] mtr_policy
15017  *   Pointer to meter policy table.
15018  * @param[in] rss_desc
15019  *   Pointer to the rss_desc array, one entry per color.
15020  * @return
15021  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
15022  */
15023 static struct mlx5_flow_meter_sub_policy *
15024 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
15025                 struct mlx5_flow_meter_policy *mtr_policy,
15026                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
15027 {
15028         struct mlx5_priv *priv = dev->data->dev_private;
15029         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
15030         uint32_t sub_policy_idx = 0;
15031         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
15032         uint32_t i, j;
15033         struct mlx5_hrxq *hrxq;
15034         struct mlx5_flow_handle dh;
15035         struct mlx5_meter_policy_action_container *act_cnt;
15036         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
15037         uint16_t sub_policy_num;
15038
15039         rte_spinlock_lock(&mtr_policy->sl);
15040         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15041                 if (!rss_desc[i])
15042                         continue;
15043                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
15044                 if (!hrxq_idx[i]) {
15045                         rte_spinlock_unlock(&mtr_policy->sl);
15046                         return NULL;
15047                 }
15048         }
15049         sub_policy_num = (mtr_policy->sub_policy_num >>
15050                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15051                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15052         for (i = 0; i < sub_policy_num; i++) {
15054                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
15055                         if (rss_desc[j] &&
15056                                 hrxq_idx[j] !=
15057                         mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
15058                                 break;
15059                 }
15060                 if (j >= MLX5_MTR_RTE_COLORS) {
15061                         /*
15062                          * Found the sub policy table with
15063                          * the same queue per color.
15064                          */
15065                         rte_spinlock_unlock(&mtr_policy->sl);
15066                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
15067                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
15068                         return mtr_policy->sub_policys[domain][i];
15069                 }
15070         }
15071         /* Create sub policy. */
15072         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
15073                 /* Reuse the first dummy sub_policy. */
15074                 sub_policy = mtr_policy->sub_policys[domain][0];
15075                 sub_policy_idx = sub_policy->idx;
15076         } else {
15077                 sub_policy = mlx5_ipool_zmalloc
15078                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15079                                 &sub_policy_idx);
15080                 if (!sub_policy ||
15081                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
15082                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
15083                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
15084                         goto rss_sub_policy_error;
15085                 }
15086                 sub_policy->idx = sub_policy_idx;
15087                 sub_policy->main_policy = mtr_policy;
15088         }
15089         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15090                 if (!rss_desc[i])
15091                         continue;
15092                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
15093                 /*
15094                  * Overwrite the last action from
15095                  * RSS action to Queue action.
15096                  */
15097                 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
15098                               hrxq_idx[i]);
15099                 if (!hrxq) {
15100                         DRV_LOG(ERR, "Failed to create policy hrxq");
15101                         goto rss_sub_policy_error;
15102                 }
15103                 act_cnt = &mtr_policy->act_cnt[i];
15104                 if (act_cnt->rix_mark || act_cnt->modify_hdr) {
15105                         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15106                         if (act_cnt->rix_mark)
15107                                 dh.mark = 1;
15108                         dh.fate_action = MLX5_FLOW_FATE_QUEUE;
15109                         dh.rix_hrxq = hrxq_idx[i];
15110                         flow_drv_rxq_flags_set(dev, &dh);
15111                 }
15112         }
15113         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
15114                 sub_policy, domain)) {
15115                 DRV_LOG(ERR, "Failed to create policy "
15116                         "rules per domain.");
15117                 goto rss_sub_policy_error;
15118         }
15119         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
15120                 i = (mtr_policy->sub_policy_num >>
15121                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15122                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15123                 mtr_policy->sub_policys[domain][i] = sub_policy;
15124                 i++;
15125                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
15126                         goto rss_sub_policy_error;
15127                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
15128                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
15129                 mtr_policy->sub_policy_num |=
15130                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
15131                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
15132         }
15133         rte_spinlock_unlock(&mtr_policy->sl);
15134         return sub_policy;
15135 rss_sub_policy_error:
15136         if (sub_policy) {
15137                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
15138                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
15139                         i = (mtr_policy->sub_policy_num >>
15140                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15141                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15142                         mtr_policy->sub_policys[domain][i] = NULL;
15143                         mlx5_ipool_free
15144                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15145                                         sub_policy->idx);
15146                 }
15147         }
15148         if (sub_policy_idx)
15149                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15150                         sub_policy_idx);
15151         rte_spinlock_unlock(&mtr_policy->sl);
15152         return NULL;
15153 }
15154
15155 /**
15156  * Validate the batch counter support in root table.
15157  *
15158  * Create a simple flow with an invalid counter and a drop action on the
15159  * root table to detect whether batch counters with offset are supported.
15160  *
15161  * @param[in] dev
15162  *   Pointer to rte_eth_dev structure.
15163  *
15164  * @return
15165  *   0 on success, a negative errno value otherwise and rte_errno is set.
15166  */
15167 int
15168 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
15169 {
15170         struct mlx5_priv *priv = dev->data->dev_private;
15171         struct mlx5_dev_ctx_shared *sh = priv->sh;
15172         struct mlx5_flow_dv_match_params mask = {
15173                 .size = sizeof(mask.buf),
15174         };
15175         struct mlx5_flow_dv_match_params value = {
15176                 .size = sizeof(value.buf),
15177         };
15178         struct mlx5dv_flow_matcher_attr dv_attr = {
15179                 .type = IBV_FLOW_ATTR_NORMAL,
15180                 .priority = 0,
15181                 .match_criteria_enable = 0,
15182                 .match_mask = (void *)&mask,
15183         };
15184         void *actions[2] = { 0 };
15185         struct mlx5_flow_tbl_resource *tbl = NULL;
15186         struct mlx5_devx_obj *dcs = NULL;
15187         void *matcher = NULL;
15188         void *flow = NULL;
15189         int ret = -1;
15190
15191         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
15192                                         0, 0, 0, NULL);
15193         if (!tbl)
15194                 goto err;
15195         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
15196         if (!dcs)
15197                 goto err;
15198         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
15199                                                     &actions[0]);
15200         if (ret)
15201                 goto err;
15202         actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
15203                                           priv->drop_queue.hrxq->action;
15204         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
15205         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
15206                                                &matcher);
15207         if (ret)
15208                 goto err;
15209         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
15210                                        actions, &flow);
15211 err:
15212         /*
15213          * If batch counter with offset is not supported, the driver does
15214          * not validate the invalid offset value and flow creation succeeds.
15215          * In that case, batch counter is not supported in the root table.
15216          *
15217          * Otherwise, if flow creation fails, counter offset is supported.
15218          */
15219         if (flow) {
15220                 DRV_LOG(INFO, "Batch counter is not supported in root "
15221                               "table. Switch to fallback mode.");
15222                 rte_errno = ENOTSUP;
15223                 ret = -rte_errno;
15224                 claim_zero(mlx5_flow_os_destroy_flow(flow));
15225         } else {
15226                 /* Check matcher to make sure validation failed at flow create. */
15227                 if (!matcher || errno != EINVAL)
15228                         DRV_LOG(ERR, "Unexpected error in counter offset "
15229                                      "support detection");
15230                 ret = 0;
15231         }
15232         if (actions[0])
15233                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
15234         if (matcher)
15235                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
15236         if (tbl)
15237                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
15238         if (dcs)
15239                 claim_zero(mlx5_devx_cmd_destroy(dcs));
15240         return ret;
15241 }
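/*
 * Illustrative usage sketch (not part of the driver): a start-up path could
 * use the probe above to decide whether bulk counters with offset are usable
 * on the root table and otherwise arm the fallback mode. The counter manager
 * flag name below is an assumption for illustration only.
 *
 *	static void
 *	counter_mode_probe(struct rte_eth_dev *dev)
 *	{
 *		struct mlx5_priv *priv = dev->data->dev_private;
 *
 *		if (mlx5_flow_dv_discover_counter_offset_support(dev))
 *			priv->sh->cmng.counter_fallback = 1;
 *	}
 */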
15242
15243 /**
15244  * Query a devx counter.
15245  *
15246  * @param[in] dev
15247  *   Pointer to the Ethernet device structure.
15248  * @param[in] counter
15249  *   Index to the flow counter.
15250  * @param[in] clear
15251  *   Set to clear the counter statistics.
15252  * @param[out] pkts
15253  *   The statistics value of packets.
15254  * @param[out] bytes
15255  *   The statistics value of bytes.
15256  *
15257  * @return
15258  *   0 on success, otherwise return -1.
15259  */
15260 static int
15261 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
15262                       uint64_t *pkts, uint64_t *bytes)
15263 {
15264         struct mlx5_priv *priv = dev->data->dev_private;
15265         struct mlx5_flow_counter *cnt;
15266         uint64_t inn_pkts, inn_bytes;
15267         int ret;
15268
15269         if (!priv->config.devx)
15270                 return -1;
15271
15272         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
15273         if (ret)
15274                 return -1;
15275         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
15276         *pkts = inn_pkts - cnt->hits;
15277         *bytes = inn_bytes - cnt->bytes;
15278         if (clear) {
15279                 cnt->hits = inn_pkts;
15280                 cnt->bytes = inn_bytes;
15281         }
15282         return 0;
15283 }
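/*
 * Illustrative sketch: the query above reports deltas against the baseline
 * stored in the counter (cnt->hits/cnt->bytes), so polling with clear set
 * yields per-interval statistics:
 *
 *	uint64_t pkts, bytes;
 *
 *	flow_dv_counter_query(dev, counter, true, &pkts, &bytes);
 *	... traffic runs ...
 *	flow_dv_counter_query(dev, counter, true, &pkts, &bytes);
 *	pkts/bytes now hold only the traffic of the last interval.
 */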
15284
15285 /**
15286  * Get aged-out flows.
15287  *
15288  * @param[in] dev
15289  *   Pointer to the Ethernet device structure.
15290  * @param[in] context
15291  *   The address of an array of pointers to the aged-out flow contexts.
15292  * @param[in] nb_contexts
15293  *   The length of the context array.
15294  * @param[out] error
15295  *   Perform verbose error reporting if not NULL. Initialized in case of
15296  *   error only.
15297  *
15298  * @return
15299  *   the number of aged-out flow contexts returned on success, otherwise
15300  *   a negative errno value.
15301  *   If nb_contexts is 0, return the total number of aged-out flows.
15302  *   If nb_contexts is not 0, return the number of aged-out flows
15303  *   reported in the context array.
15304  */
15305 static int
15306 flow_get_aged_flows(struct rte_eth_dev *dev,
15307                     void **context,
15308                     uint32_t nb_contexts,
15309                     struct rte_flow_error *error)
15310 {
15311         struct mlx5_priv *priv = dev->data->dev_private;
15312         struct mlx5_age_info *age_info;
15313         struct mlx5_age_param *age_param;
15314         struct mlx5_flow_counter *counter;
15315         struct mlx5_aso_age_action *act;
15316         int nb_flows = 0;
15317
15318         if (nb_contexts && !context)
15319                 return rte_flow_error_set(error, EINVAL,
15320                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15321                                           NULL, "empty context");
15322         age_info = GET_PORT_AGE_INFO(priv);
15323         rte_spinlock_lock(&age_info->aged_sl);
15324         LIST_FOREACH(act, &age_info->aged_aso, next) {
15325                 nb_flows++;
15326                 if (nb_contexts) {
15327                         context[nb_flows - 1] =
15328                                                 act->age_params.context;
15329                         if (!(--nb_contexts))
15330                                 break;
15331                 }
15332         }
15333         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
15334                 nb_flows++;
15335                 if (nb_contexts) {
15336                         age_param = MLX5_CNT_TO_AGE(counter);
15337                         context[nb_flows - 1] = age_param->context;
15338                         if (!(--nb_contexts))
15339                                 break;
15340                 }
15341         }
15342         rte_spinlock_unlock(&age_info->aged_sl);
15343         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
15344         return nb_flows;
15345 }
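/*
 * Illustrative application-side sketch: the handler above backs
 * rte_flow_get_aged_flows(). A typical two-step pattern queries the total
 * first, then fetches the contexts (error handling omitted):
 *
 *	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
 *
 *	if (n > 0) {
 *		void **ctx = calloc(n, sizeof(*ctx));
 *
 *		n = rte_flow_get_aged_flows(port_id, ctx, n, &error);
 *		// each ctx[i] is the rte_flow_action_age context
 *		// supplied when the flow was created
 *		free(ctx);
 *	}
 */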
15346
15347 /*
15348  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
15349  */
15350 static uint32_t
15351 flow_dv_counter_allocate(struct rte_eth_dev *dev)
15352 {
15353         return flow_dv_counter_alloc(dev, 0);
15354 }
15355
15356 /**
15357  * Validate indirect action.
15358  * Dispatcher for action type specific validation.
15359  *
15360  * @param[in] dev
15361  *   Pointer to the Ethernet device structure.
15362  * @param[in] conf
15363  *   Indirect action configuration.
15364  * @param[in] action
15365  *   The indirect action object to validate.
15366  * @param[out] error
15367  *   Perform verbose error reporting if not NULL. Initialized in case of
15368  *   error only.
15369  *
15370  * @return
15371  *   0 on success, otherwise negative errno value.
15372  */
15373 static int
15374 flow_dv_action_validate(struct rte_eth_dev *dev,
15375                         const struct rte_flow_indir_action_conf *conf,
15376                         const struct rte_flow_action *action,
15377                         struct rte_flow_error *err)
15378 {
15379         struct mlx5_priv *priv = dev->data->dev_private;
15380
15381         RTE_SET_USED(conf);
15382         switch (action->type) {
15383         case RTE_FLOW_ACTION_TYPE_RSS:
15384                 /*
15385                  * priv->obj_ops is set according to driver capabilities.
15386                  * When DevX capabilities are sufficient, it is set to
15387                  * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
15388                  * ibv_obj_ops doesn't support the ind_table_modify
15389                  * operation, so in that case the indirect RSS action
15390                  * can't be used.
15391                  */
15392                 if (priv->obj_ops.ind_table_modify == NULL)
15393                         return rte_flow_error_set
15394                                         (err, ENOTSUP,
15395                                          RTE_FLOW_ERROR_TYPE_ACTION,
15396                                          NULL,
15397                                          "Indirect RSS action not supported");
15398                 return mlx5_validate_action_rss(dev, action, err);
15399         case RTE_FLOW_ACTION_TYPE_AGE:
15400                 if (!priv->sh->aso_age_mng)
15401                         return rte_flow_error_set(err, ENOTSUP,
15402                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15403                                                 NULL,
15404                                                 "Indirect age action not supported");
15405                 return flow_dv_validate_action_age(0, action, dev, err);
15406         case RTE_FLOW_ACTION_TYPE_COUNT:
15407                 /*
15408                  * There are two mechanisms to share the action count.
15409                  * The old mechanism uses the shared field to share, while the
15410                  * new mechanism uses the indirect action API.
15411                  * This validation ensures that the two mechanisms are
15412                  * not combined.
15413                  */
15414                 if (is_shared_action_count(action))
15415                         return rte_flow_error_set(err, ENOTSUP,
15416                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15417                                                   NULL,
15418                                                   "Mix shared and indirect counter is not supported");
15419                 return flow_dv_validate_action_count(dev, true, 0, err);
15420         default:
15421                 return rte_flow_error_set(err, ENOTSUP,
15422                                           RTE_FLOW_ERROR_TYPE_ACTION,
15423                                           NULL,
15424                                           "action type not supported");
15425         }
15426 }
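/*
 * Illustrative sketch: the dispatcher above runs when an application creates
 * an indirect action, e.g. a COUNT handle meant to be attached to several
 * flows (error handling omitted):
 *
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &conf, &action, &error);
 */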
15427
15428 /**
15429  * Validate meter policy actions.
15430  * Dispatcher for action type specific validation.
15431  *
15432  * @param[in] dev
15433  *   Pointer to the Ethernet device structure.
15434  * @param[in] action
15435  *   The meter policy action object to validate.
15436  * @param[in] attr
15437  *   Attributes of flow to determine steering domain.
15438  * @param[out] error
15439  *   Perform verbose error reporting if not NULL. Initialized in case of
15440  *   error only.
15441  *
15442  * @return
15443  *   0 on success, otherwise negative errno value.
15444  */
15445 static int
15446 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
15447                         const struct rte_flow_action *actions[RTE_COLORS],
15448                         struct rte_flow_attr *attr,
15449                         bool *is_rss,
15450                         uint8_t *domain_bitmap,
15451                         bool *is_def_policy,
15452                         struct rte_mtr_error *error)
15453 {
15454         struct mlx5_priv *priv = dev->data->dev_private;
15455         struct mlx5_dev_config *dev_conf = &priv->config;
15456         const struct rte_flow_action *act;
15457         uint64_t action_flags = 0;
15458         int actions_n;
15459         int i, ret;
15460         struct rte_flow_error flow_err;
15461         uint8_t domain_color[RTE_COLORS] = {0};
15462         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
15463
15464         if (!priv->config.dv_esw_en)
15465                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
15466         *domain_bitmap = def_domain;
15467         if (actions[RTE_COLOR_YELLOW] &&
15468                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
15469                 return -rte_mtr_error_set(error, ENOTSUP,
15470                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15471                                 NULL,
15472                                 "Yellow color does not support any action.");
15473         if (!actions[RTE_COLOR_RED] ||
15474                 actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
15475                 return -rte_mtr_error_set(error, ENOTSUP,
15476                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15477                                 NULL, "Red color only supports drop action.");
15478         /*
15479          * Check default policy actions:
15480          * Green/Yellow: no action, Red: drop action
15481          */
15482         if ((!actions[RTE_COLOR_GREEN] ||
15483                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
15484                 *is_def_policy = true;
15485                 return 0;
15486         }
15487         flow_err.message = NULL;
15488         for (i = 0; i < RTE_COLORS; i++) {
15489                 act = actions[i];
15490                 for (action_flags = 0, actions_n = 0;
15491                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
15492                         act++) {
15493                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
15494                                 return -rte_mtr_error_set(error, ENOTSUP,
15495                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15496                                           NULL, "too many actions");
15497                         switch (act->type) {
15498                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15499                                 if (!priv->config.dv_esw_en)
15500                                         return -rte_mtr_error_set(error,
15501                                         ENOTSUP,
15502                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15503                                         NULL, "PORT action is not"
15504                                         " supported when E-Switch is disabled");
15505                                 ret = flow_dv_validate_action_port_id(dev,
15506                                                 action_flags,
15507                                                 act, attr, &flow_err);
15508                                 if (ret)
15509                                         return -rte_mtr_error_set(error,
15510                                         ENOTSUP,
15511                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15512                                         NULL, flow_err.message ?
15513                                         flow_err.message :
15514                                         "PORT action validation failed");
15515                                 ++actions_n;
15516                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15517                                 break;
15518                         case RTE_FLOW_ACTION_TYPE_MARK:
15519                                 ret = flow_dv_validate_action_mark(dev, act,
15520                                                            action_flags,
15521                                                            attr, &flow_err);
15522                                 if (ret < 0)
15523                                         return -rte_mtr_error_set(error,
15524                                         ENOTSUP,
15525                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15526                                         NULL, flow_err.message ?
15527                                         flow_err.message :
15528                                         "Mark action validation failed");
15529                                 if (dev_conf->dv_xmeta_en !=
15530                                         MLX5_XMETA_MODE_LEGACY)
15531                                         return -rte_mtr_error_set(error,
15532                                         ENOTSUP,
15533                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15534                                         NULL, "Extended MARK action is "
15535                                         "not supported. Please try using "
15536                                         "the default policy for meter.");
15537                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15538                                 ++actions_n;
15539                                 break;
15540                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15541                                 ret = flow_dv_validate_action_set_tag(dev,
15542                                                         act, action_flags,
15543                                                         attr, &flow_err);
15544                                 if (ret)
15545                                         return -rte_mtr_error_set(error,
15546                                         ENOTSUP,
15547                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15548                                         NULL, flow_err.message ?
15549                                         flow_err.message :
15550                                         "Set tag action validation failed");
15551                                 /*
15552                                  * Count all modify-header actions
15553                                  * as one action.
15554                                  */
15555                                 if (!(action_flags &
15556                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
15557                                         ++actions_n;
15558                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15559                                 break;
15560                         case RTE_FLOW_ACTION_TYPE_DROP:
15561                                 ret = mlx5_flow_validate_action_drop
15562                                         (action_flags,
15563                                         attr, &flow_err);
15564                                 if (ret < 0)
15565                                         return -rte_mtr_error_set(error,
15566                                         ENOTSUP,
15567                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15568                                         NULL, flow_err.message ?
15569                                         flow_err.message :
15570                                         "Drop action validation failed");
15571                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15572                                 ++actions_n;
15573                                 break;
15574                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15575                                 /*
15576                                  * Check whether extensive
15577                                  * metadata feature is engaged.
15578                                  */
15579                                 if (dev_conf->dv_flow_en &&
15580                                         (dev_conf->dv_xmeta_en !=
15581                                         MLX5_XMETA_MODE_LEGACY) &&
15582                                         mlx5_flow_ext_mreg_supported(dev))
15583                                         return -rte_mtr_error_set(error,
15584                                           ENOTSUP,
15585                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15586                                           NULL, "Queue action with meta "
15587                                           "is not supported. Please try "
15588                                           "using the default policy for meter.");
15589                                 ret = mlx5_flow_validate_action_queue(act,
15590                                                         action_flags, dev,
15591                                                         attr, &flow_err);
15592                                 if (ret < 0)
15593                                         return -rte_mtr_error_set(error,
15594                                           ENOTSUP,
15595                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15596                                           NULL, flow_err.message ?
15597                                           flow_err.message :
15598                                           "Queue action validation failed");
15599                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15600                                 ++actions_n;
15601                                 break;
15602                         case RTE_FLOW_ACTION_TYPE_RSS:
15603                                 if (dev_conf->dv_flow_en &&
15604                                         (dev_conf->dv_xmeta_en !=
15605                                         MLX5_XMETA_MODE_LEGACY) &&
15606                                         mlx5_flow_ext_mreg_supported(dev))
15607                                         return -rte_mtr_error_set(error,
15608                                           ENOTSUP,
15609                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15610                                           NULL, "RSS action with meta "
15611                                           "is not supported. Please try "
15612                                           "using the default policy for meter.");
15613                                 ret = mlx5_validate_action_rss(dev, act,
15614                                                 &flow_err);
15615                                 if (ret < 0)
15616                                         return -rte_mtr_error_set(error,
15617                                           ENOTSUP,
15618                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15619                                           NULL, flow_err.message ?
15620                                           flow_err.message :
15621                                           "RSS action validation failed");
15622                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15623                                 ++actions_n;
15624                                 *is_rss = true;
15625                                 break;
15626                         case RTE_FLOW_ACTION_TYPE_JUMP:
15627                                 ret = flow_dv_validate_action_jump(dev,
15628                                         NULL, act, action_flags,
15629                                         attr, true, &flow_err);
15630                                 if (ret)
15631                                         return -rte_mtr_error_set(error,
15632                                           ENOTSUP,
15633                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15634                                           NULL, flow_err.message ?
15635                                           flow_err.message :
15636                                           "Jump action validation failed");
15637                                 ++actions_n;
15638                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15639                                 break;
15640                         default:
15641                                 return -rte_mtr_error_set(error, ENOTSUP,
15642                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15643                                         NULL,
15644                                         "Unsupported action in meter policy");
15645                         }
15646                 }
15647                 /* Yellow is not supported, just skip. */
15648                 if (i == RTE_COLOR_YELLOW)
15649                         continue;
15650                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
15651                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
15652                 else if ((action_flags &
15653                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
15654                         (action_flags & MLX5_FLOW_ACTION_MARK))
15655                         /*
15656                          * Only MLX5_XMETA_MODE_LEGACY is supported, so the
15657                          * MARK action is allowed only in the ingress domain.
15658                          */
15659                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
15660                 else
15661                         domain_color[i] = def_domain;
15662                 /*
15663                  * Validate the drop action mutual exclusion
15664                  * with other actions. Drop action is mutually-exclusive
15665                  * with any other action, except for Count action.
15666                  */
15667                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
15668                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
15669                         return -rte_mtr_error_set(error, ENOTSUP,
15670                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15671                                 NULL, "Drop action is mutually-exclusive "
15672                                 "with any other action");
15673                 }
15674                 /* E-Switch has a few restrictions on using items and actions. */
15675                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
15676                         if (!mlx5_flow_ext_mreg_supported(dev) &&
15677                                 action_flags & MLX5_FLOW_ACTION_MARK)
15678                                 return -rte_mtr_error_set(error, ENOTSUP,
15679                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15680                                         NULL, "unsupported action MARK");
15681                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
15682                                 return -rte_mtr_error_set(error, ENOTSUP,
15683                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15684                                         NULL, "unsupported action QUEUE");
15685                         if (action_flags & MLX5_FLOW_ACTION_RSS)
15686                                 return -rte_mtr_error_set(error, ENOTSUP,
15687                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15688                                         NULL, "unsupported action RSS");
15689                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
15690                                 return -rte_mtr_error_set(error, ENOTSUP,
15691                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15692                                         NULL, "no fate action is found");
15693                 } else {
15694                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
15695                                 (domain_color[i] &
15696                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
15697                                 if ((domain_color[i] &
15698                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
15699                                         domain_color[i] =
15700                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
15701                                 else
15702                                         return -rte_mtr_error_set(error,
15703                                         ENOTSUP,
15704                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15705                                         NULL, "no fate action is found");
15706                         }
15707                 }
15708                 if (domain_color[i] != def_domain)
15709                         *domain_bitmap = domain_color[i];
15710         }
15711         return 0;
15712 }
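/*
 * Illustrative sketch: a non-default policy accepted by the validation above
 * could, for instance, queue green traffic and drop red traffic (yellow left
 * empty, error handling omitted):
 *
 *	struct rte_flow_action green_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action red_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_mtr_meter_policy_params params = {
 *		.actions = {
 *			[RTE_COLOR_GREEN] = green_acts,
 *			[RTE_COLOR_RED] = red_acts,
 *		},
 *	};
 *
 *	rte_mtr_meter_policy_add(port_id, policy_id, &params, &mtr_error);
 */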
15713
15714 static int
15715 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
15716 {
15717         struct mlx5_priv *priv = dev->data->dev_private;
15718         int ret = 0;
15719
15720         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
15721                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
15722                                                 flags);
15723                 if (ret != 0)
15724                         return ret;
15725         }
15726         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
15727                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
15728                 if (ret != 0)
15729                         return ret;
15730         }
15731         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
15732                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
15733                 if (ret != 0)
15734                         return ret;
15735         }
15736         return 0;
15737 }
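/*
 * Illustrative sketch: flow_dv_sync_domain() is registered as .sync_domain
 * below and backs the PMD-specific rte_pmd_mlx5_sync_flow() API, e.g. to
 * ensure all NIC Rx and FDB rules are committed to hardware before
 * measuring traffic:
 *
 *	rte_pmd_mlx5_sync_flow(port_id,
 *			       MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_FDB);
 */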
15738
15739 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
15740         .validate = flow_dv_validate,
15741         .prepare = flow_dv_prepare,
15742         .translate = flow_dv_translate,
15743         .apply = flow_dv_apply,
15744         .remove = flow_dv_remove,
15745         .destroy = flow_dv_destroy,
15746         .query = flow_dv_query,
15747         .create_mtr_tbls = flow_dv_create_mtr_tbls,
15748         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
15749         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
15750         .create_meter = flow_dv_mtr_alloc,
15751         .free_meter = flow_dv_aso_mtr_release_to_pool,
15752         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
15753         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
15754         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
15755         .create_policy_rules = flow_dv_create_policy_rules,
15756         .destroy_policy_rules = flow_dv_destroy_policy_rules,
15757         .create_def_policy = flow_dv_create_def_policy,
15758         .destroy_def_policy = flow_dv_destroy_def_policy,
15759         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
15760         .counter_alloc = flow_dv_counter_allocate,
15761         .counter_free = flow_dv_counter_free,
15762         .counter_query = flow_dv_counter_query,
15763         .get_aged_flows = flow_get_aged_flows,
15764         .action_validate = flow_dv_action_validate,
15765         .action_create = flow_dv_action_create,
15766         .action_destroy = flow_dv_action_destroy,
15767         .action_update = flow_dv_action_update,
15768         .action_query = flow_dv_action_query,
15769         .sync_domain = flow_dv_sync_domain,
15770 };
15771
15772 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
15773