/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};
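
/*
 * Illustrative sketch, not part of the driver: the anonymous-struct/
 * uint32_t union above lets every attribute flag be cleared in a single
 * store (attr->attr = 0), which is how tunnel items reset the tracked
 * outer layers in flow_dv_attr_init() below. The helper name is
 * hypothetical, for demonstration only.
 */
static void __rte_unused
flow_dv_example_attr_union(void)
{
	union flow_dv_attr attr = { .attr = 0 };

	attr.ipv4 = 1;
	attr.udp = 1;
	attr.attr = 0; /* Clears ipv4, udp and valid at once. */
}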

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
				  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers except in tunnel mode.
 * In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	uint64_t layers = dev_flow->handle->layers;

	/*
	 * If layers is already initialized, this dev_flow is the suffix
	 * flow and the layer flags were set by the prefix flow. Use the
	 * layer flags from the prefix flow, as the suffix flow may not
	 * carry the user-defined items after the flow is split.
	 */
	if (layers) {
		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
	switch (rcol) {
	case RTE_COLOR_GREEN:
		return MLX5_FLOW_COLOR_GREEN;
	case RTE_COLOR_YELLOW:
		return MLX5_FLOW_COLOR_YELLOW;
	case RTE_COLOR_RED:
		return MLX5_FLOW_COLOR_RED;
	default:
		break;
	}
	return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	if (next_protocol == IPPROTO_IPIP) {
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
	}
	if (next_protocol == IPPROTO_IPV6) {
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
	}
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;
	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}
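
/*
 * Illustrative sketch, not part of the driver: the TCI layout assumed
 * above - PCP in bits 13-15, VID in bits 0-11. The TCI is kept in host
 * order here, matching how mlx5_update_vlan_vid_pcp() composes it. The
 * helper name and values are hypothetical.
 */
static void __rte_unused
flow_dv_example_vlan_tci(void)
{
	struct rte_vlan_hdr vlan = { .vlan_tci = 0 };

	vlan.vlan_tci |= 5 << MLX5DV_FLOW_VLAN_PCP_SHIFT; /* PCP 5 -> 0xa000 */
	vlan.vlan_tci |= 100 & MLX5DV_FLOW_VLAN_VID_MASK; /* VID 100 -> 0xa064 */
	MLX5_ASSERT(vlan.vlan_tci == 0xa064);
}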

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}
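
/*
 * Illustrative sketch, not part of the driver: fetching a 3-byte
 * big-endian field, as flow_dv_fetch_field() does for odd-sized masks.
 * The helper name and the sample bytes are hypothetical.
 */
static void __rte_unused
flow_dv_example_fetch_field(void)
{
	static const uint8_t data[] = { 0x12, 0x34, 0x56 };
	/* be16(0x1234) shifted left by 8, OR'ed with the third byte. */
	uint32_t v = flow_dv_fetch_field(data, 3);

	MLX5_ASSERT(v == 0x123456);
	(void)v;
}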

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are deduced from the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented in big-endian format as well.
	 * The mask must always be present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
			++dcopy;
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
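
/*
 * Illustrative sketch, not part of the driver: the offset/width
 * deduction above, worked for a 16-bit field masked at bit offset 8.
 * The helper name and the mask value are hypothetical.
 */
static void __rte_unused
flow_dv_example_mask_math(void)
{
	uint32_t mask = 0x00ffff00;
	unsigned int off_b = rte_bsf32(mask);	/* lowest set bit: 8 */
	unsigned int size_b = sizeof(uint32_t) * CHAR_BIT -
			      off_b - __builtin_clz(mask);	/* width: 16 */

	MLX5_ASSERT(off_b == 8 && size_b == 16);
	(void)off_b;
	(void)size_b;
}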

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only increment operation.
		 * To simulate decrement X from Y using increment operation
		 * we need to add UINT32_MAX X times to Y.
		 * Each adding of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
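
/*
 * Illustrative sketch, not part of the driver: decrement emulated by
 * increment in 32-bit modular arithmetic, as used above for
 * DEC_TCP_SEQ. Adding UINT32_MAX once is congruent to subtracting 1
 * mod 2^32. The helper name and values are hypothetical.
 */
static void __rte_unused
flow_dv_example_dec_by_add(void)
{
	uint32_t seq = 1000;
	uint64_t value = 5;		/* decrement by 5 */

	value *= UINT32_MAX;		/* 5 * (2^32 - 1) == -5 mod 2^32 */
	seq += (uint32_t)value;		/* wraps around to 995 */
	MLX5_ASSERT(seq == 995);
	(void)seq;
}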

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only increment operation.
		 * To simulate decrement X from Y using increment operation
		 * we need to add UINT32_MAX X times to Y.
		 * Each adding of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NON);
	MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
		.offset = conf->offset,
		.length = conf->length,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	resource->actions_num = i;
	return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * The mask ignores endianness because there is
			 * no conversion in the datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
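
/*
 * Illustrative sketch, not part of the driver: the little-endian mask
 * arithmetic above, worked for an assumed dv_regc0_mask with usable
 * bits 16..23. The helper name and mask value are hypothetical.
 */
static void __rte_unused
flow_dv_example_regc0_copy_mask(void)
{
	uint32_t reg_c0 = 0x00ff0000;	/* assumed usable bits 16..23 */
	/* rte_fls_u32(reg_c0) == 24, so the field shifts left by 8. */
	uint32_t mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
				   rte_fls_u32(reg_c0));

	MLX5_ASSERT(mask == 0xff000000);
	(void)mask;
}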

/**
 * Convert MARK action to DV specification. This routine is used
 * with extensive metadata support only and requires a metadata
 * register to be handled. In legacy mode the hardware tag resource
 * is engaged instead.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
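
/*
 * Illustrative sketch, not part of the driver: placing a MARK value
 * into the usable reg_c[0] bits, for an assumed dv_regc0_mask. The
 * helper name, mask and mark values are hypothetical.
 */
static void __rte_unused
flow_dv_example_mark_to_regc0(void)
{
	uint32_t msk_c0 = 0x00ffff00;		/* assumed usable bits 8..23 */
	uint32_t shl_c0 = rte_bsf32(msk_c0);	/* 8 */
	uint32_t data = 0x2a << shl_c0;		/* mark 0x2a -> 0x2a00 */

	MLX5_ASSERT(!(data & ~msk_c0));		/* stays within the mask */
	(void)data;
}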

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 struct rte_flow_error *error)
{
	int reg =
		mlx5_flow_get_reg_id(dev, attr->transfer ?
					  MLX5_METADATA_FDB :
					    attr->egress ?
					    MLX5_METADATA_TX :
					    MLX5_METADATA_RX, 0, error);
	if (reg < 0)
		return rte_flow_error_set(error,
					  ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "unavailable "
					  "metadata register");
	return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	/*
	 * In the datapath code there are no endianness
	 * conversions for performance reasons; all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.type_of_service = conf->dscp;
	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/*
	 * Even though the DSCP bit offset of IPv6 is not byte aligned,
	 * rdma-core only accepts the DSCP value byte aligned in bits
	 * 0 to 5, to be compatible with IPv4. Hence there is no need to
	 * shift the bits in the IPv6 case; rdma-core requires the
	 * byte-aligned value.
	 */
	ipv6.hdr.vtc_flow = conf->dscp;
	ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
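
/*
 * Illustrative sketch, not part of the driver: both DSCP conversions
 * above reduce to the same 6-bit mask once shifted into place
 * (RTE_IPV4_HDR_DSCP_MASK is 0xfc, RTE_IPV6_HDR_DSCP_MASK sits in the
 * IPv6 traffic-class bits). The helper name is hypothetical.
 */
static void __rte_unused
flow_dv_example_dscp_masks(void)
{
	uint8_t v4_mask = RTE_IPV4_HDR_DSCP_MASK >> 2;		/* 0x3f */
	uint32_t v6_mask = RTE_IPV6_HDR_DSCP_MASK >> 22;	/* 0x3f */

	MLX5_ASSERT(v4_mask == 0x3f && v6_mask == 0x3f);
	(void)v4_mask;
	(void)v6_mask;
}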

static int
mlx5_flow_item_field_width(enum rte_flow_field_id field)
{
	switch (field) {
	case RTE_FLOW_FIELD_START:
		return 32;
	case RTE_FLOW_FIELD_MAC_DST:
	case RTE_FLOW_FIELD_MAC_SRC:
		return 48;
	case RTE_FLOW_FIELD_VLAN_TYPE:
		return 16;
	case RTE_FLOW_FIELD_VLAN_ID:
		return 12;
	case RTE_FLOW_FIELD_MAC_TYPE:
		return 16;
	case RTE_FLOW_FIELD_IPV4_DSCP:
		return 6;
	case RTE_FLOW_FIELD_IPV4_TTL:
		return 8;
	case RTE_FLOW_FIELD_IPV4_SRC:
	case RTE_FLOW_FIELD_IPV4_DST:
		return 32;
	case RTE_FLOW_FIELD_IPV6_DSCP:
		return 6;
	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
		return 8;
	case RTE_FLOW_FIELD_IPV6_SRC:
	case RTE_FLOW_FIELD_IPV6_DST:
		return 128;
	case RTE_FLOW_FIELD_TCP_PORT_SRC:
	case RTE_FLOW_FIELD_TCP_PORT_DST:
		return 16;
	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
	case RTE_FLOW_FIELD_TCP_ACK_NUM:
		return 32;
	case RTE_FLOW_FIELD_TCP_FLAGS:
		return 6;
	case RTE_FLOW_FIELD_UDP_PORT_SRC:
	case RTE_FLOW_FIELD_UDP_PORT_DST:
		return 16;
	case RTE_FLOW_FIELD_VXLAN_VNI:
	case RTE_FLOW_FIELD_GENEVE_VNI:
		return 24;
	case RTE_FLOW_FIELD_GTP_TEID:
	case RTE_FLOW_FIELD_TAG:
		return 32;
	case RTE_FLOW_FIELD_MARK:
		return 24;
	case RTE_FLOW_FIELD_META:
		return 32;
	case RTE_FLOW_FIELD_POINTER:
	case RTE_FLOW_FIELD_VALUE:
		return 64;
	default:
		MLX5_ASSERT(false);
	}
	return 0;
}
1391
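/**
 * Convert a flow field ID to the matching device modification fields.
 *
 * Fills @p info (and @p mask, when non-NULL) with the modify-header
 * register chunks covering the requested field, until @p width bits are
 * consumed. For RTE_FLOW_FIELD_VALUE and RTE_FLOW_FIELD_POINTER the
 * immediate value is spread over @p value according to @p dst_width
 * instead.
 *
 * @param[in] data
 *   Pointer to the modify-field data (field ID, level, offset or value).
 * @param[out] info
 *   Array of register modification descriptors to fill.
 * @param[out] mask
 *   Optional array of masks, one entry per info entry.
 * @param[out] value
 *   Array receiving the chunks of an immediate value.
 * @param[in] width
 *   Number of bits to modify.
 * @param[in] dst_width
 *   Width in bits of the destination field.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to the error structure, filled when a register cannot be
 *   resolved.
 */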
1392 static void
1393 mlx5_flow_field_id_to_modify_info
1394                 (const struct rte_flow_action_modify_data *data,
1395                  struct field_modify_info *info,
1396                  uint32_t *mask, uint32_t *value,
1397                  uint32_t width, uint32_t dst_width,
1398                  struct rte_eth_dev *dev,
1399                  const struct rte_flow_attr *attr,
1400                  struct rte_flow_error *error)
1401 {
1402         uint32_t idx = 0;
1403         uint64_t val = 0;
1404         switch (data->field) {
1405         case RTE_FLOW_FIELD_START:
1406                 /* not supported yet */
1407                 MLX5_ASSERT(false);
1408                 break;
1409         case RTE_FLOW_FIELD_MAC_DST:
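                /*
                 * The 48-bit MAC address is handled as two register chunks:
                 * a 4-byte chunk for bits 47..16 and a 2-byte chunk for
                 * bits 15..0, with mask entries filled until the requested
                 * width is consumed.
                 */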
1410                 if (mask) {
1411                         if (data->offset < 32) {
1412                                 info[idx] = (struct field_modify_info){4, 0,
1413                                                 MLX5_MODI_OUT_DMAC_47_16};
1414                                 if (width < 32) {
1415                                         mask[idx] =
1416                                                 rte_cpu_to_be_32(0xffffffff >>
1417                                                                  (32 - width));
1418                                         width = 0;
1419                                 } else {
1420                                         mask[idx] = RTE_BE32(0xffffffff);
1421                                         width -= 32;
1422                                 }
1423                                 if (!width)
1424                                         break;
1425                                 ++idx;
1426                         }
1427                         info[idx] = (struct field_modify_info){2, 4 * idx,
1428                                                 MLX5_MODI_OUT_DMAC_15_0};
1429                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1430                 } else {
1431                         if (data->offset < 32)
1432                                 info[idx++] = (struct field_modify_info){4, 0,
1433                                                 MLX5_MODI_OUT_DMAC_47_16};
1434                         info[idx] = (struct field_modify_info){2, 0,
1435                                                 MLX5_MODI_OUT_DMAC_15_0};
1436                 }
1437                 break;
1438         case RTE_FLOW_FIELD_MAC_SRC:
1439                 if (mask) {
1440                         if (data->offset < 32) {
1441                                 info[idx] = (struct field_modify_info){4, 0,
1442                                                 MLX5_MODI_OUT_SMAC_47_16};
1443                                 if (width < 32) {
1444                                         mask[idx] =
1445                                                 rte_cpu_to_be_32(0xffffffff >>
1446                                                                 (32 - width));
1447                                         width = 0;
1448                                 } else {
1449                                         mask[idx] = RTE_BE32(0xffffffff);
1450                                         width -= 32;
1451                                 }
1452                                 if (!width)
1453                                         break;
1454                                 ++idx;
1455                         }
1456                         info[idx] = (struct field_modify_info){2, 4 * idx,
1457                                                 MLX5_MODI_OUT_SMAC_15_0};
1458                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1459                 } else {
1460                         if (data->offset < 32)
1461                                 info[idx++] = (struct field_modify_info){4, 0,
1462                                                 MLX5_MODI_OUT_SMAC_47_16};
1463                         info[idx] = (struct field_modify_info){2, 0,
1464                                                 MLX5_MODI_OUT_SMAC_15_0};
1465                 }
1466                 break;
1467         case RTE_FLOW_FIELD_VLAN_TYPE:
1468                 /* not supported yet */
1469                 break;
1470         case RTE_FLOW_FIELD_VLAN_ID:
1471                 info[idx] = (struct field_modify_info){2, 0,
1472                                         MLX5_MODI_OUT_FIRST_VID};
1473                 if (mask)
1474                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1475                 break;
1476         case RTE_FLOW_FIELD_MAC_TYPE:
1477                 info[idx] = (struct field_modify_info){2, 0,
1478                                         MLX5_MODI_OUT_ETHERTYPE};
1479                 if (mask)
1480                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1481                 break;
1482         case RTE_FLOW_FIELD_IPV4_DSCP:
1483                 info[idx] = (struct field_modify_info){1, 0,
1484                                         MLX5_MODI_OUT_IP_DSCP};
1485                 if (mask)
1486                         mask[idx] = 0x3f >> (6 - width);
1487                 break;
1488         case RTE_FLOW_FIELD_IPV4_TTL:
1489                 info[idx] = (struct field_modify_info){1, 0,
1490                                         MLX5_MODI_OUT_IPV4_TTL};
1491                 if (mask)
1492                         mask[idx] = 0xff >> (8 - width);
1493                 break;
1494         case RTE_FLOW_FIELD_IPV4_SRC:
1495                 info[idx] = (struct field_modify_info){4, 0,
1496                                         MLX5_MODI_OUT_SIPV4};
1497                 if (mask)
1498                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1499                                                      (32 - width));
1500                 break;
1501         case RTE_FLOW_FIELD_IPV4_DST:
1502                 info[idx] = (struct field_modify_info){4, 0,
1503                                         MLX5_MODI_OUT_DIPV4};
1504                 if (mask)
1505                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1506                                                      (32 - width));
1507                 break;
1508         case RTE_FLOW_FIELD_IPV6_DSCP:
1509                 info[idx] = (struct field_modify_info){1, 0,
1510                                         MLX5_MODI_OUT_IP_DSCP};
1511                 if (mask)
1512                         mask[idx] = 0x3f >> (6 - width);
1513                 break;
1514         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1515                 info[idx] = (struct field_modify_info){1, 0,
1516                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1517                 if (mask)
1518                         mask[idx] = 0xff >> (8 - width);
1519                 break;
1520         case RTE_FLOW_FIELD_IPV6_SRC:
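                /*
                 * The 128-bit IPv6 address is handled as four 4-byte
                 * register chunks, selected by the offset, with mask entries
                 * filled until the requested width is consumed. IPV6_DST
                 * below follows the same scheme.
                 */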
1521                 if (mask) {
1522                         if (data->offset < 32) {
1523                                 info[idx] = (struct field_modify_info){4,
1524                                                 4 * idx,
1525                                                 MLX5_MODI_OUT_SIPV6_31_0};
1526                                 if (width < 32) {
1527                                         mask[idx] =
1528                                                 rte_cpu_to_be_32(0xffffffff >>
1529                                                                  (32 - width));
1530                                         width = 0;
1531                                 } else {
1532                                         mask[idx] = RTE_BE32(0xffffffff);
1533                                         width -= 32;
1534                                 }
1535                                 if (!width)
1536                                         break;
1537                                 ++idx;
1538                         }
1539                         if (data->offset < 64) {
1540                                 info[idx] = (struct field_modify_info){4,
1541                                                 4 * idx,
1542                                                 MLX5_MODI_OUT_SIPV6_63_32};
1543                                 if (width < 32) {
1544                                         mask[idx] =
1545                                                 rte_cpu_to_be_32(0xffffffff >>
1546                                                                  (32 - width));
1547                                         width = 0;
1548                                 } else {
1549                                         mask[idx] = RTE_BE32(0xffffffff);
1550                                         width -= 32;
1551                                 }
1552                                 if (!width)
1553                                         break;
1554                                 ++idx;
1555                         }
1556                         if (data->offset < 96) {
1557                                 info[idx] = (struct field_modify_info){4,
1558                                                 4 * idx,
1559                                                 MLX5_MODI_OUT_SIPV6_95_64};
1560                                 if (width < 32) {
1561                                         mask[idx] =
1562                                                 rte_cpu_to_be_32(0xffffffff >>
1563                                                                  (32 - width));
1564                                         width = 0;
1565                                 } else {
1566                                         mask[idx] = RTE_BE32(0xffffffff);
1567                                         width -= 32;
1568                                 }
1569                                 if (!width)
1570                                         break;
1571                                 ++idx;
1572                         }
1573                         info[idx] = (struct field_modify_info){4, 4 * idx,
1574                                                 MLX5_MODI_OUT_SIPV6_127_96};
1575                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1576                                                      (32 - width));
1577                 } else {
1578                         if (data->offset < 32)
1579                                 info[idx++] = (struct field_modify_info){4, 0,
1580                                                 MLX5_MODI_OUT_SIPV6_31_0};
1581                         if (data->offset < 64)
1582                                 info[idx++] = (struct field_modify_info){4, 0,
1583                                                 MLX5_MODI_OUT_SIPV6_63_32};
1584                         if (data->offset < 96)
1585                                 info[idx++] = (struct field_modify_info){4, 0,
1586                                                 MLX5_MODI_OUT_SIPV6_95_64};
1587                         if (data->offset < 128)
1588                                 info[idx++] = (struct field_modify_info){4, 0,
1589                                                 MLX5_MODI_OUT_SIPV6_127_96};
1590                 }
1591                 break;
1592         case RTE_FLOW_FIELD_IPV6_DST:
1593                 if (mask) {
1594                         if (data->offset < 32) {
1595                                 info[idx] = (struct field_modify_info){4,
1596                                                 4 * idx,
1597                                                 MLX5_MODI_OUT_DIPV6_31_0};
1598                                 if (width < 32) {
1599                                         mask[idx] =
1600                                                 rte_cpu_to_be_32(0xffffffff >>
1601                                                                  (32 - width));
1602                                         width = 0;
1603                                 } else {
1604                                         mask[idx] = RTE_BE32(0xffffffff);
1605                                         width -= 32;
1606                                 }
1607                                 if (!width)
1608                                         break;
1609                                 ++idx;
1610                         }
1611                         if (data->offset < 64) {
1612                                 info[idx] = (struct field_modify_info){4,
1613                                                 4 * idx,
1614                                                 MLX5_MODI_OUT_DIPV6_63_32};
1615                                 if (width < 32) {
1616                                         mask[idx] =
1617                                                 rte_cpu_to_be_32(0xffffffff >>
1618                                                                  (32 - width));
1619                                         width = 0;
1620                                 } else {
1621                                         mask[idx] = RTE_BE32(0xffffffff);
1622                                         width -= 32;
1623                                 }
1624                                 if (!width)
1625                                         break;
1626                                 ++idx;
1627                         }
1628                         if (data->offset < 96) {
1629                                 info[idx] = (struct field_modify_info){4,
1630                                                 4 * idx,
1631                                                 MLX5_MODI_OUT_DIPV6_95_64};
1632                                 if (width < 32) {
1633                                         mask[idx] =
1634                                                 rte_cpu_to_be_32(0xffffffff >>
1635                                                                  (32 - width));
1636                                         width = 0;
1637                                 } else {
1638                                         mask[idx] = RTE_BE32(0xffffffff);
1639                                         width -= 32;
1640                                 }
1641                                 if (!width)
1642                                         break;
1643                                 ++idx;
1644                         }
1645                         info[idx] = (struct field_modify_info){4, 4 * idx,
1646                                                 MLX5_MODI_OUT_DIPV6_127_96};
1647                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1648                                                      (32 - width));
1649                 } else {
1650                         if (data->offset < 32)
1651                                 info[idx++] = (struct field_modify_info){4, 0,
1652                                                 MLX5_MODI_OUT_DIPV6_31_0};
1653                         if (data->offset < 64)
1654                                 info[idx++] = (struct field_modify_info){4, 0,
1655                                                 MLX5_MODI_OUT_DIPV6_63_32};
1656                         if (data->offset < 96)
1657                                 info[idx++] = (struct field_modify_info){4, 0,
1658                                                 MLX5_MODI_OUT_DIPV6_95_64};
1659                         if (data->offset < 128)
1660                                 info[idx++] = (struct field_modify_info){4, 0,
1661                                                 MLX5_MODI_OUT_DIPV6_127_96};
1662                 }
1663                 break;
1664         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1665                 info[idx] = (struct field_modify_info){2, 0,
1666                                         MLX5_MODI_OUT_TCP_SPORT};
1667                 if (mask)
1668                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1669                 break;
1670         case RTE_FLOW_FIELD_TCP_PORT_DST:
1671                 info[idx] = (struct field_modify_info){2, 0,
1672                                         MLX5_MODI_OUT_TCP_DPORT};
1673                 if (mask)
1674                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1675                 break;
1676         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1677                 info[idx] = (struct field_modify_info){4, 0,
1678                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1679                 if (mask)
1680                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1681                                                      (32 - width));
1682                 break;
1683         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1684                 info[idx] = (struct field_modify_info){4, 0,
1685                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1686                 if (mask)
1687                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1688                                                      (32 - width));
1689                 break;
1690         case RTE_FLOW_FIELD_TCP_FLAGS:
1691                 info[idx] = (struct field_modify_info){1, 0,
1692                                         MLX5_MODI_OUT_TCP_FLAGS};
1693                 if (mask)
1694                         mask[idx] = 0x3f >> (6 - width);
1695                 break;
1696         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1697                 info[idx] = (struct field_modify_info){2, 0,
1698                                         MLX5_MODI_OUT_UDP_SPORT};
1699                 if (mask)
1700                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1701                 break;
1702         case RTE_FLOW_FIELD_UDP_PORT_DST:
1703                 info[idx] = (struct field_modify_info){2, 0,
1704                                         MLX5_MODI_OUT_UDP_DPORT};
1705                 if (mask)
1706                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1707                 break;
1708         case RTE_FLOW_FIELD_VXLAN_VNI:
1709                 /* not supported yet */
1710                 break;
1711         case RTE_FLOW_FIELD_GENEVE_VNI:
1712                 /* not supported yet */
1713                 break;
1714         case RTE_FLOW_FIELD_GTP_TEID:
1715                 info[idx] = (struct field_modify_info){4, 0,
1716                                         MLX5_MODI_GTP_TEID};
1717                 if (mask)
1718                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1719                                                      (32 - width));
1720                 break;
1721         case RTE_FLOW_FIELD_TAG:
1722                 {
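                        /*
                         * TAG (like MARK and META below) is backed by a
                         * device metadata register: resolve the register for
                         * this tag index and modify it as a plain 32-bit
                         * field.
                         */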
1723                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1724                                                    data->level, error);
1725                         if (reg < 0)
1726                                 return;
1727                         MLX5_ASSERT(reg != REG_NON);
1728                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1729                         info[idx] = (struct field_modify_info){4, 0,
1730                                                 reg_to_field[reg]};
1731                         if (mask)
1732                                 mask[idx] =
1733                                         rte_cpu_to_be_32(0xffffffff >>
1734                                                          (32 - width));
1735                 }
1736                 break;
1737         case RTE_FLOW_FIELD_MARK:
1738                 {
1739                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1740                                                        0, error);
1741                         if (reg < 0)
1742                                 return;
1743                         MLX5_ASSERT(reg != REG_NON);
1744                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1745                         info[idx] = (struct field_modify_info){4, 0,
1746                                                 reg_to_field[reg]};
1747                         if (mask)
1748                                 mask[idx] =
1749                                         rte_cpu_to_be_32(0xffffffff >>
1750                                                          (32 - width));
1751                 }
1752                 break;
1753         case RTE_FLOW_FIELD_META:
1754                 {
1755                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1756                         if (reg < 0)
1757                                 return;
1758                         MLX5_ASSERT(reg != REG_NON);
1759                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1760                         info[idx] = (struct field_modify_info){4, 0,
1761                                                 reg_to_field[reg]};
1762                         if (mask)
1763                                 mask[idx] =
1764                                         rte_cpu_to_be_32(0xffffffff >>
1765                                                          (32 - width));
1766                 }
1767                 break;
1768         case RTE_FLOW_FIELD_POINTER:
1769         case RTE_FLOW_FIELD_VALUE:
1770                 if (data->field == RTE_FLOW_FIELD_POINTER)
1771                         memcpy(&val, (void *)(uintptr_t)data->value,
1772                                sizeof(uint64_t));
1773                 else
1774                         val = data->value;
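                /*
                 * Spread the immediate value over the mask slots filled for
                 * the destination, picking 32-, 16- or 8-bit big-endian
                 * chunks according to the destination field width.
                 */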
1775                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1776                         if (mask[idx]) {
1777                                 if (dst_width > 16) {
1778                                         value[idx] = rte_cpu_to_be_32(val);
1779                                         val >>= 32;
1780                                 } else if (dst_width > 8) {
1781                                         value[idx] = rte_cpu_to_be_16(val);
1782                                         val >>= 16;
1783                                 } else {
1784                                         value[idx] = (uint8_t)val;
1785                                         val >>= 8;
1786                                 }
1787                                 if (!val)
1788                                         break;
1789                         }
1790                 }
1791                 break;
1792         default:
1793                 MLX5_ASSERT(false);
1794                 break;
1795         }
1796 }
1797
1798 /**
1799  * Convert modify_field action to DV specification.
1800  *
1801  * @param[in] dev
1802  *   Pointer to the rte_eth_dev structure.
1803  * @param[in,out] resource
1804  *   Pointer to the modify-header resource.
1805  * @param[in] action
1806  *   Pointer to action specification.
1807  * @param[in] attr
1808  *   Attributes of flow that includes this item.
1809  * @param[out] error
1810  *   Pointer to the error structure.
1811  *
1812  * @return
1813  *   0 on success, a negative errno value otherwise and rte_errno is set.
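 *
 * A minimal illustrative configuration handled here (a sketch, not taken
 * from a real application) sets an immediate TTL value:
 *
 *   struct rte_flow_action_modify_field conf = {
 *           .operation = RTE_FLOW_MODIFY_SET,
 *           .dst = { .field = RTE_FLOW_FIELD_IPV4_TTL },
 *           .src = { .field = RTE_FLOW_FIELD_VALUE, .value = 64 },
 *           .width = 8,
 *   };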
1814  */
1815 static int
1816 flow_dv_convert_action_modify_field
1817                         (struct rte_eth_dev *dev,
1818                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1819                          const struct rte_flow_action *action,
1820                          const struct rte_flow_attr *attr,
1821                          struct rte_flow_error *error)
1822 {
1823         const struct rte_flow_action_modify_field *conf =
1824                 (const struct rte_flow_action_modify_field *)(action->conf);
1825         struct rte_flow_item item;
1826         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1827                                                                 {0, 0, 0} };
1828         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1829                                                                 {0, 0, 0} };
1830         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1831         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1832         uint32_t type;
1833         uint32_t dst_width = mlx5_flow_item_field_width(conf->dst.field);
1834
1835         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1836                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1837                 type = MLX5_MODIFICATION_TYPE_SET;
1838                 /* For SET fill the destination field (field) first. */
1839                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1840                         value, conf->width, dst_width, dev, attr, error);
1841                 /* Then copy immediate value from source as per mask. */
1842                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1843                         value, conf->width, dst_width, dev, attr, error);
1844                 item.spec = &value;
1845         } else {
1846                 type = MLX5_MODIFICATION_TYPE_COPY;
1847                 /* For COPY fill the destination field (dcopy) without mask. */
1848                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1849                         value, conf->width, dst_width, dev, attr, error);
1850                 /* Then construct the source field (field) with mask. */
1851                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1852                         value, conf->width, dst_width, dev, attr, error);
1853         }
1854         item.mask = &mask;
1855         return flow_dv_convert_modify_action(&item,
1856                         field, dcopy, resource, type, error);
1857 }
1858
1859 /**
1860  * Validate MARK item.
1861  *
1862  * @param[in] dev
1863  *   Pointer to the rte_eth_dev structure.
1864  * @param[in] item
1865  *   Item specification.
1866  * @param[in] attr
1867  *   Attributes of flow that includes this item.
1868  * @param[out] error
1869  *   Pointer to error structure.
1870  *
1871  * @return
1872  *   0 on success, a negative errno value otherwise and rte_errno is set.
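 *
 * A minimal illustrative item accepted here (a sketch, assuming the
 * extended metadata feature is enabled) matches a single mark ID:
 *
 *   struct rte_flow_item_mark spec = { .id = 42 };
 *   struct rte_flow_item_mark mask = { .id = 0xffffff };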
1873  */
1874 static int
1875 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1876                            const struct rte_flow_item *item,
1877                            const struct rte_flow_attr *attr __rte_unused,
1878                            struct rte_flow_error *error)
1879 {
1880         struct mlx5_priv *priv = dev->data->dev_private;
1881         struct mlx5_dev_config *config = &priv->config;
1882         const struct rte_flow_item_mark *spec = item->spec;
1883         const struct rte_flow_item_mark *mask = item->mask;
1884         const struct rte_flow_item_mark nic_mask = {
1885                 .id = priv->sh->dv_mark_mask,
1886         };
1887         int ret;
1888
1889         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1890                 return rte_flow_error_set(error, ENOTSUP,
1891                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1892                                           "extended metadata feature"
1893                                           " isn't enabled");
1894         if (!mlx5_flow_ext_mreg_supported(dev))
1895                 return rte_flow_error_set(error, ENOTSUP,
1896                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1897                                           "extended metadata register"
1898                                           " isn't supported");
1899         if (!nic_mask.id)
1900                 return rte_flow_error_set(error, ENOTSUP,
1901                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1902                                           "extended metadata register"
1903                                           " isn't available");
1904         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1905         if (ret < 0)
1906                 return ret;
1907         if (!spec)
1908                 return rte_flow_error_set(error, EINVAL,
1909                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1910                                           item->spec,
1911                                           "data cannot be empty");
1912         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1913                 return rte_flow_error_set(error, EINVAL,
1914                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1915                                           &spec->id,
1916                                           "mark id exceeds the limit");
1917         if (!mask)
1918                 mask = &nic_mask;
1919         if (!mask->id)
1920                 return rte_flow_error_set(error, EINVAL,
1921                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1922                                         "mask cannot be zero");
1923
1924         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1925                                         (const uint8_t *)&nic_mask,
1926                                         sizeof(struct rte_flow_item_mark),
1927                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1928         if (ret < 0)
1929                 return ret;
1930         return 0;
1931 }
1932
1933 /**
1934  * Validate META item.
1935  *
1936  * @param[in] dev
1937  *   Pointer to the rte_eth_dev structure.
1938  * @param[in] item
1939  *   Item specification.
1940  * @param[in] attr
1941  *   Attributes of flow that includes this item.
1942  * @param[out] error
1943  *   Pointer to error structure.
1944  *
1945  * @return
1946  *   0 on success, a negative errno value otherwise and rte_errno is set.
1947  */
1948 static int
1949 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1950                            const struct rte_flow_item *item,
1951                            const struct rte_flow_attr *attr,
1952                            struct rte_flow_error *error)
1953 {
1954         struct mlx5_priv *priv = dev->data->dev_private;
1955         struct mlx5_dev_config *config = &priv->config;
1956         const struct rte_flow_item_meta *spec = item->spec;
1957         const struct rte_flow_item_meta *mask = item->mask;
1958         struct rte_flow_item_meta nic_mask = {
1959                 .data = UINT32_MAX
1960         };
1961         int reg;
1962         int ret;
1963
1964         if (!spec)
1965                 return rte_flow_error_set(error, EINVAL,
1966                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1967                                           item->spec,
1968                                           "data cannot be empty");
1969         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1970                 if (!mlx5_flow_ext_mreg_supported(dev))
1971                         return rte_flow_error_set(error, ENOTSUP,
1972                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1973                                           "extended metadata register"
1974                                           " isn't supported");
1975                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1976                 if (reg < 0)
1977                         return reg;
1978                 if (reg == REG_NON)
1979                         return rte_flow_error_set(error, ENOTSUP,
1980                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1981                                         "unavailable extended metadata register");
1982                 if (reg == REG_B)
1983                         return rte_flow_error_set(error, ENOTSUP,
1984                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1985                                           "match on reg_b "
1986                                           "isn't supported");
1987                 if (reg != REG_A)
1988                         nic_mask.data = priv->sh->dv_meta_mask;
1989         } else {
1990                 if (attr->transfer)
1991                         return rte_flow_error_set(error, ENOTSUP,
1992                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1993                                         "extended metadata feature "
1994                                         "should be enabled when "
1995                                         "meta item is requested "
1996                                         "with e-switch mode");
1997                 if (attr->ingress)
1998                         return rte_flow_error_set(error, ENOTSUP,
1999                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2000                                         "match on metadata for ingress "
2001                                         "is not supported in legacy "
2002                                         "metadata mode");
2003         }
2004         if (!mask)
2005                 mask = &rte_flow_item_meta_mask;
2006         if (!mask->data)
2007                 return rte_flow_error_set(error, EINVAL,
2008                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2009                                         "mask cannot be zero");
2010
2011         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2012                                         (const uint8_t *)&nic_mask,
2013                                         sizeof(struct rte_flow_item_meta),
2014                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2015         return ret;
2016 }
2017
2018 /**
2019  * Validate TAG item.
2020  *
2021  * @param[in] dev
2022  *   Pointer to the rte_eth_dev structure.
2023  * @param[in] item
2024  *   Item specification.
2025  * @param[in] attr
2026  *   Attributes of flow that includes this item.
2027  * @param[out] error
2028  *   Pointer to error structure.
2029  *
2030  * @return
2031  *   0 on success, a negative errno value otherwise and rte_errno is set.
2032  */
2033 static int
2034 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2035                           const struct rte_flow_item *item,
2036                           const struct rte_flow_attr *attr __rte_unused,
2037                           struct rte_flow_error *error)
2038 {
2039         const struct rte_flow_item_tag *spec = item->spec;
2040         const struct rte_flow_item_tag *mask = item->mask;
2041         const struct rte_flow_item_tag nic_mask = {
2042                 .data = RTE_BE32(UINT32_MAX),
2043                 .index = 0xff,
2044         };
2045         int ret;
2046
2047         if (!mlx5_flow_ext_mreg_supported(dev))
2048                 return rte_flow_error_set(error, ENOTSUP,
2049                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2050                                           "extended metadata register"
2051                                           " isn't supported");
2052         if (!spec)
2053                 return rte_flow_error_set(error, EINVAL,
2054                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2055                                           item->spec,
2056                                           "data cannot be empty");
2057         if (!mask)
2058                 mask = &rte_flow_item_tag_mask;
2059         if (!mask->data)
2060                 return rte_flow_error_set(error, EINVAL,
2061                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2062                                         "mask cannot be zero");
2063
2064         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2065                                         (const uint8_t *)&nic_mask,
2066                                         sizeof(struct rte_flow_item_tag),
2067                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2068         if (ret < 0)
2069                 return ret;
2070         if (mask->index != 0xff)
2071                 return rte_flow_error_set(error, EINVAL,
2072                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2073                                           "partial mask for tag index"
2074                                           " is not supported");
2075         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2076         if (ret < 0)
2077                 return ret;
2078         MLX5_ASSERT(ret != REG_NON);
2079         return 0;
2080 }
2081
2082 /**
2083  * Validate vport item.
2084  *
2085  * @param[in] dev
2086  *   Pointer to the rte_eth_dev structure.
2087  * @param[in] item
2088  *   Item specification.
2089  * @param[in] attr
2090  *   Attributes of flow that includes this item.
2091  * @param[in] item_flags
2092  *   Bit-fields that hold the items detected until now.
2093  * @param[out] error
2094  *   Pointer to error structure.
2095  *
2096  * @return
2097  *   0 on success, a negative errno value otherwise and rte_errno is set.
2098  */
2099 static int
2100 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2101                               const struct rte_flow_item *item,
2102                               const struct rte_flow_attr *attr,
2103                               uint64_t item_flags,
2104                               struct rte_flow_error *error)
2105 {
2106         const struct rte_flow_item_port_id *spec = item->spec;
2107         const struct rte_flow_item_port_id *mask = item->mask;
2108         const struct rte_flow_item_port_id switch_mask = {
2109                         .id = 0xffffffff,
2110         };
2111         struct mlx5_priv *esw_priv;
2112         struct mlx5_priv *dev_priv;
2113         int ret;
2114
2115         if (!attr->transfer)
2116                 return rte_flow_error_set(error, EINVAL,
2117                                           RTE_FLOW_ERROR_TYPE_ITEM,
2118                                           NULL,
2119                                           "match on port id is valid only"
2120                                           " when transfer flag is enabled");
2121         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2122                 return rte_flow_error_set(error, ENOTSUP,
2123                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2124                                           "multiple source ports are not"
2125                                           " supported");
2126         if (!mask)
2127                 mask = &switch_mask;
2128         if (mask->id != 0xffffffff)
2129                 return rte_flow_error_set(error, ENOTSUP,
2130                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2131                                            mask,
2132                                            "no support for partial mask on"
2133                                            " \"id\" field");
2134         ret = mlx5_flow_item_acceptable
2135                                 (item, (const uint8_t *)mask,
2136                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2137                                  sizeof(struct rte_flow_item_port_id),
2138                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2139         if (ret)
2140                 return ret;
2141         if (!spec)
2142                 return 0;
2143         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2144         if (!esw_priv)
2145                 return rte_flow_error_set(error, rte_errno,
2146                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2147                                           "failed to obtain E-Switch info for"
2148                                           " port");
2149         dev_priv = mlx5_dev_to_eswitch_info(dev);
2150         if (!dev_priv)
2151                 return rte_flow_error_set(error, rte_errno,
2152                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2153                                           NULL,
2154                                           "failed to obtain E-Switch info");
2155         if (esw_priv->domain_id != dev_priv->domain_id)
2156                 return rte_flow_error_set(error, EINVAL,
2157                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2158                                           "cannot match on a port from a"
2159                                           " different E-Switch");
2160         return 0;
2161 }
2162
2163 /**
2164  * Validate VLAN item.
2165  *
2166  * @param[in] item
2167  *   Item specification.
2168  * @param[in] item_flags
2169  *   Bit-fields that hold the items detected until now.
2170  * @param[in] dev
2171  *   Ethernet device flow is being created on.
2172  * @param[out] error
2173  *   Pointer to error structure.
2174  *
2175  * @return
2176  *   0 on success, a negative errno value otherwise and rte_errno is set.
2177  */
2178 static int
2179 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2180                            uint64_t item_flags,
2181                            struct rte_eth_dev *dev,
2182                            struct rte_flow_error *error)
2183 {
2184         const struct rte_flow_item_vlan *mask = item->mask;
2185         const struct rte_flow_item_vlan nic_mask = {
2186                 .tci = RTE_BE16(UINT16_MAX),
2187                 .inner_type = RTE_BE16(UINT16_MAX),
2188                 .has_more_vlan = 1,
2189         };
2190         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2191         int ret;
2192         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2193                                         MLX5_FLOW_LAYER_INNER_L4) :
2194                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2195                                         MLX5_FLOW_LAYER_OUTER_L4);
2196         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2197                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2198
2199         if (item_flags & vlanm)
2200                 return rte_flow_error_set(error, EINVAL,
2201                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2202                                           "multiple VLAN layers not supported");
2203         else if ((item_flags & l34m) != 0)
2204                 return rte_flow_error_set(error, EINVAL,
2205                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2206                                           "VLAN cannot follow L3/L4 layer");
2207         if (!mask)
2208                 mask = &rte_flow_item_vlan_mask;
2209         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2210                                         (const uint8_t *)&nic_mask,
2211                                         sizeof(struct rte_flow_item_vlan),
2212                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2213         if (ret)
2214                 return ret;
2215         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2216                 struct mlx5_priv *priv = dev->data->dev_private;
2217
2218                 if (priv->vmwa_context) {
2219                         /*
2220                          * A non-NULL context means we have a virtual machine
2221                          * and SR-IOV enabled, so we have to create a VLAN
2222                          * interface to make the hypervisor set up the E-Switch
2223                          * vport context correctly. We avoid creating multiple
2224                          * VLAN interfaces, so we cannot support a VLAN tag mask.
2225                          */
2226                         return rte_flow_error_set(error, EINVAL,
2227                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2228                                                   item,
2229                                                   "VLAN tag mask is not"
2230                                                   " supported in virtual"
2231                                                   " environment");
2232                 }
2233         }
2234         return 0;
2235 }
2236
2237 /*
2238  * GTP flags are contained in 1 byte of the format:
2239  * -------------------------------------------
2240  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2241  * |-----------------------------------------|
2242  * | value | Version | PT | Res | E | S | PN |
2243  * -------------------------------------------
2244  *
2245  * Matching is supported only for GTP flags E, S, PN.
2246  */
2247 #define MLX5_GTP_FLAGS_MASK     0x07
2248
2249 /**
2250  * Validate GTP item.
2251  *
2252  * @param[in] dev
2253  *   Pointer to the rte_eth_dev structure.
2254  * @param[in] item
2255  *   Item specification.
2256  * @param[in] item_flags
2257  *   Bit-fields that hold the items detected until now.
2258  * @param[out] error
2259  *   Pointer to error structure.
2260  *
2261  * @return
2262  *   0 on success, a negative errno value otherwise and rte_errno is set.
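 *
 * A minimal illustrative item accepted here (a sketch; 0x04 is the GTP E
 * flag bit and lies within MLX5_GTP_FLAGS_MASK) matches one G-PDU tunnel:
 *
 *   struct rte_flow_item_gtp spec = {
 *           .v_pt_rsv_flags = 0x04,
 *           .msg_type = 0xff,
 *           .teid = RTE_BE32(1234),
 *   };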
2263  */
2264 static int
2265 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2266                           const struct rte_flow_item *item,
2267                           uint64_t item_flags,
2268                           struct rte_flow_error *error)
2269 {
2270         struct mlx5_priv *priv = dev->data->dev_private;
2271         const struct rte_flow_item_gtp *spec = item->spec;
2272         const struct rte_flow_item_gtp *mask = item->mask;
2273         const struct rte_flow_item_gtp nic_mask = {
2274                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2275                 .msg_type = 0xff,
2276                 .teid = RTE_BE32(0xffffffff),
2277         };
2278
2279         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2280                 return rte_flow_error_set(error, ENOTSUP,
2281                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2282                                           "GTP support is not enabled");
2283         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2284                 return rte_flow_error_set(error, ENOTSUP,
2285                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2286                                           "multiple tunnel layers not"
2287                                           " supported");
2288         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2289                 return rte_flow_error_set(error, EINVAL,
2290                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2291                                           "no outer UDP layer found");
2292         if (!mask)
2293                 mask = &rte_flow_item_gtp_mask;
2294         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2295                 return rte_flow_error_set(error, ENOTSUP,
2296                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2297                                           "Match is supported for GTP"
2298                                           " flags only");
2299         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2300                                          (const uint8_t *)&nic_mask,
2301                                          sizeof(struct rte_flow_item_gtp),
2302                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2303 }
2304
2305 /**
2306  * Validate GTP PSC item.
2307  *
2308  * @param[in] item
2309  *   Item specification.
2310  * @param[in] last_item
2311  *   Previously validated item in the pattern items.
2312  * @param[in] gtp_item
2313  *   Previous GTP item specification.
2314  * @param[in] attr
2315  *   Pointer to flow attributes.
2316  * @param[out] error
2317  *   Pointer to error structure.
2318  *
2319  * @return
2320  *   0 on success, a negative errno value otherwise and rte_errno is set.
2321  */
2322 static int
2323 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2324                               uint64_t last_item,
2325                               const struct rte_flow_item *gtp_item,
2326                               const struct rte_flow_attr *attr,
2327                               struct rte_flow_error *error)
2328 {
2329         const struct rte_flow_item_gtp *gtp_spec;
2330         const struct rte_flow_item_gtp *gtp_mask;
2331         const struct rte_flow_item_gtp_psc *spec;
2332         const struct rte_flow_item_gtp_psc *mask;
2333         const struct rte_flow_item_gtp_psc nic_mask = {
2334                 .pdu_type = 0xFF,
2335                 .qfi = 0xFF,
2336         };
2337
2338         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2339                 return rte_flow_error_set
2340                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2341                          "GTP PSC item must be preceded by a GTP item");
2342         gtp_spec = gtp_item->spec;
2343         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2344         /* Reject a GTP spec whose mask requests the E flag to match zero. */
2345         if (gtp_spec &&
2346                 (gtp_mask->v_pt_rsv_flags &
2347                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2348                 return rte_flow_error_set
2349                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2350                          "GTP E flag must be 1 to match GTP PSC");
2351         /* Check the flow is not created in group zero. */
2352         if (!attr->transfer && !attr->group)
2353                 return rte_flow_error_set
2354                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2355                          "GTP PSC is not supported for group 0");
2356         /* The GTP PSC item spec is optional; nothing more to check without it. */
2357         if (!item->spec)
2358                 return 0;
2359         spec = item->spec;
2360         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2361         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2362                 return rte_flow_error_set
2363                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                          "PDU type should be smaller than 16");
2365         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2366                                          (const uint8_t *)&nic_mask,
2367                                          sizeof(struct rte_flow_item_gtp_psc),
2368                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2369 }
2370
2371 /**
2372  * Validate IPV4 item.
2373  * Use the existing validation function mlx5_flow_validate_item_ipv4(), and
2374  * add specific validation of the fragment_offset field.
2375  *
2376  * @param[in] item
2377  *   Item specification.
2378  * @param[in] item_flags
2379  *   Bit-fields that hold the items detected until now.
2380  * @param[out] error
2381  *   Pointer to error structure.
2382  *
2383  * @return
2384  *   0 on success, a negative errno value otherwise and rte_errno is set.
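 *
 * In short, when fragment_offset is matched with a full mask, only
 * spec 0x0001 with last 0x3fff ("any fragment") is accepted; matches on
 * first, following and last fragments are rejected below.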
2385  */
2386 static int
2387 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2388                            uint64_t item_flags,
2389                            uint64_t last_item,
2390                            uint16_t ether_type,
2391                            struct rte_flow_error *error)
2392 {
2393         int ret;
2394         const struct rte_flow_item_ipv4 *spec = item->spec;
2395         const struct rte_flow_item_ipv4 *last = item->last;
2396         const struct rte_flow_item_ipv4 *mask = item->mask;
2397         rte_be16_t fragment_offset_spec = 0;
2398         rte_be16_t fragment_offset_last = 0;
2399         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2400                 .hdr = {
2401                         .src_addr = RTE_BE32(0xffffffff),
2402                         .dst_addr = RTE_BE32(0xffffffff),
2403                         .type_of_service = 0xff,
2404                         .fragment_offset = RTE_BE16(0xffff),
2405                         .next_proto_id = 0xff,
2406                         .time_to_live = 0xff,
2407                 },
2408         };
2409
2410         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2411                                            ether_type, &nic_ipv4_mask,
2412                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2413         if (ret < 0)
2414                 return ret;
2415         if (spec && mask)
2416                 fragment_offset_spec = spec->hdr.fragment_offset &
2417                                        mask->hdr.fragment_offset;
2418         if (!fragment_offset_spec)
2419                 return 0;
2420         /*
2421          * spec and mask are valid, enforce using full mask to make sure the
2422          * complete value is used correctly.
2423          */
2424         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2425                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2426                 return rte_flow_error_set(error, EINVAL,
2427                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2428                                           item, "must use full mask for"
2429                                           " fragment_offset");
2430         /*
2431          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2432          * indicating this is the 1st fragment of a fragmented packet.
2433          * This is not yet supported in MLX5, return an appropriate error.
2434          */
2435         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2436                 return rte_flow_error_set(error, ENOTSUP,
2437                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2438                                           "match on first fragment not "
2439                                           "supported");
2440         if (fragment_offset_spec && !last)
2441                 return rte_flow_error_set(error, ENOTSUP,
2442                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2443                                           "specified value not supported");
2444         /* spec and last are valid, validate the specified range. */
2445         fragment_offset_last = last->hdr.fragment_offset &
2446                                mask->hdr.fragment_offset;
2447         /*
2448          * Match on fragment_offset spec 0x2001 and last 0x3fff
2449          * means MF is 1 and frag-offset is > 0.
2450          * Such a packet is the 2nd fragment onward, excluding the last.
2451          * This is not yet supported in MLX5, return an appropriate
2452          * error message.
2453          */
2454         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2455             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2456                 return rte_flow_error_set(error, ENOTSUP,
2457                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2458                                           last, "match on following "
2459                                           "fragments not supported");
2460         /*
2461          * Match on fragment_offset spec 0x0001 and last 0x1fff
2462          * means MF is 0 and frag-offset is > 0.
2463          * Such a packet is the last fragment of a fragmented packet.
2464          * This is not yet supported in MLX5, return appropriate
2465          * error message.
2466          */
2467         if (fragment_offset_spec == RTE_BE16(1) &&
2468             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2469                 return rte_flow_error_set(error, ENOTSUP,
2470                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2471                                           last, "match on last "
2472                                           "fragment not supported");
2473         /*
2474          * Match on fragment_offset spec 0x0001 and last 0x3fff
2475          * means MF and/or frag-offset is not 0.
2476          * This is a fragmented packet.
2477          * Other range values are invalid and rejected.
2478          */
2479         if (!(fragment_offset_spec == RTE_BE16(1) &&
2480               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2481                 return rte_flow_error_set(error, ENOTSUP,
2482                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2483                                           "specified range not supported");
2484         return 0;
2485 }
2486
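/*
 * Illustrative usage (editor's sketch, not part of the driver): the only
 * spec/last range the validation above accepts is spec 0x0001 with last
 * 0x3fff under a full 0x3fff mask, i.e. "any fragment". An application
 * could express that as:
 *
 *     const struct rte_flow_item_ipv4 frag_spec = {
 *             .hdr = { .fragment_offset = RTE_BE16(1) },
 *     };
 *     const struct rte_flow_item_ipv4 frag_last = {
 *             .hdr = { .fragment_offset = RTE_BE16(RTE_IPV4_HDR_MF_FLAG |
 *                                                  RTE_IPV4_HDR_OFFSET_MASK) },
 *     };
 *     const struct rte_flow_item_ipv4 frag_mask = frag_last;
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &frag_spec,
 *             .last = &frag_last,
 *             .mask = &frag_mask,
 *     };
 */
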
2487 /**
2488  * Validate IPV6 fragment extension item.
2489  *
2490  * @param[in] item
2491  *   Item specification.
2492  * @param[in] item_flags
2493  *   Bit-fields that holds the items detected until now.
2494  * @param[out] error
2495  *   Pointer to error structure.
2496  *
2497  * @return
2498  *   0 on success, a negative errno value otherwise and rte_errno is set.
2499  */
2500 static int
2501 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2502                                     uint64_t item_flags,
2503                                     struct rte_flow_error *error)
2504 {
2505         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2506         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2507         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2508         rte_be16_t frag_data_spec = 0;
2509         rte_be16_t frag_data_last = 0;
2510         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2511         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2512                                       MLX5_FLOW_LAYER_OUTER_L4;
2513         int ret = 0;
2514         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2515                 .hdr = {
2516                         .next_header = 0xff,
2517                         .frag_data = RTE_BE16(0xffff),
2518                 },
2519         };
2520
2521         if (item_flags & l4m)
2522                 return rte_flow_error_set(error, EINVAL,
2523                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2524                                           "ipv6 fragment extension item cannot "
2525                                           "follow L4 item.");
2526         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2527             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2528                 return rte_flow_error_set(error, EINVAL,
2529                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2530                                           "ipv6 fragment extension item must "
2531                                           "follow ipv6 item");
2532         if (spec && mask)
2533                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2534         if (!frag_data_spec)
2535                 return 0;
2536         /*
2537          * spec and mask are valid, enforce using full mask to make sure the
2538          * complete value is used correctly.
2539          */
2540         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2541                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2542                 return rte_flow_error_set(error, EINVAL,
2543                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2544                                           item, "must use full mask for"
2545                                           " frag_data");
2546         /*
2547          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0, i.e.
2548          * the first fragment; not yet supported in MLX5.
2549          */
2550         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2551                 return rte_flow_error_set(error, ENOTSUP,
2552                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2553                                           "match on first fragment not "
2554                                           "supported");
2555         if (frag_data_spec && !last)
2556                 return rte_flow_error_set(error, EINVAL,
2557                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2558                                           "specified value not supported");
2559         ret = mlx5_flow_item_acceptable
2560                                 (item, (const uint8_t *)mask,
2561                                  (const uint8_t *)&nic_mask,
2562                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2563                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2564         if (ret)
2565                 return ret;
2566         /* spec and last are valid, validate the specified range. */
2567         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2568         /*
2569          * Match on frag_data spec 0x0009 and last 0xfff9
2570          * means M is 1 and frag-offset is > 0.
2571          * This matches the 2nd fragment and onward, excluding the last.
2572          * This is not yet supported in MLX5, return an appropriate
2573          * error message.
2574          */
2575         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2576                                        RTE_IPV6_EHDR_MF_MASK) &&
2577             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2578                 return rte_flow_error_set(error, ENOTSUP,
2579                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2580                                           last, "match on following "
2581                                           "fragments not supported");
2582         /*
2583          * Match on frag_data spec 0x0008 and last 0xfff8
2584          * means M is 0 and frag-offset is > 0.
2585          * This matches the last fragment of a fragmented packet.
2586          * This is not yet supported in MLX5, return an appropriate
2587          * error message.
2588          */
2589         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2590             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2591                 return rte_flow_error_set(error, ENOTSUP,
2592                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2593                                           last, "match on last "
2594                                           "fragment not supported");
2595         /* Other range values are invalid and rejected. */
2596         return rte_flow_error_set(error, EINVAL,
2597                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2598                                   "specified range not supported");
2599 }
2600
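/*
 * Illustrative usage (editor's sketch, not part of the driver): the
 * simplest form the validation above accepts is matching on the mere
 * presence of the fragment extension header (NULL spec), i.e. "any
 * IPv6 fragment":
 *
 *     const struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */
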
2601 /**
2602  * Validate the pop VLAN action.
2603  *
2604  * @param[in] dev
2605  *   Pointer to the rte_eth_dev structure.
2606  * @param[in] action_flags
2607  *   Holds the actions detected until now.
2608  * @param[in] action
2609  *   Pointer to the pop vlan action.
2610  * @param[in] item_flags
2611  *   The items found in this flow rule.
2612  * @param[in] attr
2613  *   Pointer to flow attributes.
2614  * @param[out] error
2615  *   Pointer to error structure.
2616  *
2617  * @return
2618  *   0 on success, a negative errno value otherwise and rte_errno is set.
2619  */
2620 static int
2621 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2622                                  uint64_t action_flags,
2623                                  const struct rte_flow_action *action,
2624                                  uint64_t item_flags,
2625                                  const struct rte_flow_attr *attr,
2626                                  struct rte_flow_error *error)
2627 {
2628         const struct mlx5_priv *priv = dev->data->dev_private;
2629
2632         if (!priv->sh->pop_vlan_action)
2633                 return rte_flow_error_set(error, ENOTSUP,
2634                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2635                                           NULL,
2636                                           "pop vlan action is not supported");
2637         if (attr->egress)
2638                 return rte_flow_error_set(error, ENOTSUP,
2639                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2640                                           NULL,
2641                                           "pop vlan action not supported for "
2642                                           "egress");
2643         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2644                 return rte_flow_error_set(error, ENOTSUP,
2645                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2646                                           "no support for multiple VLAN "
2647                                           "actions");
2648         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2649         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2650             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2651                 return rte_flow_error_set(error, ENOTSUP,
2652                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2653                                           NULL,
2654                                           "cannot pop vlan after decap without "
2655                                           "match on inner vlan in the flow");
2656         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2657         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2658             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2659                 return rte_flow_error_set(error, ENOTSUP,
2660                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2661                                           NULL,
2662                                           "cannot pop vlan without a "
2663                                           "match on (outer) vlan in the flow");
2664         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2665                 return rte_flow_error_set(error, EINVAL,
2666                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2667                                           "wrong action order, port_id should "
2668                                           "be after pop VLAN action");
2669         if (!attr->transfer && priv->representor)
2670                 return rte_flow_error_set(error, ENOTSUP,
2671                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2672                                           "pop vlan action for VF representor "
2673                                           "not supported on NIC table");
2674         return 0;
2675 }
2676
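/*
 * Illustrative usage (editor's sketch, not part of the driver): pop VLAN
 * needs a VLAN match in the pattern and must precede port_id, per the
 * checks above:
 *
 *     const struct rte_flow_action_port_id port = { .id = 1 };
 *     const struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &port },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
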
2677 /**
2678  * Get the default VLAN info from the VLAN match info in the item list.
2679  *
2680  * @param[in] items
2681  *   The list of item specifications.
2682  * @param[out] vlan
2683  *   Pointer to the VLAN info to fill.
2684  */
2688 static void
2689 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2690                                   struct rte_vlan_hdr *vlan)
2691 {
2692         const struct rte_flow_item_vlan nic_mask = {
2693                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2694                                 MLX5DV_FLOW_VLAN_VID_MASK),
2695                 .inner_type = RTE_BE16(0xffff),
2696         };
2697
2698         if (items == NULL)
2699                 return;
2700         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2701                 int type = items->type;
2702
2703                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2704                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2705                         break;
2706         }
2707         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2708                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2709                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2710
2711                 /* If VLAN item in pattern doesn't contain data, return here. */
2712                 if (!vlan_v)
2713                         return;
2714                 if (!vlan_m)
2715                         vlan_m = &nic_mask;
2716                 /* Only full match values are accepted */
2717                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2718                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2719                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2720                         vlan->vlan_tci |=
2721                                 rte_be_to_cpu_16(vlan_v->tci &
2722                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2723                 }
2724                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2725                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2726                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2727                         vlan->vlan_tci |=
2728                                 rte_be_to_cpu_16(vlan_v->tci &
2729                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2730                 }
2731                 if (vlan_m->inner_type == nic_mask.inner_type)
2732                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2733                                                            vlan_m->inner_type);
2734         }
2735 }
2736
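/*
 * Illustrative usage (editor's sketch): a VLAN item with the TCI fully
 * masked on both PCP and VID lets the helper above recover the complete
 * default tag (here PCP 3, VID 100):
 *
 *     const struct rte_flow_item_vlan vlan_spec = {
 *             .tci = RTE_BE16((3 << 13) | 100),
 *     };
 *     const struct rte_flow_item_vlan vlan_mask = {
 *             .tci = RTE_BE16(0xe000 | 0x0fff),
 *     };
 */
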
2737 /**
2738  * Validate the push VLAN action.
2739  *
2740  * @param[in] dev
2741  *   Pointer to the rte_eth_dev structure.
2742  * @param[in] action_flags
2743  *   Holds the actions detected until now.
2744  * @param[in] vlan_m
2745  *   Pointer to the VLAN item mask from the pattern, NULL if none.
2746  * @param[in] action
2747  *   Pointer to the action structure.
2748  * @param[in] attr
2749  *   Pointer to flow attributes.
2750  * @param[out] error
2751  *   Pointer to error structure.
2752  *
2753  * @return
2754  *   0 on success, a negative errno value otherwise and rte_errno is set.
2755  */
2756 static int
2757 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2758                                   uint64_t action_flags,
2759                                   const struct rte_flow_item_vlan *vlan_m,
2760                                   const struct rte_flow_action *action,
2761                                   const struct rte_flow_attr *attr,
2762                                   struct rte_flow_error *error)
2763 {
2764         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2765         const struct mlx5_priv *priv = dev->data->dev_private;
2766
2767         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2768             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2769                 return rte_flow_error_set(error, EINVAL,
2770                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2771                                           "invalid vlan ethertype");
2772         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2773                 return rte_flow_error_set(error, EINVAL,
2774                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2775                                           "wrong action order, port_id should "
2776                                           "be after push VLAN");
2777         if (!attr->transfer && priv->representor)
2778                 return rte_flow_error_set(error, ENOTSUP,
2779                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2780                                           "push vlan action for VF representor "
2781                                           "not supported on NIC table");
2782         if (vlan_m &&
2783             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2784             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2785                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2786             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2787             !(mlx5_flow_find_action
2788                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2789                 return rte_flow_error_set(error, EINVAL,
2790                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2791                                           "not full match mask on VLAN PCP and "
2792                                           "there is no of_set_vlan_pcp action, "
2793                                           "push VLAN action cannot figure out "
2794                                           "PCP value");
2795         if (vlan_m &&
2796             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2797             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2798                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2799             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2800             !(mlx5_flow_find_action
2801                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2802                 return rte_flow_error_set(error, EINVAL,
2803                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2804                                           "not full match mask on VLAN VID and "
2805                                           "there is no of_set_vlan_vid action, "
2806                                           "push VLAN action cannot figure out "
2807                                           "VID value");
2809         return 0;
2810 }
2811
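/*
 * Illustrative action ordering (editor's sketch): push VLAN must precede
 * port_id, and when the matched VLAN mask does not fully cover PCP or VID
 * an of_set_vlan_* action must follow the push, per the checks above:
 *
 *     const struct rte_flow_action_of_push_vlan push = {
 *             .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *     };
 *     const struct rte_flow_action_of_set_vlan_vid vid = {
 *             .vlan_vid = RTE_BE16(100),
 *     };
 *     const struct rte_flow_action_port_id port = { .id = 1 };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &port },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
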
2812 /**
2813  * Validate the set VLAN PCP.
2814  *
2815  * @param[in] action_flags
2816  *   Holds the actions detected until now.
2817  * @param[in] actions
2818  *   Pointer to the list of actions remaining in the flow rule.
2819  * @param[out] error
2820  *   Pointer to error structure.
2821  *
2822  * @return
2823  *   0 on success, a negative errno value otherwise and rte_errno is set.
2824  */
2825 static int
2826 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2827                                      const struct rte_flow_action actions[],
2828                                      struct rte_flow_error *error)
2829 {
2830         const struct rte_flow_action *action = actions;
2831         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2832
2833         if (conf->vlan_pcp > 7)
2834                 return rte_flow_error_set(error, EINVAL,
2835                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2836                                           "VLAN PCP value is too big");
2837         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2838                 return rte_flow_error_set(error, ENOTSUP,
2839                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2840                                           "set VLAN PCP action must follow "
2841                                           "the push VLAN action");
2842         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2843                 return rte_flow_error_set(error, ENOTSUP,
2844                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2845                                           "Multiple VLAN PCP modification are "
2846                                           "Multiple VLAN PCP modifications are "
2847         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2848                 return rte_flow_error_set(error, EINVAL,
2849                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2850                                           "wrong action order, port_id should "
2851                                           "be after set VLAN PCP");
2852         return 0;
2853 }
2854
2855 /**
2856  * Validate the set VLAN VID.
2857  *
2858  * @param[in] item_flags
2859  *   Holds the items detected in this rule.
2860  * @param[in] action_flags
2861  *   Holds the actions detected until now.
2862  * @param[in] actions
2863  *   Pointer to the list of actions remaining in the flow rule.
2864  * @param[out] error
2865  *   Pointer to error structure.
2866  *
2867  * @return
2868  *   0 on success, a negative errno value otherwise and rte_errno is set.
2869  */
2870 static int
2871 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2872                                      uint64_t action_flags,
2873                                      const struct rte_flow_action actions[],
2874                                      struct rte_flow_error *error)
2875 {
2876         const struct rte_flow_action *action = actions;
2877         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2878
2879         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2880                 return rte_flow_error_set(error, EINVAL,
2881                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2882                                           "VLAN VID value is too big");
2883         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2884             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2885                 return rte_flow_error_set(error, ENOTSUP,
2886                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2887                                           "set VLAN VID action must follow push"
2888                                           " VLAN action or match on VLAN item");
2889         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2890                 return rte_flow_error_set(error, ENOTSUP,
2891                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2892                                           "Multiple VLAN VID modifications are "
2893                                           "not supported");
2894         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2895                 return rte_flow_error_set(error, EINVAL,
2896                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2897                                           "wrong action order, port_id should "
2898                                           "be after set VLAN VID");
2899         return 0;
2900 }
2901
2902 /**
2903  * Validate the FLAG action.
2904  *
2905  * @param[in] dev
2906  *   Pointer to the rte_eth_dev structure.
2907  * @param[in] action_flags
2908  *   Holds the actions detected until now.
2909  * @param[in] attr
2910  *   Pointer to flow attributes
2911  * @param[out] error
2912  *   Pointer to error structure.
2913  *
2914  * @return
2915  *   0 on success, a negative errno value otherwise and rte_errno is set.
2916  */
2917 static int
2918 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2919                              uint64_t action_flags,
2920                              const struct rte_flow_attr *attr,
2921                              struct rte_flow_error *error)
2922 {
2923         struct mlx5_priv *priv = dev->data->dev_private;
2924         struct mlx5_dev_config *config = &priv->config;
2925         int ret;
2926
2927         /* Fall back if no extended metadata register support. */
2928         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2929                 return mlx5_flow_validate_action_flag(action_flags, attr,
2930                                                       error);
2931         /* Extensive metadata mode requires registers. */
2932         if (!mlx5_flow_ext_mreg_supported(dev))
2933                 return rte_flow_error_set(error, ENOTSUP,
2934                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2935                                           "no metadata registers "
2936                                           "to support flag action");
2937         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2938                 return rte_flow_error_set(error, ENOTSUP,
2939                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2940                                           "extended metadata register"
2941                                           " isn't available");
2942         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2943         if (ret < 0)
2944                 return ret;
2945         MLX5_ASSERT(ret > 0);
2946         if (action_flags & MLX5_FLOW_ACTION_MARK)
2947                 return rte_flow_error_set(error, EINVAL,
2948                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2949                                           "can't mark and flag in same flow");
2950         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2951                 return rte_flow_error_set(error, EINVAL,
2952                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2953                                           "can't have 2 flag"
2954                                           " actions in same flow");
2955         return 0;
2956 }
2957
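/*
 * Context note (editor's addition): the metadata mode branched on above is
 * selected with the mlx5 "dv_xmeta_en" devarg, e.g. (PCI address assumed):
 *
 *     dpdk-testpmd -a 0000:03:00.0,dv_xmeta_en=1 -- -i
 *
 * where 0 keeps MLX5_XMETA_MODE_LEGACY and non-zero modes consume metadata
 * registers for MARK/FLAG and META.
 */
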
2958 /**
2959  * Validate MARK action.
2960  *
2961  * @param[in] dev
2962  *   Pointer to the rte_eth_dev structure.
2963  * @param[in] action
2964  *   Pointer to action.
2965  * @param[in] action_flags
2966  *   Holds the actions detected until now.
2967  * @param[in] attr
2968  *   Pointer to flow attributes
2969  * @param[out] error
2970  *   Pointer to error structure.
2971  *
2972  * @return
2973  *   0 on success, a negative errno value otherwise and rte_errno is set.
2974  */
2975 static int
2976 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2977                              const struct rte_flow_action *action,
2978                              uint64_t action_flags,
2979                              const struct rte_flow_attr *attr,
2980                              struct rte_flow_error *error)
2981 {
2982         struct mlx5_priv *priv = dev->data->dev_private;
2983         struct mlx5_dev_config *config = &priv->config;
2984         const struct rte_flow_action_mark *mark = action->conf;
2985         int ret;
2986
2987         if (is_tunnel_offload_active(dev))
2988                 return rte_flow_error_set(error, ENOTSUP,
2989                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2990                                           "no mark action "
2991                                           "if tunnel offload active");
2992         /* Fall back if no extended metadata register support. */
2993         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2994                 return mlx5_flow_validate_action_mark(action, action_flags,
2995                                                       attr, error);
2996         /* Extensive metadata mode requires registers. */
2997         if (!mlx5_flow_ext_mreg_supported(dev))
2998                 return rte_flow_error_set(error, ENOTSUP,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3000                                           "no metadata registers "
3001                                           "to support mark action");
3002         if (!priv->sh->dv_mark_mask)
3003                 return rte_flow_error_set(error, ENOTSUP,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3005                                           "extended metadata register"
3006                                           " isn't available");
3007         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3008         if (ret < 0)
3009                 return ret;
3010         MLX5_ASSERT(ret > 0);
3011         if (!mark)
3012                 return rte_flow_error_set(error, EINVAL,
3013                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3014                                           "configuration cannot be null");
3015         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3016                 return rte_flow_error_set(error, EINVAL,
3017                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3018                                           &mark->id,
3019                                           "mark id exceeds the limit");
3020         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3021                 return rte_flow_error_set(error, EINVAL,
3022                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3023                                           "can't flag and mark in same flow");
3024         if (action_flags & MLX5_FLOW_ACTION_MARK)
3025                 return rte_flow_error_set(error, EINVAL,
3026                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3027                                           "can't have 2 mark actions in same"
3028                                           " flow");
3029         return 0;
3030 }
3031
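/*
 * Illustrative usage (editor's sketch): a MARK action with an id below the
 * limit enforced above (MLX5_FLOW_MARK_MAX masked by dv_mark_mask):
 *
 *     const struct rte_flow_action_mark mark = { .id = 0xbeef };
 *     const struct rte_flow_action_queue queue = { .index = 0 };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
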
3032 /**
3033  * Validate SET_META action.
3034  *
3035  * @param[in] dev
3036  *   Pointer to the rte_eth_dev structure.
3037  * @param[in] action
3038  *   Pointer to the action structure.
3039  * @param[in] action_flags
3040  *   Holds the actions detected until now.
3041  * @param[in] attr
3042  *   Pointer to flow attributes
3043  * @param[out] error
3044  *   Pointer to error structure.
3045  *
3046  * @return
3047  *   0 on success, a negative errno value otherwise and rte_errno is set.
3048  */
3049 static int
3050 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3051                                  const struct rte_flow_action *action,
3052                                  uint64_t action_flags __rte_unused,
3053                                  const struct rte_flow_attr *attr,
3054                                  struct rte_flow_error *error)
3055 {
3056         const struct rte_flow_action_set_meta *conf;
3057         uint32_t nic_mask = UINT32_MAX;
3058         int reg;
3059
3060         if (!mlx5_flow_ext_mreg_supported(dev))
3061                 return rte_flow_error_set(error, ENOTSUP,
3062                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3063                                           "extended metadata register"
3064                                           " isn't supported");
3065         reg = flow_dv_get_metadata_reg(dev, attr, error);
3066         if (reg < 0)
3067                 return reg;
3068         if (reg == REG_NON)
3069                 return rte_flow_error_set(error, ENOTSUP,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3071                                           "unavailable extended metadata register");
3072         if (reg != REG_A && reg != REG_B) {
3073                 struct mlx5_priv *priv = dev->data->dev_private;
3074
3075                 nic_mask = priv->sh->dv_meta_mask;
3076         }
3077         if (!(action->conf))
3078                 return rte_flow_error_set(error, EINVAL,
3079                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3080                                           "configuration cannot be null");
3081         conf = (const struct rte_flow_action_set_meta *)action->conf;
3082         if (!conf->mask)
3083                 return rte_flow_error_set(error, EINVAL,
3084                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3085                                           "zero mask doesn't have any effect");
3086         if (conf->mask & ~nic_mask)
3087                 return rte_flow_error_set(error, EINVAL,
3088                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3089                                           "metadata must be within reg C0");
3090         return 0;
3091 }
3092
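/*
 * Illustrative usage (editor's sketch): setting the low 16 bits of the
 * flow metadata; a zero mask is rejected above and, outside REG_A/REG_B,
 * the mask must fit dv_meta_mask (reg C0):
 *
 *     const struct rte_flow_action_set_meta meta = {
 *             .data = 0x1234,
 *             .mask = 0xffff,
 *     };
 *     const struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_SET_META, .conf = &meta,
 *     };
 */
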
3093 /**
3094  * Validate SET_TAG action.
3095  *
3096  * @param[in] dev
3097  *   Pointer to the rte_eth_dev structure.
3098  * @param[in] action
3099  *   Pointer to the action structure.
3100  * @param[in] action_flags
3101  *   Holds the actions detected until now.
3102  * @param[in] attr
3103  *   Pointer to flow attributes
3104  * @param[out] error
3105  *   Pointer to error structure.
3106  *
3107  * @return
3108  *   0 on success, a negative errno value otherwise and rte_errno is set.
3109  */
3110 static int
3111 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3112                                 const struct rte_flow_action *action,
3113                                 uint64_t action_flags,
3114                                 const struct rte_flow_attr *attr,
3115                                 struct rte_flow_error *error)
3116 {
3117         const struct rte_flow_action_set_tag *conf;
3118         const uint64_t terminal_action_flags =
3119                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3120                 MLX5_FLOW_ACTION_RSS;
3121         int ret;
3122
3123         if (!mlx5_flow_ext_mreg_supported(dev))
3124                 return rte_flow_error_set(error, ENOTSUP,
3125                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3126                                           "extensive metadata register"
3127                                           " isn't supported");
3128         if (!(action->conf))
3129                 return rte_flow_error_set(error, EINVAL,
3130                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3131                                           "configuration cannot be null");
3132         conf = (const struct rte_flow_action_set_tag *)action->conf;
3133         if (!conf->mask)
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3136                                           "zero mask doesn't have any effect");
3137         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3138         if (ret < 0)
3139                 return ret;
3140         if (!attr->transfer && attr->ingress &&
3141             (action_flags & terminal_action_flags))
3142                 return rte_flow_error_set(error, EINVAL,
3143                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3144                                           "set_tag has no effect"
3145                                           " with terminal actions");
3146         return 0;
3147 }
3148
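/*
 * Illustrative usage (editor's sketch): writing an application tag into
 * TAG index 0; the index is translated through mlx5_flow_get_reg_id()
 * above:
 *
 *     const struct rte_flow_action_set_tag tag = {
 *             .data = 0xdead,
 *             .mask = 0xffff,
 *             .index = 0,
 *     };
 */
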
3149 /**
3150  * Validate count action.
3151  *
3152  * @param[in] dev
3153  *   Pointer to rte_eth_dev structure.
3154  * @param[in] action
3155  *   Pointer to the action structure.
3156  * @param[in] action_flags
3157  *   Holds the actions detected until now.
3158  * @param[out] error
3159  *   Pointer to error structure.
3160  *
3161  * @return
3162  *   0 on success, a negative errno value otherwise and rte_errno is set.
3163  */
3164 static int
3165 flow_dv_validate_action_count(struct rte_eth_dev *dev,
3166                               const struct rte_flow_action *action,
3167                               uint64_t action_flags,
3168                               struct rte_flow_error *error)
3169 {
3170         struct mlx5_priv *priv = dev->data->dev_private;
3171         const struct rte_flow_action_count *count;
3172
3173         if (!priv->config.devx)
3174                 goto notsup_err;
3175         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3176                 return rte_flow_error_set(error, EINVAL,
3177                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3178                                           "duplicate count actions set");
3179         count = (const struct rte_flow_action_count *)action->conf;
3180         if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3181             !priv->sh->flow_hit_aso_en)
3182                 return rte_flow_error_set(error, EINVAL,
3183                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3184                                           "old age and shared count combination is not supported");
3185 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3186         return 0;
3187 #endif
3188 notsup_err:
3189         return rte_flow_error_set
3190                       (error, ENOTSUP,
3191                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3192                        NULL,
3193                        "count action not supported");
3194 }
3195
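/*
 * Illustrative usage (editor's sketch): a shared counter, the case whose
 * combination with AGE is checked above:
 *
 *     const struct rte_flow_action_count cnt = { .shared = 1, .id = 7 };
 *     const struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt,
 *     };
 */
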
3196 /**
3197  * Validate the L2 encap action.
3198  *
3199  * @param[in] dev
3200  *   Pointer to the rte_eth_dev structure.
3201  * @param[in] action_flags
3202  *   Holds the actions detected until now.
3203  * @param[in] action
3204  *   Pointer to the action structure.
3205  * @param[in] attr
3206  *   Pointer to flow attributes.
3207  * @param[out] error
3208  *   Pointer to error structure.
3209  *
3210  * @return
3211  *   0 on success, a negative errno value otherwise and rte_errno is set.
3212  */
3213 static int
3214 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3215                                  uint64_t action_flags,
3216                                  const struct rte_flow_action *action,
3217                                  const struct rte_flow_attr *attr,
3218                                  struct rte_flow_error *error)
3219 {
3220         const struct mlx5_priv *priv = dev->data->dev_private;
3221
3222         if (!(action->conf))
3223                 return rte_flow_error_set(error, EINVAL,
3224                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3225                                           "configuration cannot be null");
3226         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3227                 return rte_flow_error_set(error, EINVAL,
3228                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3229                                           "can only have a single encap action "
3230                                           "in a flow");
3231         if (!attr->transfer && priv->representor)
3232                 return rte_flow_error_set(error, ENOTSUP,
3233                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3234                                           "encap action for VF representor "
3235                                           "not supported on NIC table");
3236         return 0;
3237 }
3238
3239 /**
3240  * Validate a decap action.
3241  *
3242  * @param[in] dev
3243  *   Pointer to the rte_eth_dev structure.
3244  * @param[in] action_flags
3245  *   Holds the actions detected until now.
3246  * @param[in] action
3247  *   Pointer to the action structure.
3248  * @param[in] item_flags
3249  *   Holds the items detected.
3250  * @param[in] attr
3251  *   Pointer to flow attributes
3252  * @param[out] error
3253  *   Pointer to error structure.
3254  *
3255  * @return
3256  *   0 on success, a negative errno value otherwise and rte_errno is set.
3257  */
3258 static int
3259 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3260                               uint64_t action_flags,
3261                               const struct rte_flow_action *action,
3262                               const uint64_t item_flags,
3263                               const struct rte_flow_attr *attr,
3264                               struct rte_flow_error *error)
3265 {
3266         const struct mlx5_priv *priv = dev->data->dev_private;
3267
3268         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3269             !priv->config.decap_en)
3270                 return rte_flow_error_set(error, ENOTSUP,
3271                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3272                                           "decap is not enabled");
3273         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3274                 return rte_flow_error_set(error, ENOTSUP,
3275                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3276                                           action_flags &
3277                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3278                                           "have a single decap action" : "decap "
3279                                           "after encap is not supported");
3280         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3281                 return rte_flow_error_set(error, EINVAL,
3282                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3283                                           "can't have decap action after"
3284                                           " modify action");
3285         if (attr->egress)
3286                 return rte_flow_error_set(error, ENOTSUP,
3287                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3288                                           NULL,
3289                                           "decap action not supported for "
3290                                           "egress");
3291         if (!attr->transfer && priv->representor)
3292                 return rte_flow_error_set(error, ENOTSUP,
3293                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3294                                           "decap action for VF representor "
3295                                           "not supported on NIC table");
3296         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3297             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3298                 return rte_flow_error_set(error, ENOTSUP,
3299                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3300                                 "VXLAN item should be present for VXLAN decap");
3301         return 0;
3302 }
3303
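/*
 * Illustrative usage (editor's sketch): VXLAN decap requires a VXLAN item
 * in the pattern, per the last check above:
 *
 *     const struct rte_flow_action_queue queue = { .index = 0 };
 *     const struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
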
3304 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3305
3306 /**
3307  * Validate the raw encap and decap actions.
3308  *
3309  * @param[in] dev
3310  *   Pointer to the rte_eth_dev structure.
3311  * @param[in] decap
3312  *   Pointer to the decap action.
3313  * @param[in] encap
3314  *   Pointer to the encap action.
3315  * @param[in] attr
3316  *   Pointer to flow attributes
3317  * @param[in, out] action_flags
3318  *   Holds the actions detected until now.
3319  * @param[out] actions_n
3320  *   Pointer to the actions counter.
3321  * @param[in] action
3322  *   Pointer to the action structure.
3323  * @param[in] item_flags
3324  *   Holds the items detected.
3325  * @param[out] error
3326  *   Pointer to error structure.
3327  *
3328  * @return
3329  *   0 on success, a negative errno value otherwise and rte_errno is set.
3330  */
3331 static int
3332 flow_dv_validate_action_raw_encap_decap
3333         (struct rte_eth_dev *dev,
3334          const struct rte_flow_action_raw_decap *decap,
3335          const struct rte_flow_action_raw_encap *encap,
3336          const struct rte_flow_attr *attr, uint64_t *action_flags,
3337          int *actions_n, const struct rte_flow_action *action,
3338          uint64_t item_flags, struct rte_flow_error *error)
3339 {
3340         const struct mlx5_priv *priv = dev->data->dev_private;
3341         int ret;
3342
3343         if (encap && (!encap->size || !encap->data))
3344                 return rte_flow_error_set(error, EINVAL,
3345                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3346                                           "raw encap data cannot be empty");
3347         if (decap && encap) {
3348                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3349                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3350                         /* L3 encap. */
3351                         decap = NULL;
3352                 else if (encap->size <=
3353                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3354                            decap->size >
3355                            MLX5_ENCAPSULATION_DECISION_SIZE)
3356                         /* L3 decap. */
3357                         encap = NULL;
3358                 else if (encap->size >
3359                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3360                            decap->size >
3361                            MLX5_ENCAPSULATION_DECISION_SIZE)
3362                         /* 2 L2 actions: encap and decap. */
3363                         ;
3364                 else
3365                         return rte_flow_error_set(error,
3366                                 ENOTSUP,
3367                                 RTE_FLOW_ERROR_TYPE_ACTION,
3368                                 NULL, "unsupported combination of "
3369                                 "too-small raw decap and "
3370                                 "too-small raw encap sizes");
3371         }
3372         if (decap) {
3373                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3374                                                     item_flags, attr, error);
3375                 if (ret < 0)
3376                         return ret;
3377                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3378                 ++(*actions_n);
3379         }
3380         if (encap) {
3381                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3382                         return rte_flow_error_set(error, ENOTSUP,
3383                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3384                                                   NULL,
3385                                                   "small raw encap size");
3386                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3387                         return rte_flow_error_set(error, EINVAL,
3388                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3389                                                   NULL,
3390                                                   "more than one encap action");
3391                 if (!attr->transfer && priv->representor)
3392                         return rte_flow_error_set
3393                                         (error, ENOTSUP,
3394                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3395                                          "encap action for VF representor "
3396                                          "not supported on NIC table");
3397                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3398                 ++(*actions_n);
3399         }
3400         return 0;
3401 }
3402
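/*
 * Illustrative usage (editor's sketch): an L3 encap is expressed as a raw
 * decap of the inner L2 header (size <= decision size) paired with a raw
 * encap of a larger, application-built tunnel header, matching the
 * classification above; encap_buf and encap_buf_len are assumptions:
 *
 *     uint8_t l2[sizeof(struct rte_ether_hdr)];
 *     const struct rte_flow_action_raw_decap decap = {
 *             .data = l2, .size = sizeof(l2),
 *     };
 *     const struct rte_flow_action_raw_encap encap = {
 *             .data = encap_buf, .size = encap_buf_len,
 *     };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *             { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
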
3403 /**
3404  * Match encap_decap resource.
3405  *
3406  * @param list
3407  *   Pointer to the hash list.
3408  * @param entry
3409  *   Pointer to the existing resource entry object.
3410  * @param key
3411  *   Key of the new entry.
3412  * @param cb_ctx
3413  *   Pointer to the context with the new encap_decap resource.
3414  *
3415  * @return
3416  *   0 on matching, non-zero otherwise.
3417  */
3418 int
3419 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3420                              struct mlx5_hlist_entry *entry,
3421                              uint64_t key __rte_unused, void *cb_ctx)
3422 {
3423         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3424         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3425         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3426
3427         cache_resource = container_of(entry,
3428                                       struct mlx5_flow_dv_encap_decap_resource,
3429                                       entry);
3430         if (resource->reformat_type == cache_resource->reformat_type &&
3431             resource->ft_type == cache_resource->ft_type &&
3432             resource->flags == cache_resource->flags &&
3433             resource->size == cache_resource->size &&
3434             !memcmp((const void *)resource->buf,
3435                     (const void *)cache_resource->buf,
3436                     resource->size))
3437                 return 0;
3438         return -1;
3439 }
3440
3441 /**
3442  * Allocate encap_decap resource.
3443  *
3444  * @param list
3445  *   Pointer to the hash list.
3446  * @param key
3447  *   Key of the new entry.
3448  * @param cb_ctx
3449  *   Pointer to the context with the new encap_decap resource.
3450  *
3451  * @return
3452  *   Pointer to the new entry on success, NULL otherwise (rte_errno is set).
3453  */
3454 struct mlx5_hlist_entry *
3455 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3456                               uint64_t key __rte_unused,
3457                               void *cb_ctx)
3458 {
3459         struct mlx5_dev_ctx_shared *sh = list->ctx;
3460         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3461         struct mlx5dv_dr_domain *domain;
3462         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3463         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3464         uint32_t idx;
3465         int ret;
3466
3467         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3468                 domain = sh->fdb_domain;
3469         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3470                 domain = sh->rx_domain;
3471         else
3472                 domain = sh->tx_domain;
3473         /* Register new encap/decap resource. */
3474         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3475                                        &idx);
3476         if (!cache_resource) {
3477                 rte_flow_error_set(ctx->error, ENOMEM,
3478                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3479                                    "cannot allocate resource memory");
3480                 return NULL;
3481         }
3482         *cache_resource = *resource;
3483         cache_resource->idx = idx;
3484         ret = mlx5_flow_os_create_flow_action_packet_reformat
3485                                         (sh->ctx, domain, cache_resource,
3486                                          &cache_resource->action);
3487         if (ret) {
3488                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3489                 rte_flow_error_set(ctx->error, ENOMEM,
3490                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3491                                    NULL, "cannot create action");
3492                 return NULL;
3493         }
3494
3495         return &cache_resource->entry;
3496 }
3497
3498 /**
3499  * Find existing encap/decap resource or create and register a new one.
3500  *
3501  * @param[in, out] dev
3502  *   Pointer to rte_eth_dev structure.
3503  * @param[in, out] resource
3504  *   Pointer to encap/decap resource.
3505  * @param[in, out] dev_flow
3506  *   Pointer to the dev_flow.
3507  * @param[out] error
3508  *   Pointer to error structure.
3509  *
3510  * @return
3511  *   0 on success, a negative errno value otherwise and rte_errno is set.
3512  */
3513 static int
3514 flow_dv_encap_decap_resource_register
3515                         (struct rte_eth_dev *dev,
3516                          struct mlx5_flow_dv_encap_decap_resource *resource,
3517                          struct mlx5_flow *dev_flow,
3518                          struct rte_flow_error *error)
3519 {
3520         struct mlx5_priv *priv = dev->data->dev_private;
3521         struct mlx5_dev_ctx_shared *sh = priv->sh;
3522         struct mlx5_hlist_entry *entry;
3523         union {
3524                 struct {
3525                         uint32_t ft_type:8;
3526                         uint32_t refmt_type:8;
3527                         /*
3528                          * Header reformat actions can be shared between
3529                          * non-root tables. One bit to indicate whether the
3530                          * table is non-root (group != 0) or not.
3531                          */
3532                         uint32_t is_root:1;
3533                         uint32_t reserve:15;
3534                 };
3535                 uint32_t v32;
3536         } encap_decap_key = {
3537                 {
3538                         .ft_type = resource->ft_type,
3539                         .refmt_type = resource->reformat_type,
3540                         .is_root = !!dev_flow->dv.group,
3541                         .reserve = 0,
3542                 }
3543         };
3544         struct mlx5_flow_cb_ctx ctx = {
3545                 .error = error,
3546                 .data = resource,
3547         };
3548         uint64_t key64;
3549
3550         resource->flags = dev_flow->dv.group ? 0 : 1;
3551         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3552                                 sizeof(encap_decap_key.v32), 0);
3553         if (resource->reformat_type !=
3554             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3555             resource->size)
3556                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3557         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3558         if (!entry)
3559                 return -rte_errno;
3560         resource = container_of(entry, typeof(*resource), entry);
3561         dev_flow->dv.encap_decap = resource;
3562         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3563         return 0;
3564 }
3565
3566 /**
3567  * Find existing table jump resource or create and register a new one.
3568  *
3569  * @param[in, out] dev
3570  *   Pointer to rte_eth_dev structure.
3571  * @param[in, out] tbl
3572  *   Pointer to flow table resource.
3573  * @param[in, out] dev_flow
3574  *   Pointer to the dev_flow.
3575  * @param[out] error
3576  *   Pointer to error structure.
3577  *
3578  * @return
3579  *   0 on success, a negative errno value otherwise and rte_errno is set.
3580  */
3581 static int
3582 flow_dv_jump_tbl_resource_register
3583                         (struct rte_eth_dev *dev __rte_unused,
3584                          struct mlx5_flow_tbl_resource *tbl,
3585                          struct mlx5_flow *dev_flow,
3586                          struct rte_flow_error *error __rte_unused)
3587 {
3588         struct mlx5_flow_tbl_data_entry *tbl_data =
3589                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3590
3591         MLX5_ASSERT(tbl);
3592         MLX5_ASSERT(tbl_data->jump.action);
3593         dev_flow->handle->rix_jump = tbl_data->idx;
3594         dev_flow->dv.jump = &tbl_data->jump;
3595         return 0;
3596 }
3597
3598 int
3599 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3600                          struct mlx5_cache_entry *entry, void *cb_ctx)
3601 {
3602         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3603         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3604         struct mlx5_flow_dv_port_id_action_resource *res =
3605                         container_of(entry, typeof(*res), entry);
3606
3607         return ref->port_id != res->port_id;
3608 }
3609
3610 struct mlx5_cache_entry *
3611 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3612                           struct mlx5_cache_entry *entry __rte_unused,
3613                           void *cb_ctx)
3614 {
3615         struct mlx5_dev_ctx_shared *sh = list->ctx;
3616         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3617         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3618         struct mlx5_flow_dv_port_id_action_resource *cache;
3619         uint32_t idx;
3620         int ret;
3621
3622         /* Register new port id action resource. */
3623         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3624         if (!cache) {
3625                 rte_flow_error_set(ctx->error, ENOMEM,
3626                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3627                                    "cannot allocate port_id action cache memory");
3628                 return NULL;
3629         }
3630         *cache = *ref;
3631         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3632                                                         ref->port_id,
3633                                                         &cache->action);
3634         if (ret) {
3635                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3636                 rte_flow_error_set(ctx->error, ENOMEM,
3637                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3638                                    "cannot create action");
3639                 return NULL;
3640         }
3641         cache->idx = idx;
3642         return &cache->entry;
3643 }
3644
3645 /**
3646  * Find existing table port ID resource or create and register a new one.
3647  *
3648  * @param[in, out] dev
3649  *   Pointer to rte_eth_dev structure.
3650  * @param[in, out] resource
3651  *   Pointer to port ID action resource.
3652  * @param[in, out] dev_flow
3653  *   Pointer to the dev_flow.
3654  * @param[out] error
3655  *   Pointer to error structure.
3656  *
3657  * @return
3658  *   0 on success, a negative errno value otherwise and rte_errno is set.
3659  */
3660 static int
3661 flow_dv_port_id_action_resource_register
3662                         (struct rte_eth_dev *dev,
3663                          struct mlx5_flow_dv_port_id_action_resource *resource,
3664                          struct mlx5_flow *dev_flow,
3665                          struct rte_flow_error *error)
3666 {
3667         struct mlx5_priv *priv = dev->data->dev_private;
3668         struct mlx5_cache_entry *entry;
3669         struct mlx5_flow_dv_port_id_action_resource *cache;
3670         struct mlx5_flow_cb_ctx ctx = {
3671                 .error = error,
3672                 .data = resource,
3673         };
3674
3675         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3676         if (!entry)
3677                 return -rte_errno;
3678         cache = container_of(entry, typeof(*cache), entry);
3679         dev_flow->dv.port_id_action = cache;
3680         dev_flow->handle->rix_port_id_action = cache->idx;
3681         return 0;
3682 }
3683
3684 int
3685 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3686                          struct mlx5_cache_entry *entry, void *cb_ctx)
3687 {
3688         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3689         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3690         struct mlx5_flow_dv_push_vlan_action_resource *res =
3691                         container_of(entry, typeof(*res), entry);
3692
3693         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3694 }
3695
3696 struct mlx5_cache_entry *
3697 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3698                           struct mlx5_cache_entry *entry __rte_unused,
3699                           void *cb_ctx)
3700 {
3701         struct mlx5_dev_ctx_shared *sh = list->ctx;
3702         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3703         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3704         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3705         struct mlx5dv_dr_domain *domain;
3706         uint32_t idx;
3707         int ret;
3708
3709         /* Register a new push VLAN action resource. */
3710         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3711         if (!cache) {
3712                 rte_flow_error_set(ctx->error, ENOMEM,
3713                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3714                                    "cannot allocate push_vlan action cache memory");
3715                 return NULL;
3716         }
3717         *cache = *ref;
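        /* Pick the DR domain matching the destination flow table type. */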
3718         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3719                 domain = sh->fdb_domain;
3720         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3721                 domain = sh->rx_domain;
3722         else
3723                 domain = sh->tx_domain;
3724         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3725                                                         &cache->action);
3726         if (ret) {
3727                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3728                 rte_flow_error_set(ctx->error, ENOMEM,
3729                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3730                                    "cannot create push vlan action");
3731                 return NULL;
3732         }
3733         cache->idx = idx;
3734         return &cache->entry;
3735 }
3736
3737 /**
3738  * Find an existing push VLAN action resource or create and register a new one.
3739  *
3740  * @param[in, out] dev
3741  *   Pointer to rte_eth_dev structure.
3742  * @param[in, out] resource
3743  *   Pointer to push VLAN action resource.
3744  * @param[in, out] dev_flow
3745  *   Pointer to the dev_flow.
3746  * @param[out] error
3747  *   Pointer to error structure.
3748  *
3749  * @return
3750  *   0 on success, otherwise -errno and rte_errno is set.
3751  */
3752 static int
3753 flow_dv_push_vlan_action_resource_register
3754                        (struct rte_eth_dev *dev,
3755                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3756                         struct mlx5_flow *dev_flow,
3757                         struct rte_flow_error *error)
3758 {
3759         struct mlx5_priv *priv = dev->data->dev_private;
3760         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3761         struct mlx5_cache_entry *entry;
3762         struct mlx5_flow_cb_ctx ctx = {
3763                 .error = error,
3764                 .data = resource,
3765         };
3766
3767         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3768         if (!entry)
3769                 return -rte_errno;
3770         cache = container_of(entry, typeof(*cache), entry);
3771
3772         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3773         dev_flow->dv.push_vlan_res = cache;
3774         return 0;
3775 }
3776
3777 /**
3778  * Get the header size of a specific rte_flow_item_type.
3779  *
3780  * @param[in] item_type
3781  *   Tested rte_flow_item_type.
3782  *
3783  * @return
3784  *   Size of the item type's header structure, 0 if void or irrelevant.
3785  */
3786 static size_t
3787 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3788 {
3789         size_t retval;
3790
3791         switch (item_type) {
3792         case RTE_FLOW_ITEM_TYPE_ETH:
3793                 retval = sizeof(struct rte_ether_hdr);
3794                 break;
3795         case RTE_FLOW_ITEM_TYPE_VLAN:
3796                 retval = sizeof(struct rte_vlan_hdr);
3797                 break;
3798         case RTE_FLOW_ITEM_TYPE_IPV4:
3799                 retval = sizeof(struct rte_ipv4_hdr);
3800                 break;
3801         case RTE_FLOW_ITEM_TYPE_IPV6:
3802                 retval = sizeof(struct rte_ipv6_hdr);
3803                 break;
3804         case RTE_FLOW_ITEM_TYPE_UDP:
3805                 retval = sizeof(struct rte_udp_hdr);
3806                 break;
3807         case RTE_FLOW_ITEM_TYPE_TCP:
3808                 retval = sizeof(struct rte_tcp_hdr);
3809                 break;
3810         case RTE_FLOW_ITEM_TYPE_VXLAN:
3811         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3812                 retval = sizeof(struct rte_vxlan_hdr);
3813                 break;
3814         case RTE_FLOW_ITEM_TYPE_GRE:
3815         case RTE_FLOW_ITEM_TYPE_NVGRE:
3816                 retval = sizeof(struct rte_gre_hdr);
3817                 break;
3818         case RTE_FLOW_ITEM_TYPE_MPLS:
3819                 retval = sizeof(struct rte_mpls_hdr);
3820                 break;
3821         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3822         default:
3823                 retval = 0;
3824                 break;
3825         }
3826         return retval;
3827 }
3828
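/*
 * For example, an ETH / IPV4 / UDP / VXLAN encapsulation definition
 * totals 14 + 20 + 8 + 8 = 50 bytes of header data.
 *
 * Default values applied below to encap header fields that the
 * application left as zero:
 */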
3829 #define MLX5_ENCAP_IPV4_VERSION         0x40
3830 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3831 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3832 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3833 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3834 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3835 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3836
3837 /**
3838  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
3839  *
3840  * @param[in] items
3841  *   Pointer to rte_flow_item objects list.
3842  * @param[out] buf
3843  *   Pointer to the output buffer.
3844  * @param[out] size
3845  *   Pointer to the output buffer size.
3846  * @param[out] error
3847  *   Pointer to the error structure.
3848  *
3849  * @return
3850  *   0 on success, a negative errno value otherwise and rte_errno is set.
3851  */
3852 static int
3853 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3854                            size_t *size, struct rte_flow_error *error)
3855 {
3856         struct rte_ether_hdr *eth = NULL;
3857         struct rte_vlan_hdr *vlan = NULL;
3858         struct rte_ipv4_hdr *ipv4 = NULL;
3859         struct rte_ipv6_hdr *ipv6 = NULL;
3860         struct rte_udp_hdr *udp = NULL;
3861         struct rte_vxlan_hdr *vxlan = NULL;
3862         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3863         struct rte_gre_hdr *gre = NULL;
3864         size_t len;
3865         size_t temp_size = 0;
3866
3867         if (!items)
3868                 return rte_flow_error_set(error, EINVAL,
3869                                           RTE_FLOW_ERROR_TYPE_ACTION,
3870                                           NULL, "invalid empty data");
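        /* Copy each item header into the buffer and default zeroed fields. */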
3871         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3872                 len = flow_dv_get_item_hdr_len(items->type);
3873                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3874                         return rte_flow_error_set(error, EINVAL,
3875                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3876                                                   (void *)items->type,
3877                                                   "items total size is too big"
3878                                                   " for encap action");
3879                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3880                 switch (items->type) {
3881                 case RTE_FLOW_ITEM_TYPE_ETH:
3882                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3883                         break;
3884                 case RTE_FLOW_ITEM_TYPE_VLAN:
3885                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3886                         if (!eth)
3887                                 return rte_flow_error_set(error, EINVAL,
3888                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3889                                                 (void *)items->type,
3890                                                 "eth header not found");
3891                         if (!eth->ether_type)
3892                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3893                         break;
3894                 case RTE_FLOW_ITEM_TYPE_IPV4:
3895                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3896                         if (!vlan && !eth)
3897                                 return rte_flow_error_set(error, EINVAL,
3898                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3899                                                 (void *)items->type,
3900                                                 "neither eth nor vlan"
3901                                                 " header found");
3902                         if (vlan && !vlan->eth_proto)
3903                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3904                         else if (eth && !eth->ether_type)
3905                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3906                         if (!ipv4->version_ihl)
3907                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3908                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3909                         if (!ipv4->time_to_live)
3910                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3911                         break;
3912                 case RTE_FLOW_ITEM_TYPE_IPV6:
3913                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3914                         if (!vlan && !eth)
3915                                 return rte_flow_error_set(error, EINVAL,
3916                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3917                                                 (void *)items->type,
3918                                                 "neither eth nor vlan"
3919                                                 " header found");
3920                         if (vlan && !vlan->eth_proto)
3921                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3922                         else if (eth && !eth->ether_type)
3923                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3924                         if (!ipv6->vtc_flow)
3925                                 ipv6->vtc_flow =
3926                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3927                         if (!ipv6->hop_limits)
3928                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3929                         break;
3930                 case RTE_FLOW_ITEM_TYPE_UDP:
3931                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3932                         if (!ipv4 && !ipv6)
3933                                 return rte_flow_error_set(error, EINVAL,
3934                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3935                                                 (void *)items->type,
3936                                                 "ip header not found");
3937                         if (ipv4 && !ipv4->next_proto_id)
3938                                 ipv4->next_proto_id = IPPROTO_UDP;
3939                         else if (ipv6 && !ipv6->proto)
3940                                 ipv6->proto = IPPROTO_UDP;
3941                         break;
3942                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3943                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3944                         if (!udp)
3945                                 return rte_flow_error_set(error, EINVAL,
3946                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3947                                                 (void *)items->type,
3948                                                 "udp header not found");
3949                         if (!udp->dst_port)
3950                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3951                         if (!vxlan->vx_flags)
3952                                 vxlan->vx_flags =
3953                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3954                         break;
3955                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3956                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3957                         if (!udp)
3958                                 return rte_flow_error_set(error, EINVAL,
3959                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3960                                                 (void *)items->type,
3961                                                 "udp header not found");
3962                         if (!vxlan_gpe->proto)
3963                                 return rte_flow_error_set(error, EINVAL,
3964                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3965                                                 (void *)items->type,
3966                                                 "next protocol not found");
3967                         if (!udp->dst_port)
3968                                 udp->dst_port =
3969                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3970                         if (!vxlan_gpe->vx_flags)
3971                                 vxlan_gpe->vx_flags =
3972                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3973                         break;
3974                 case RTE_FLOW_ITEM_TYPE_GRE:
3975                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3976                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3977                         if (!gre->proto)
3978                                 return rte_flow_error_set(error, EINVAL,
3979                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3980                                                 (void *)items->type,
3981                                                 "next protocol not found");
3982                         if (!ipv4 && !ipv6)
3983                                 return rte_flow_error_set(error, EINVAL,
3984                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3985                                                 (void *)items->type,
3986                                                 "ip header not found");
3987                         if (ipv4 && !ipv4->next_proto_id)
3988                                 ipv4->next_proto_id = IPPROTO_GRE;
3989                         else if (ipv6 && !ipv6->proto)
3990                                 ipv6->proto = IPPROTO_GRE;
3991                         break;
3992                 case RTE_FLOW_ITEM_TYPE_VOID:
3993                         break;
3994                 default:
3995                         return rte_flow_error_set(error, EINVAL,
3996                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3997                                                   (void *)items->type,
3998                                                   "unsupported item type");
3999                         break;
4000                 }
4001                 temp_size += len;
4002         }
4003         *size = temp_size;
4004         return 0;
4005 }
4006
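/**
 * Zero the UDP checksum of a UDP-over-IPv6 encapsulation header.
 *
 * Hardware recalculates the IPv4 header checksum of the encapsulation
 * header, but not a UDP checksum over IPv6, so the checksum field is
 * cleared instead (a zero UDP checksum disables validation for the
 * tunnel). IPv6 extension headers are not expected in encap data.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation header buffer.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */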
4007 static int
4008 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4009 {
4010         struct rte_ether_hdr *eth = NULL;
4011         struct rte_vlan_hdr *vlan = NULL;
4012         struct rte_ipv6_hdr *ipv6 = NULL;
4013         struct rte_udp_hdr *udp = NULL;
4014         char *next_hdr;
4015         uint16_t proto;
4016
4017         eth = (struct rte_ether_hdr *)data;
4018         next_hdr = (char *)(eth + 1);
4019         proto = rte_be_to_cpu_16(eth->ether_type);
4020
4021         /* VLAN skipping */
4022         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4023                 vlan = (struct rte_vlan_hdr *)next_hdr;
4024                 proto = rte_be_to_cpu_16(vlan->eth_proto);
4025                 next_hdr += sizeof(struct rte_vlan_hdr);
4026         }
4027
4028         /* HW calculates the IPv4 checksum, no need to proceed. */
4029         if (proto == RTE_ETHER_TYPE_IPV4)
4030                 return 0;
4031
4032         /* Non-IPv4/IPv6 header, not supported. */
4033         if (proto != RTE_ETHER_TYPE_IPV6) {
4034                 return rte_flow_error_set(error, ENOTSUP,
4035                                           RTE_FLOW_ERROR_TYPE_ACTION,
4036                                           NULL, "Cannot offload non IPv4/IPv6");
4037         }
4038
4039         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4040
4041         /* Ignore non-UDP protocols. */
4042         if (ipv6->proto != IPPROTO_UDP)
4043                 return 0;
4044
4045         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4046         udp->dgram_cksum = 0;
4047
4048         return 0;
4049 }
4050
4051 /**
4052  * Convert L2 encap action to DV specification.
4053  *
4054  * @param[in] dev
4055  *   Pointer to rte_eth_dev structure.
4056  * @param[in] action
4057  *   Pointer to action structure.
4058  * @param[in, out] dev_flow
4059  *   Pointer to the mlx5_flow.
4060  * @param[in] transfer
4061  *   Mark if the flow is E-Switch flow.
4062  * @param[out] error
4063  *   Pointer to the error structure.
4064  *
4065  * @return
4066  *   0 on success, a negative errno value otherwise and rte_errno is set.
4067  */
4068 static int
4069 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4070                                const struct rte_flow_action *action,
4071                                struct mlx5_flow *dev_flow,
4072                                uint8_t transfer,
4073                                struct rte_flow_error *error)
4074 {
4075         const struct rte_flow_item *encap_data;
4076         const struct rte_flow_action_raw_encap *raw_encap_data;
4077         struct mlx5_flow_dv_encap_decap_resource res = {
4078                 .reformat_type =
4079                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4080                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4081                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4082         };
4083
4084         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4085                 raw_encap_data =
4086                         (const struct rte_flow_action_raw_encap *)action->conf;
4087                 res.size = raw_encap_data->size;
4088                 memcpy(res.buf, raw_encap_data->data, res.size);
4089         } else {
4090                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4091                         encap_data =
4092                                 ((const struct rte_flow_action_vxlan_encap *)
4093                                                 action->conf)->definition;
4094                 else
4095                         encap_data =
4096                                 ((const struct rte_flow_action_nvgre_encap *)
4097                                                 action->conf)->definition;
4098                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4099                                                &res.size, error))
4100                         return -rte_errno;
4101         }
4102         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4103                 return -rte_errno;
4104         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4105                 return rte_flow_error_set(error, EINVAL,
4106                                           RTE_FLOW_ERROR_TYPE_ACTION,
4107                                           NULL, "can't create L2 encap action");
4108         return 0;
4109 }
4110
4111 /**
4112  * Convert L2 decap action to DV specification.
4113  *
4114  * @param[in] dev
4115  *   Pointer to rte_eth_dev structure.
4116  * @param[in, out] dev_flow
4117  *   Pointer to the mlx5_flow.
4118  * @param[in] transfer
4119  *   Mark if the flow is E-Switch flow.
4120  * @param[out] error
4121  *   Pointer to the error structure.
4122  *
4123  * @return
4124  *   0 on success, a negative errno value otherwise and rte_errno is set.
4125  */
4126 static int
4127 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4128                                struct mlx5_flow *dev_flow,
4129                                uint8_t transfer,
4130                                struct rte_flow_error *error)
4131 {
4132         struct mlx5_flow_dv_encap_decap_resource res = {
4133                 .size = 0,
4134                 .reformat_type =
4135                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4136                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4137                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4138         };
4139
4140         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4141                 return rte_flow_error_set(error, EINVAL,
4142                                           RTE_FLOW_ERROR_TYPE_ACTION,
4143                                           NULL, "can't create L2 decap action");
4144         return 0;
4145 }
4146
4147 /**
4148  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4149  *
4150  * @param[in] dev
4151  *   Pointer to rte_eth_dev structure.
4152  * @param[in] action
4153  *   Pointer to action structure.
4154  * @param[in, out] dev_flow
4155  *   Pointer to the mlx5_flow.
4156  * @param[in] attr
4157  *   Pointer to the flow attributes.
4158  * @param[out] error
4159  *   Pointer to the error structure.
4160  *
4161  * @return
4162  *   0 on success, a negative errno value otherwise and rte_errno is set.
4163  */
4164 static int
4165 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4166                                 const struct rte_flow_action *action,
4167                                 struct mlx5_flow *dev_flow,
4168                                 const struct rte_flow_attr *attr,
4169                                 struct rte_flow_error *error)
4170 {
4171         const struct rte_flow_action_raw_encap *encap_data;
4172         struct mlx5_flow_dv_encap_decap_resource res;
4173
4174         memset(&res, 0, sizeof(res));
4175         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4176         res.size = encap_data->size;
4177         memcpy(res.buf, encap_data->data, res.size);
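        /*
         * A buffer shorter than MLX5_ENCAPSULATION_DECISION_SIZE carries
         * only the L2 header to restore after an L3 decap; anything
         * larger is a complete outer header to prepend as an L3 encap.
         */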
4178         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4179                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4180                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4181         if (attr->transfer)
4182                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4183         else
4184                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4185                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4186         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4187                 return rte_flow_error_set(error, EINVAL,
4188                                           RTE_FLOW_ERROR_TYPE_ACTION,
4189                                           NULL, "can't create encap action");
4190         return 0;
4191 }
4192
4193 /**
4194  * Create action push VLAN.
4195  *
4196  * @param[in] dev
4197  *   Pointer to rte_eth_dev structure.
4198  * @param[in] attr
4199  *   Pointer to the flow attributes.
4200  * @param[in] vlan
4201  *   Pointer to the vlan to push to the Ethernet header.
4202  * @param[in, out] dev_flow
4203  *   Pointer to the mlx5_flow.
4204  * @param[out] error
4205  *   Pointer to the error structure.
4206  *
4207  * @return
4208  *   0 on success, a negative errno value otherwise and rte_errno is set.
4209  */
4210 static int
4211 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4212                                 const struct rte_flow_attr *attr,
4213                                 const struct rte_vlan_hdr *vlan,
4214                                 struct mlx5_flow *dev_flow,
4215                                 struct rte_flow_error *error)
4216 {
4217         struct mlx5_flow_dv_push_vlan_action_resource res;
4218
4219         memset(&res, 0, sizeof(res));
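        /*
         * The 32-bit tag is the TPID in the upper 16 bits and the TCI in
         * the lower 16. For example, pushing TPID 0x8100 with PCP 3 and
         * VID 100 gives vlan_tci = (3 << 13) | 100 = 0x6064, i.e. a
         * big-endian vlan_tag of 0x81006064.
         */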
4220         res.vlan_tag =
4221                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4222                                  vlan->vlan_tci);
4223         if (attr->transfer)
4224                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4225         else
4226                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4227                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4228         return flow_dv_push_vlan_action_resource_register
4229                                             (dev, &res, dev_flow, error);
4230 }
4231
4232 /**
4233  * Validate the modify-header actions.
4234  *
4235  * @param[in] action_flags
4236  *   Holds the actions detected until now.
4237  * @param[in] action
4238  *   Pointer to the modify action.
4239  * @param[out] error
4240  *   Pointer to error structure.
4241  *
4242  * @return
4243  *   0 on success, a negative errno value otherwise and rte_errno is set.
4244  */
4245 static int
4246 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4247                                    const struct rte_flow_action *action,
4248                                    struct rte_flow_error *error)
4249 {
4250         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4251                 return rte_flow_error_set(error, EINVAL,
4252                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4253                                           NULL, "action configuration not set");
4254         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4255                 return rte_flow_error_set(error, EINVAL,
4256                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4257                                           "can't have encap action before"
4258                                           " modify action");
4259         return 0;
4260 }
4261
4262 /**
4263  * Validate the modify-header MAC address actions.
4264  *
4265  * @param[in] action_flags
4266  *   Holds the actions detected until now.
4267  * @param[in] action
4268  *   Pointer to the modify action.
4269  * @param[in] item_flags
4270  *   Holds the items detected.
4271  * @param[out] error
4272  *   Pointer to error structure.
4273  *
4274  * @return
4275  *   0 on success, a negative errno value otherwise and rte_errno is set.
4276  */
4277 static int
4278 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4279                                    const struct rte_flow_action *action,
4280                                    const uint64_t item_flags,
4281                                    struct rte_flow_error *error)
4282 {
4283         int ret = 0;
4284
4285         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4286         if (!ret) {
4287                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4288                         return rte_flow_error_set(error, EINVAL,
4289                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4290                                                   NULL,
4291                                                   "no L2 item in pattern");
4292         }
4293         return ret;
4294 }
4295
4296 /**
4297  * Validate the modify-header IPv4 address actions.
4298  *
4299  * @param[in] action_flags
4300  *   Holds the actions detected until now.
4301  * @param[in] action
4302  *   Pointer to the modify action.
4303  * @param[in] item_flags
4304  *   Holds the items detected.
4305  * @param[out] error
4306  *   Pointer to error structure.
4307  *
4308  * @return
4309  *   0 on success, a negative errno value otherwise and rte_errno is set.
4310  */
4311 static int
4312 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4313                                     const struct rte_flow_action *action,
4314                                     const uint64_t item_flags,
4315                                     struct rte_flow_error *error)
4316 {
4317         int ret = 0;
4318         uint64_t layer;
4319
4320         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4321         if (!ret) {
4322                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4323                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4324                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4325                 if (!(item_flags & layer))
4326                         return rte_flow_error_set(error, EINVAL,
4327                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4328                                                   NULL,
4329                                                   "no ipv4 item in pattern");
4330         }
4331         return ret;
4332 }
4333
4334 /**
4335  * Validate the modify-header IPv6 address actions.
4336  *
4337  * @param[in] action_flags
4338  *   Holds the actions detected until now.
4339  * @param[in] action
4340  *   Pointer to the modify action.
4341  * @param[in] item_flags
4342  *   Holds the items detected.
4343  * @param[out] error
4344  *   Pointer to error structure.
4345  *
4346  * @return
4347  *   0 on success, a negative errno value otherwise and rte_errno is set.
4348  */
4349 static int
4350 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4351                                     const struct rte_flow_action *action,
4352                                     const uint64_t item_flags,
4353                                     struct rte_flow_error *error)
4354 {
4355         int ret = 0;
4356         uint64_t layer;
4357
4358         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4359         if (!ret) {
4360                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4361                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4362                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4363                 if (!(item_flags & layer))
4364                         return rte_flow_error_set(error, EINVAL,
4365                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4366                                                   NULL,
4367                                                   "no ipv6 item in pattern");
4368         }
4369         return ret;
4370 }
4371
4372 /**
4373  * Validate the modify-header TP actions.
4374  *
4375  * @param[in] action_flags
4376  *   Holds the actions detected until now.
4377  * @param[in] action
4378  *   Pointer to the modify action.
4379  * @param[in] item_flags
4380  *   Holds the items detected.
4381  * @param[out] error
4382  *   Pointer to error structure.
4383  *
4384  * @return
4385  *   0 on success, a negative errno value otherwise and rte_errno is set.
4386  */
4387 static int
4388 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4389                                   const struct rte_flow_action *action,
4390                                   const uint64_t item_flags,
4391                                   struct rte_flow_error *error)
4392 {
4393         int ret = 0;
4394         uint64_t layer;
4395
4396         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4397         if (!ret) {
4398                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4399                                  MLX5_FLOW_LAYER_INNER_L4 :
4400                                  MLX5_FLOW_LAYER_OUTER_L4;
4401                 if (!(item_flags & layer))
4402                         return rte_flow_error_set(error, EINVAL,
4403                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4404                                                   NULL, "no transport layer "
4405                                                   "in pattern");
4406         }
4407         return ret;
4408 }
4409
4410 /**
4411  * Validate the modify-header actions of increment/decrement
4412  * TCP Sequence-number.
4413  *
4414  * @param[in] action_flags
4415  *   Holds the actions detected until now.
4416  * @param[in] action
4417  *   Pointer to the modify action.
4418  * @param[in] item_flags
4419  *   Holds the items detected.
4420  * @param[out] error
4421  *   Pointer to error structure.
4422  *
4423  * @return
4424  *   0 on success, a negative errno value otherwise and rte_errno is set.
4425  */
4426 static int
4427 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4428                                        const struct rte_flow_action *action,
4429                                        const uint64_t item_flags,
4430                                        struct rte_flow_error *error)
4431 {
4432         int ret = 0;
4433         uint64_t layer;
4434
4435         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4436         if (!ret) {
4437                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4438                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4439                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4440                 if (!(item_flags & layer))
4441                         return rte_flow_error_set(error, EINVAL,
4442                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4443                                                   NULL, "no TCP item in"
4444                                                   " pattern");
4445                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4446                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4447                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4448                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4449                         return rte_flow_error_set(error, EINVAL,
4450                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4451                                                   NULL,
4452                                                   "cannot decrease and increase"
4453                                                   " TCP sequence number"
4454                                                   " at the same time");
4455         }
4456         return ret;
4457 }
4458
4459 /**
4460  * Validate the modify-header actions of increment/decrement
4461  * TCP Acknowledgment number.
4462  *
4463  * @param[in] action_flags
4464  *   Holds the actions detected until now.
4465  * @param[in] action
4466  *   Pointer to the modify action.
4467  * @param[in] item_flags
4468  *   Holds the items detected.
4469  * @param[out] error
4470  *   Pointer to error structure.
4471  *
4472  * @return
4473  *   0 on success, a negative errno value otherwise and rte_errno is set.
4474  */
4475 static int
4476 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4477                                        const struct rte_flow_action *action,
4478                                        const uint64_t item_flags,
4479                                        struct rte_flow_error *error)
4480 {
4481         int ret = 0;
4482         uint64_t layer;
4483
4484         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4485         if (!ret) {
4486                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4487                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4488                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4489                 if (!(item_flags & layer))
4490                         return rte_flow_error_set(error, EINVAL,
4491                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4492                                                   NULL, "no TCP item in"
4493                                                   " pattern");
4494                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4495                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4496                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4497                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4498                         return rte_flow_error_set(error, EINVAL,
4499                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4500                                                   NULL,
4501                                                   "cannot decrease and increase"
4502                                                   " TCP acknowledgment number"
4503                                                   " at the same time");
4504         }
4505         return ret;
4506 }
4507
4508 /**
4509  * Validate the modify-header TTL actions.
4510  *
4511  * @param[in] action_flags
4512  *   Holds the actions detected until now.
4513  * @param[in] action
4514  *   Pointer to the modify action.
4515  * @param[in] item_flags
4516  *   Holds the items detected.
4517  * @param[out] error
4518  *   Pointer to error structure.
4519  *
4520  * @return
4521  *   0 on success, a negative errno value otherwise and rte_errno is set.
4522  */
4523 static int
4524 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4525                                    const struct rte_flow_action *action,
4526                                    const uint64_t item_flags,
4527                                    struct rte_flow_error *error)
4528 {
4529         int ret = 0;
4530         uint64_t layer;
4531
4532         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4533         if (!ret) {
4534                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4535                                  MLX5_FLOW_LAYER_INNER_L3 :
4536                                  MLX5_FLOW_LAYER_OUTER_L3;
4537                 if (!(item_flags & layer))
4538                         return rte_flow_error_set(error, EINVAL,
4539                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4540                                                   NULL,
4541                                                   "no IP protocol in pattern");
4542         }
4543         return ret;
4544 }
4545
4546 /**
4547  * Validate the generic modify field actions.
 *
4548  * @param[in] dev
4549  *   Pointer to the rte_eth_dev structure.
4550  * @param[in] action_flags
4551  *   Holds the actions detected until now.
4552  * @param[in] action
4553  *   Pointer to the modify action.
4554  * @param[in] attr
4555  *   Pointer to the flow attributes.
4556  * @param[out] error
4557  *   Pointer to error structure.
4558  *
4559  * @return
4560  *   Number of header fields to modify (0 or more) on success,
4561  *   a negative errno value otherwise and rte_errno is set.
4562  */
4563 static int
4564 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4565                                    const uint64_t action_flags,
4566                                    const struct rte_flow_action *action,
4567                                    const struct rte_flow_attr *attr,
4568                                    struct rte_flow_error *error)
4569 {
4570         int ret = 0;
4571         struct mlx5_priv *priv = dev->data->dev_private;
4572         struct mlx5_dev_config *config = &priv->config;
4573         const struct rte_flow_action_modify_field *action_modify_field =
4574                 action->conf;
4575         uint32_t dst_width =
4576                 mlx5_flow_item_field_width(action_modify_field->dst.field);
4577         uint32_t src_width =
4578                 mlx5_flow_item_field_width(action_modify_field->src.field);
4579
4580         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4581         if (ret)
4582                 return ret;
4583
4584         if (action_modify_field->width == 0)
4585                 return rte_flow_error_set(error, EINVAL,
4586                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4587                                 "no bits are requested to be modified");
4588         else if (action_modify_field->width > dst_width ||
4589                  action_modify_field->width > src_width)
4590                 return rte_flow_error_set(error, EINVAL,
4591                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4592                                 "cannot modify more bits than"
4593                                 " the width of a field");
4594         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4595             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4596                 if ((action_modify_field->dst.offset +
4597                      action_modify_field->width > dst_width) ||
4598                     (action_modify_field->dst.offset % 32))
4599                         return rte_flow_error_set(error, EINVAL,
4600                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4601                                         "destination offset is too big"
4602                                         " or not aligned to 4 bytes");
4603                 if (action_modify_field->dst.level &&
4604                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4605                         return rte_flow_error_set(error, ENOTSUP,
4606                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4607                                         "inner header fields modification"
4608                                         " is not supported");
4609         }
4610         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4611             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4612                 if (!attr->transfer && !attr->group)
4613                         return rte_flow_error_set(error, ENOTSUP,
4614                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4615                                         "modify field action is not"
4616                                         " supported for group 0");
4617                 if ((action_modify_field->src.offset +
4618                      action_modify_field->width > src_width) ||
4619                     (action_modify_field->src.offset % 32))
4620                         return rte_flow_error_set(error, EINVAL,
4621                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4622                                         "source offset is too big"
4623                                         " or not aligned to 4 bytes");
4624                 if (action_modify_field->src.level &&
4625                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4626                         return rte_flow_error_set(error, ENOTSUP,
4627                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4628                                         "inner header fields modification"
4629                                         " is not supported");
4630         }
4631         if (action_modify_field->dst.field ==
4632             action_modify_field->src.field)
4633                 return rte_flow_error_set(error, EINVAL,
4634                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4635                                 "source and destination fields"
4636                                 " cannot be the same");
4637         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4638             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4639                 return rte_flow_error_set(error, EINVAL,
4640                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4641                                 "immediate value or a pointer to it"
4642                                 " cannot be used as a destination");
4643         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4644             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4645                 return rte_flow_error_set(error, ENOTSUP,
4646                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4647                                 "modification of an arbitrary"
4648                                 " place in a packet is not supported");
4649         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4650             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4651                 return rte_flow_error_set(error, ENOTSUP,
4652                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4653                                 "modification of the 802.1Q Tag"
4654                                 " Identifier is not supported");
4655         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4656             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4657                 return rte_flow_error_set(error, ENOTSUP,
4658                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4659                                 "modification of the VXLAN Network"
4660                                 " Identifier is not supported");
4661         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4662             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4663                 return rte_flow_error_set(error, ENOTSUP,
4664                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4665                                 "modification of the GENEVE Network"
4666                                 " Identifier is not supported");
4667         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4668             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4669             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4670             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4671                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4672                     !mlx5_flow_ext_mreg_supported(dev))
4673                         return rte_flow_error_set(error, ENOTSUP,
4674                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4675                                         "cannot modify mark or metadata without"
4676                                         " extended metadata register support");
4677         }
4678         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4679                 return rte_flow_error_set(error, ENOTSUP,
4680                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4681                                 "add and sub operations"
4682                                 " are not supported");
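        /* Return the number of 32-bit words needed to cover the width. */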
4683         return (action_modify_field->width / 32) +
4684                !!(action_modify_field->width % 32);
4685 }
4686
4687 /**
4688  * Validate jump action.
4689  *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context (may be NULL).
4690  * @param[in] action
4691  *   Pointer to the jump action.
4692  * @param[in] action_flags
4693  *   Holds the actions detected until now.
4694  * @param[in] attributes
4695  *   Pointer to flow attributes.
4696  * @param[in] external
4697  *   Action belongs to a flow rule created by a request external to the PMD.
4698  * @param[out] error
4699  *   Pointer to error structure.
4700  *
4701  * @return
4702  *   0 on success, a negative errno value otherwise and rte_errno is set.
4703  */
4704 static int
4705 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4706                              const struct mlx5_flow_tunnel *tunnel,
4707                              const struct rte_flow_action *action,
4708                              uint64_t action_flags,
4709                              const struct rte_flow_attr *attributes,
4710                              bool external, struct rte_flow_error *error)
4711 {
4712         uint32_t target_group, table;
4713         int ret = 0;
4714         struct flow_grp_info grp_info = {
4715                 .external = !!external,
4716                 .transfer = !!attributes->transfer,
4717                 .fdb_def_rule = 1,
4718                 .std_tbl_fix = 0
4719         };
4720         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4721                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4722                 return rte_flow_error_set(error, EINVAL,
4723                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4724                                           "can't have 2 fate actions in"
4725                                           " same flow");
4726         if (!action->conf)
4727                 return rte_flow_error_set(error, EINVAL,
4728                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4729                                           NULL, "action configuration not set");
4730         target_group =
4731                 ((const struct rte_flow_action_jump *)action->conf)->group;
4732         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4733                                        &grp_info, error);
4734         if (ret)
4735                 return ret;
4736         if (attributes->group == target_group &&
4737             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4738                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4739                 return rte_flow_error_set(error, EINVAL,
4740                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4741                                           "target group must be other than"
4742                                           " the current flow group");
4743         return 0;
4744 }
4745
4746 /**
4747  * Validate the port_id action.
4748  *
4749  * @param[in] dev
4750  *   Pointer to rte_eth_dev structure.
4751  * @param[in] action_flags
4752  *   Bit-fields that holds the actions detected until now.
4753  * @param[in] action
4754  *   Port_id RTE action structure.
4755  * @param[in] attr
4756  *   Attributes of flow that includes this action.
4757  * @param[out] error
4758  *   Pointer to error structure.
4759  *
4760  * @return
4761  *   0 on success, a negative errno value otherwise and rte_errno is set.
4762  */
4763 static int
4764 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4765                                 uint64_t action_flags,
4766                                 const struct rte_flow_action *action,
4767                                 const struct rte_flow_attr *attr,
4768                                 struct rte_flow_error *error)
4769 {
4770         const struct rte_flow_action_port_id *port_id;
4771         struct mlx5_priv *act_priv;
4772         struct mlx5_priv *dev_priv;
4773         uint16_t port;
4774
4775         if (!attr->transfer)
4776                 return rte_flow_error_set(error, ENOTSUP,
4777                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4778                                           NULL,
4779                                           "port id action is valid in transfer"
4780                                           " mode only");
4781         if (!action || !action->conf)
4782                 return rte_flow_error_set(error, ENOTSUP,
4783                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4784                                           NULL,
4785                                           "port id action parameters must be"
4786                                           " specified");
4787         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4788                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4789                 return rte_flow_error_set(error, EINVAL,
4790                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4791                                           "can have only one fate action in"
4792                                           " a flow");
4793         dev_priv = mlx5_dev_to_eswitch_info(dev);
4794         if (!dev_priv)
4795                 return rte_flow_error_set(error, rte_errno,
4796                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4797                                           NULL,
4798                                           "failed to obtain E-Switch info");
4799         port_id = action->conf;
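        /* With "original" set, validate against the flow-creating port itself. */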
4800         port = port_id->original ? dev->data->port_id : port_id->id;
4801         act_priv = mlx5_port_to_eswitch_info(port, false);
4802         if (!act_priv)
4803                 return rte_flow_error_set
4804                                 (error, rte_errno,
4805                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4806                                  "failed to obtain E-Switch port id for port");
4807         if (act_priv->domain_id != dev_priv->domain_id)
4808                 return rte_flow_error_set
4809                                 (error, EINVAL,
4810                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4811                                  "port does not belong to"
4812                                  " E-Switch being configured");
4813         return 0;
4814 }
4815
4816 /**
4817  * Get the maximum number of modify header actions.
4818  *
4819  * @param dev
4820  *   Pointer to rte_eth_dev structure.
4821  * @param flags
4822  *   Flags bits to check if root level.
4823  *
4824  * @return
4825  *   Max number of modify header actions device can support.
4826  */
4827 static inline unsigned int
4828 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4829                               uint64_t flags)
4830 {
4831         /*
4832          * There is no way to directly query the max capacity from FW.
4833          * The maximal value on the root table is assumed to be supported.
4834          */
4835         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4836                 return MLX5_MAX_MODIFY_NUM;
4837         else
4838                 return MLX5_ROOT_TBL_MODIFY_NUM;
4839 }
4840
4841 /**
4842  * Validate the meter action.
4843  *
4844  * @param[in] dev
4845  *   Pointer to rte_eth_dev structure.
4846  * @param[in] action_flags
4847  *   Bit-fields that holds the actions detected until now.
4848  * @param[in] action
4849  *   Pointer to the meter action.
4850  * @param[in] attr
4851  *   Attributes of flow that includes this action.
 * @param[out] def_policy
 *   Set to true if the meter uses the default policy, false otherwise.
4852  * @param[out] error
4853  *   Pointer to error structure.
4854  *
4855  * @return
4856  *   0 on success, a negative errno value otherwise and rte_errno is set.
4857  */
4858 static int
4859 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4860                                 uint64_t action_flags,
4861                                 const struct rte_flow_action *action,
4862                                 const struct rte_flow_attr *attr,
4863                                 bool *def_policy,
4864                                 struct rte_flow_error *error)
4865 {
4866         struct mlx5_priv *priv = dev->data->dev_private;
4867         const struct rte_flow_action_meter *am = action->conf;
4868         struct mlx5_flow_meter_info *fm;
4869         struct mlx5_flow_meter_policy *mtr_policy;
4870         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
4871
4872         if (!am)
4873                 return rte_flow_error_set(error, EINVAL,
4874                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4875                                           "meter action conf is NULL");
4876
4877         if (action_flags & MLX5_FLOW_ACTION_METER)
4878                 return rte_flow_error_set(error, ENOTSUP,
4879                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4880                                           "meter chaining not supported");
4881         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4882                 return rte_flow_error_set(error, ENOTSUP,
4883                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4884                                           "meter with jump not supported");
4885         if (!priv->mtr_en)
4886                 return rte_flow_error_set(error, ENOTSUP,
4887                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4888                                           NULL,
4889                                           "meter action not supported");
4890         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
4891         if (!fm)
4892                 return rte_flow_error_set(error, EINVAL,
4893                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4894                                           "Meter not found");
4895         /* ASO meter can always be shared by different domains. */
4896         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
4897             !(fm->transfer == attr->transfer ||
4898               (!fm->ingress && !attr->ingress && attr->egress) ||
4899               (!fm->egress && !attr->egress && attr->ingress)))
4900                 return rte_flow_error_set(error, EINVAL,
4901                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4902                         "Flow attributes domain is either invalid "
4903                         "or conflicts with the current "
4904                         "meter attributes");
4905         if (fm->def_policy) {
4906                 if (!((attr->transfer &&
4907                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
4908                         (attr->egress &&
4909                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
4910                         (attr->ingress &&
4911                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
4912                         return rte_flow_error_set(error, EINVAL,
4913                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4914                                           "Flow attributes domain "
4915                                           "conflicts with the current "
4916                                           "meter domain attributes");
4917                 *def_policy = true;
4918         } else {
4919                 mtr_policy = mlx5_flow_meter_policy_find(dev,
4920                                                 fm->policy_id, NULL);
4921                 if (!mtr_policy)
4922                         return rte_flow_error_set(error, EINVAL,
4923                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4924                                           "Invalid policy ID for meter");
4925                 if (!((attr->transfer && mtr_policy->transfer) ||
4926                         (attr->egress && mtr_policy->egress) ||
4927                         (attr->ingress && mtr_policy->ingress)))
4928                         return rte_flow_error_set(error, EINVAL,
4929                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4930                                           "Flow attributes domain "
4931                                           "conflicts with the current "
4932                                           "meter domain attributes");
4933                 *def_policy = false;
4934         }
4935         return 0;
4936 }
4937
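/*
 * Editorial sketch, not part of the driver: the policy/attribute domain
 * compatibility predicate applied by the validation above, extracted
 * for illustration (hypothetical helper name).
 */
static bool __rte_unused
example_meter_policy_domain_ok(const struct mlx5_flow_meter_policy *policy,
			       const struct rte_flow_attr *attr)
{
	/* The rule must stay within at least one domain of the policy. */
	return (attr->transfer && policy->transfer) ||
	       (attr->egress && policy->egress) ||
	       (attr->ingress && policy->ingress);
}
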
4938 /**
4939  * Validate the age action.
4940  *
4941  * @param[in] action_flags
4942  *   Holds the actions detected until now.
4943  * @param[in] action
4944  *   Pointer to the age action.
4945  * @param[in] dev
4946  *   Pointer to the Ethernet device structure.
4947  * @param[out] error
4948  *   Pointer to error structure.
4949  *
4950  * @return
4951  *   0 on success, a negative errno value otherwise and rte_errno is set.
4952  */
4953 static int
4954 flow_dv_validate_action_age(uint64_t action_flags,
4955                             const struct rte_flow_action *action,
4956                             struct rte_eth_dev *dev,
4957                             struct rte_flow_error *error)
4958 {
4959         struct mlx5_priv *priv = dev->data->dev_private;
4960         const struct rte_flow_action_age *age = action->conf;
4961
4962         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4963             !priv->sh->aso_age_mng))
4964                 return rte_flow_error_set(error, ENOTSUP,
4965                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4966                                           NULL,
4967                                           "age action not supported");
4968         if (!(action->conf))
4969                 return rte_flow_error_set(error, EINVAL,
4970                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4971                                           "configuration cannot be null");
4972         if (!(age->timeout))
4973                 return rte_flow_error_set(error, EINVAL,
4974                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4975                                           "invalid timeout value 0");
4976         if (action_flags & MLX5_FLOW_ACTION_AGE)
4977                 return rte_flow_error_set(error, EINVAL,
4978                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4979                                           "duplicate age actions set");
4980         return 0;
4981 }
4982
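/*
 * Editorial sketch, not part of the driver: a minimal AGE action
 * configuration that passes the validation above (hypothetical value,
 * the timeout only has to be non-zero).
 */
static const struct rte_flow_action_age example_age_conf __rte_unused = {
	.timeout = 10, /* in seconds */
};
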
4983 /**
4984  * Validate the modify-header IPv4 DSCP actions.
4985  *
4986  * @param[in] action_flags
4987  *   Holds the actions detected until now.
4988  * @param[in] action
4989  *   Pointer to the modify action.
4990  * @param[in] item_flags
4991  *   Holds the items detected.
4992  * @param[out] error
4993  *   Pointer to error structure.
4994  *
4995  * @return
4996  *   0 on success, a negative errno value otherwise and rte_errno is set.
4997  */
4998 static int
4999 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5000                                          const struct rte_flow_action *action,
5001                                          const uint64_t item_flags,
5002                                          struct rte_flow_error *error)
5003 {
5004         int ret = 0;
5005
5006         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5007         if (!ret) {
5008                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5009                         return rte_flow_error_set(error, EINVAL,
5010                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5011                                                   NULL,
5012                                                   "no ipv4 item in pattern");
5013         }
5014         return ret;
5015 }
5016
5017 /**
5018  * Validate the modify-header IPv6 DSCP actions.
5019  *
5020  * @param[in] action_flags
5021  *   Holds the actions detected until now.
5022  * @param[in] action
5023  *   Pointer to the modify action.
5024  * @param[in] item_flags
5025  *   Holds the items detected.
5026  * @param[out] error
5027  *   Pointer to error structure.
5028  *
5029  * @return
5030  *   0 on success, a negative errno value otherwise and rte_errno is set.
5031  */
5032 static int
5033 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5034                                          const struct rte_flow_action *action,
5035                                          const uint64_t item_flags,
5036                                          struct rte_flow_error *error)
5037 {
5038         int ret = 0;
5039
5040         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5041         if (!ret) {
5042                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5043                         return rte_flow_error_set(error, EINVAL,
5044                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5045                                                   NULL,
5046                                                   "no ipv6 item in pattern");
5047         }
5048         return ret;
5049 }
5050
5051 /**
5052  * Match modify-header resource.
5053  *
5054  * @param list
5055  *   Pointer to the hash list.
5056  * @param entry
5057  *   Pointer to the existing resource entry object.
5058  * @param key
5059  *   Key of the new entry.
5060  * @param cb_ctx
5061  *   Pointer to the context carrying the new modify-header resource.
5062  *
5063  * @return
5064  *   0 on matching, non-zero otherwise.
5065  */
5066 int
5067 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5068                         struct mlx5_hlist_entry *entry,
5069                         uint64_t key __rte_unused, void *cb_ctx)
5070 {
5071         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5072         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5073         struct mlx5_flow_dv_modify_hdr_resource *resource =
5074                         container_of(entry, typeof(*resource), entry);
5075         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5076
5077         key_len += ref->actions_num * sizeof(ref->actions[0]);
5078         return ref->actions_num != resource->actions_num ||
5079                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5080 }
5081
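/**
 * Allocate and register a new modify-header resource (hash list creation
 * callback).
 *
 * @param list
 *   Pointer to the hash list.
 * @param key
 *   Key of the new entry, unused here.
 * @param cb_ctx
 *   Pointer to the context carrying the new modify-header resource.
 *
 * @return
 *   Pointer to the new entry on success, NULL otherwise and the flow
 *   error is set.
 */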
5082 struct mlx5_hlist_entry *
5083 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5084                          void *cb_ctx)
5085 {
5086         struct mlx5_dev_ctx_shared *sh = list->ctx;
5087         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5088         struct mlx5dv_dr_domain *ns;
5089         struct mlx5_flow_dv_modify_hdr_resource *entry;
5090         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5091         int ret;
5092         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5093         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5094
5095         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5096                             SOCKET_ID_ANY);
5097         if (!entry) {
5098                 rte_flow_error_set(ctx->error, ENOMEM,
5099                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5100                                    "cannot allocate resource memory");
5101                 return NULL;
5102         }
5103         rte_memcpy(&entry->ft_type,
5104                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5105                    key_len + data_len);
5106         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5107                 ns = sh->fdb_domain;
5108         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5109                 ns = sh->tx_domain;
5110         else
5111                 ns = sh->rx_domain;
5112         ret = mlx5_flow_os_create_flow_action_modify_header
5113                                         (sh->ctx, ns, entry,
5114                                          data_len, &entry->action);
5115         if (ret) {
5116                 mlx5_free(entry);
5117                 rte_flow_error_set(ctx->error, ENOMEM,
5118                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5119                                    NULL, "cannot create modification action");
5120                 return NULL;
5121         }
5122         return &entry->entry;
5123 }
5124
5125 /**
5126  * Validate the sample action.
5127  *
5128  * @param[in, out] action_flags
5129  *   Holds the actions detected until now.
5130  * @param[in] action
5131  *   Pointer to the sample action.
5132  * @param[in] dev
5133  *   Pointer to the Ethernet device structure.
5134  * @param[in] attr
5135  *   Attributes of flow that includes this action.
5136  * @param[in] item_flags
5137  *   Holds the items detected.
5138  * @param[in] rss
5139  *   Pointer to the RSS action.
5140  * @param[out] sample_rss
5141  *   Pointer to the RSS action in sample action list.
5142  * @param[out] count
5143  *   Pointer to the COUNT action in sample action list.
5144  * @param[out] fdb_mirror_limit
5145  *   Pointer to the FDB mirror limitation flag.
5146  * @param[out] error
5147  *   Pointer to error structure.
5148  *
5149  * @return
5150  *   0 on success, a negative errno value otherwise and rte_errno is set.
5151  */
5152 static int
5153 flow_dv_validate_action_sample(uint64_t *action_flags,
5154                                const struct rte_flow_action *action,
5155                                struct rte_eth_dev *dev,
5156                                const struct rte_flow_attr *attr,
5157                                uint64_t item_flags,
5158                                const struct rte_flow_action_rss *rss,
5159                                const struct rte_flow_action_rss **sample_rss,
5160                                const struct rte_flow_action_count **count,
5161                                int *fdb_mirror_limit,
5162                                struct rte_flow_error *error)
5163 {
5164         struct mlx5_priv *priv = dev->data->dev_private;
5165         struct mlx5_dev_config *dev_conf = &priv->config;
5166         const struct rte_flow_action_sample *sample = action->conf;
5167         const struct rte_flow_action *act;
5168         uint64_t sub_action_flags = 0;
5169         uint16_t queue_index = 0xFFFF;
5170         int actions_n = 0;
5171         int ret;
5172
5173         if (!sample)
5174                 return rte_flow_error_set(error, EINVAL,
5175                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5176                                           "configuration cannot be NULL");
5177         if (sample->ratio == 0)
5178                 return rte_flow_error_set(error, EINVAL,
5179                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5180                                           "ratio value starts from 1");
5181         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5182                 return rte_flow_error_set(error, ENOTSUP,
5183                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5184                                           NULL,
5185                                           "sample action not supported");
5186         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5187                 return rte_flow_error_set(error, EINVAL,
5188                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5189                                           "Multiple sample actions not "
5190                                           "supported");
5191         if (*action_flags & MLX5_FLOW_ACTION_METER)
5192                 return rte_flow_error_set(error, EINVAL,
5193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5194                                           "wrong action order, meter should "
5195                                           "be after sample action");
5196         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5197                 return rte_flow_error_set(error, EINVAL,
5198                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5199                                           "wrong action order, jump should "
5200                                           "be after sample action");
5201         act = sample->actions;
5202         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5203                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5204                         return rte_flow_error_set(error, ENOTSUP,
5205                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5206                                                   act, "too many actions");
5207                 switch (act->type) {
5208                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5209                         ret = mlx5_flow_validate_action_queue(act,
5210                                                               sub_action_flags,
5211                                                               dev,
5212                                                               attr, error);
5213                         if (ret < 0)
5214                                 return ret;
5215                         queue_index = ((const struct rte_flow_action_queue *)
5216                                                         (act->conf))->index;
5217                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5218                         ++actions_n;
5219                         break;
5220                 case RTE_FLOW_ACTION_TYPE_RSS:
5221                         *sample_rss = act->conf;
5222                         ret = mlx5_flow_validate_action_rss(act,
5223                                                             sub_action_flags,
5224                                                             dev, attr,
5225                                                             item_flags,
5226                                                             error);
5227                         if (ret < 0)
5228                                 return ret;
5229                         if (rss && *sample_rss &&
5230                             ((*sample_rss)->level != rss->level ||
5231                             (*sample_rss)->types != rss->types))
5232                                 return rte_flow_error_set(error, ENOTSUP,
5233                                         RTE_FLOW_ERROR_TYPE_ACTION,
5234                                         NULL,
5235                                         "Cannot use different RSS types "
5236                                         "or levels in the same flow");
5237                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5238                                 queue_index = (*sample_rss)->queue[0];
5239                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5240                         ++actions_n;
5241                         break;
5242                 case RTE_FLOW_ACTION_TYPE_MARK:
5243                         ret = flow_dv_validate_action_mark(dev, act,
5244                                                            sub_action_flags,
5245                                                            attr, error);
5246                         if (ret < 0)
5247                                 return ret;
5248                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5249                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5250                                                 MLX5_FLOW_ACTION_MARK_EXT;
5251                         else
5252                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5253                         ++actions_n;
5254                         break;
5255                 case RTE_FLOW_ACTION_TYPE_COUNT:
5256                         ret = flow_dv_validate_action_count
5257                                 (dev, act,
5258                                  *action_flags | sub_action_flags,
5259                                  error);
5260                         if (ret < 0)
5261                                 return ret;
5262                         *count = act->conf;
5263                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5264                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5265                         ++actions_n;
5266                         break;
5267                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5268                         ret = flow_dv_validate_action_port_id(dev,
5269                                                               sub_action_flags,
5270                                                               act,
5271                                                               attr,
5272                                                               error);
5273                         if (ret)
5274                                 return ret;
5275                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5276                         ++actions_n;
5277                         break;
5278                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5279                         ret = flow_dv_validate_action_raw_encap_decap
5280                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5281                                  &actions_n, action, item_flags, error);
5282                         if (ret < 0)
5283                                 return ret;
5284                         ++actions_n;
5285                         break;
5286                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5287                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5288                         ret = flow_dv_validate_action_l2_encap(dev,
5289                                                                sub_action_flags,
5290                                                                act, attr,
5291                                                                error);
5292                         if (ret < 0)
5293                                 return ret;
5294                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5295                         ++actions_n;
5296                         break;
5297                 default:
5298                         return rte_flow_error_set(error, ENOTSUP,
5299                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5300                                                   NULL,
5301                                                   "Unsupported optional "
5302                                                   "action");
5303                 }
5304         }
5305         if (attr->ingress && !attr->transfer) {
5306                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5307                                           MLX5_FLOW_ACTION_RSS)))
5308                         return rte_flow_error_set(error, EINVAL,
5309                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5310                                                   NULL,
5311                                                   "Ingress must have a dest "
5312                                                   "QUEUE for the sample action");
5313         } else if (attr->egress && !attr->transfer) {
5314                 return rte_flow_error_set(error, ENOTSUP,
5315                                           RTE_FLOW_ERROR_TYPE_ACTION,
5316                                           NULL,
5317                                           "Sample action is only supported "
5318                                           "on ingress or E-Switch");
5319         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5320                 MLX5_ASSERT(attr->transfer);
5321                 if (sample->ratio > 1)
5322                         return rte_flow_error_set(error, ENOTSUP,
5323                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5324                                                   NULL,
5325                                                   "E-Switch doesn't support "
5326                                                   "any optional action "
5327                                                   "for sampling");
5328                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5329                         return rte_flow_error_set(error, ENOTSUP,
5330                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5331                                                   NULL,
5332                                                   "unsupported action QUEUE");
5333                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5334                         return rte_flow_error_set(error, ENOTSUP,
5335                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5336                                                   NULL,
5337                                                   "unsupported action RSS");
5338                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5339                         return rte_flow_error_set(error, EINVAL,
5340                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5341                                                   NULL,
5342                                                   "E-Switch must have a dest "
5343                                                   "port for mirroring");
5344                 if (!priv->config.hca_attr.reg_c_preserve &&
5345                      priv->representor_id != -1)
5346                         *fdb_mirror_limit = 1;
5347         }
5348         /* Continue validation for Xcap actions. */
5349         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5350             (queue_index == 0xFFFF ||
5351              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5352                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5353                      MLX5_FLOW_XCAP_ACTIONS)
5354                         return rte_flow_error_set(error, ENOTSUP,
5355                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5356                                                   NULL, "encap and decap "
5357                                                   "combination is not "
5358                                                   "supported");
5359                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5360                                                         MLX5_FLOW_ACTION_ENCAP))
5361                         return rte_flow_error_set(error, ENOTSUP,
5362                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5363                                                   NULL, "encap is not supported"
5364                                                   " for ingress traffic");
5365         }
5366         return 0;
5367 }
5368
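/*
 * Editorial sketch, not part of the driver: a minimal ingress sample
 * configuration that satisfies the checks above (hypothetical values).
 * Ingress sampling must carry a destination QUEUE or RSS sub-action.
 */
static const struct rte_flow_action_queue example_sample_queue __rte_unused = {
	.index = 0,
};
static const struct rte_flow_action example_sample_sub_acts[] __rte_unused = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_sample_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_action_sample example_sample_conf __rte_unused = {
	.ratio = 2, /* sample half of the traffic, must be >= 1 */
	.actions = example_sample_sub_acts,
};
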
5369 /**
5370  * Find existing modify-header resource or create and register a new one.
5371  *
5372  * @param[in, out] dev
5373  *   Pointer to rte_eth_dev structure.
5374  * @param[in, out] resource
5375  *   Pointer to modify-header resource.
5376  * @param[in, out] dev_flow
5377  *   Pointer to the dev_flow.
5378  * @param[out] error
5379  *   Pointer to error structure.
5380  *
5381  * @return
5382  *   0 on success, otherwise a negative errno value and rte_errno is set.
5383  */
5384 static int
5385 flow_dv_modify_hdr_resource_register
5386                         (struct rte_eth_dev *dev,
5387                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5388                          struct mlx5_flow *dev_flow,
5389                          struct rte_flow_error *error)
5390 {
5391         struct mlx5_priv *priv = dev->data->dev_private;
5392         struct mlx5_dev_ctx_shared *sh = priv->sh;
5393         uint32_t key_len = sizeof(*resource) -
5394                            offsetof(typeof(*resource), ft_type) +
5395                            resource->actions_num * sizeof(resource->actions[0]);
5396         struct mlx5_hlist_entry *entry;
5397         struct mlx5_flow_cb_ctx ctx = {
5398                 .error = error,
5399                 .data = resource,
5400         };
5401         uint64_t key64;
5402
5403         resource->flags = dev_flow->dv.group ? 0 :
5404                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5405         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5406                                     resource->flags))
5407                 return rte_flow_error_set(error, EOVERFLOW,
5408                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5409                                           "too many modify header items");
5410         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5411         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5412         if (!entry)
5413                 return -rte_errno;
5414         resource = container_of(entry, typeof(*resource), entry);
5415         dev_flow->handle->dvh.modify_hdr = resource;
5416         return 0;
5417 }
5418
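/*
 * Editorial sketch, not part of the driver: the hash-list key used by
 * the registration above, a raw checksum over the resource tail
 * starting at ft_type plus the variable-size action array
 * (hypothetical helper name).
 */
static uint64_t __rte_unused
example_modify_hdr_key(const struct mlx5_flow_dv_modify_hdr_resource *res)
{
	uint32_t len = sizeof(*res) - offsetof(typeof(*res), ft_type) +
		       res->actions_num * sizeof(res->actions[0]);

	return __rte_raw_cksum(&res->ft_type, len, 0);
}
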
5419 /**
5420  * Get DV flow counter by index.
5421  *
5422  * @param[in] dev
5423  *   Pointer to the Ethernet device structure.
5424  * @param[in] idx
5425  *   mlx5 flow counter index in the container.
5426  * @param[out] ppool
5427  *   mlx5 flow counter pool in the container.
5428  *
5429  * @return
5430  *   Pointer to the counter, NULL otherwise.
5431  */
5432 static struct mlx5_flow_counter *
5433 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5434                            uint32_t idx,
5435                            struct mlx5_flow_counter_pool **ppool)
5436 {
5437         struct mlx5_priv *priv = dev->data->dev_private;
5438         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5439         struct mlx5_flow_counter_pool *pool;
5440
5441         /* Decrease to original index and clear shared bit. */
5442         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5443         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5444         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5445         MLX5_ASSERT(pool);
5446         if (ppool)
5447                 *ppool = pool;
5448         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5449 }
5450
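/*
 * Editorial sketch, not part of the driver: the 1-based counter index
 * layout assumed by the lookup above. Index 0 means "no counter", and
 * the MLX5_CNT_SHARED_OFFSET bit marking shared counters is cleared
 * before decoding (hypothetical helper name).
 */
static void __rte_unused
example_decode_cnt_idx(uint32_t idx, uint32_t *pool_idx, uint32_t *offset)
{
	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	*pool_idx = idx / MLX5_COUNTERS_PER_POOL;
	*offset = idx % MLX5_COUNTERS_PER_POOL;
}
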
5451 /**
5452  * Check whether the devx counter belongs to the pool.
5453  *
5454  * @param[in] pool
5455  *   Pointer to the counter pool.
5456  * @param[in] id
5457  *   The counter devx ID.
5458  *
5459  * @return
5460  *   True if counter belongs to the pool, false otherwise.
5461  */
5462 static bool
5463 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5464 {
5465         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5466                    MLX5_COUNTERS_PER_POOL;
5467
5468         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5469                 return true;
5470         return false;
5471 }
5472
5473 /**
5474  * Get a pool by devx counter ID.
5475  *
5476  * @param[in] cmng
5477  *   Pointer to the counter management.
5478  * @param[in] id
5479  *   The counter devx ID.
5480  *
5481  * @return
5482  *   The counter pool pointer if it exists, NULL otherwise.
5483  */
5484 static struct mlx5_flow_counter_pool *
5485 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5486 {
5487         uint32_t i;
5488         struct mlx5_flow_counter_pool *pool = NULL;
5489
5490         rte_spinlock_lock(&cmng->pool_update_sl);
5491         /* Check last used pool. */
5492         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5493             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5494                 pool = cmng->pools[cmng->last_pool_idx];
5495                 goto out;
5496         }
5497         /* ID out of range means no suitable pool in the container. */
5498         if (id > cmng->max_id || id < cmng->min_id)
5499                 goto out;
5500         /*
5501          * Search the pools from the end of the container, since counter
5502          * IDs mostly increase in sequence and the last pool is usually
5503          * the needed one.
5504          */
5505         i = cmng->n_valid;
5506         while (i--) {
5507                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5508
5509                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5510                         pool = pool_tmp;
5511                         break;
5512                 }
5513         }
5514 out:
5515         rte_spinlock_unlock(&cmng->pool_update_sl);
5516         return pool;
5517 }
5518
5519 /**
5520  * Resize a counter container.
5521  *
5522  * @param[in] dev
5523  *   Pointer to the Ethernet device structure.
5524  *
5525  * @return
5526  *   0 on success, otherwise negative errno value and rte_errno is set.
5527  */
5528 static int
5529 flow_dv_container_resize(struct rte_eth_dev *dev)
5530 {
5531         struct mlx5_priv *priv = dev->data->dev_private;
5532         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5533         void *old_pools = cmng->pools;
5534         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5535         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5536         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5537
5538         if (!pools) {
5539                 rte_errno = ENOMEM;
5540                 return -ENOMEM;
5541         }
5542         if (old_pools)
5543                 memcpy(pools, old_pools, cmng->n *
5544                                        sizeof(struct mlx5_flow_counter_pool *));
5545         cmng->n = resize;
5546         cmng->pools = pools;
5547         if (old_pools)
5548                 mlx5_free(old_pools);
5549         return 0;
5550 }
5551
5552 /**
5553  * Query a devx flow counter.
5554  *
5555  * @param[in] dev
5556  *   Pointer to the Ethernet device structure.
5557  * @param[in] counter
5558  *   Index to the flow counter.
5559  * @param[out] pkts
5560  *   The statistics value of packets.
5561  * @param[out] bytes
5562  *   The statistics value of bytes.
5563  *
5564  * @return
5565  *   0 on success, otherwise a negative errno value and rte_errno is set.
5566  */
5567 static inline int
5568 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5569                      uint64_t *bytes)
5570 {
5571         struct mlx5_priv *priv = dev->data->dev_private;
5572         struct mlx5_flow_counter_pool *pool = NULL;
5573         struct mlx5_flow_counter *cnt;
5574         int offset;
5575
5576         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5577         MLX5_ASSERT(pool);
5578         if (priv->sh->cmng.counter_fallback)
5579                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5580                                         0, pkts, bytes, 0, NULL, NULL, 0);
5581         rte_spinlock_lock(&pool->sl);
5582         if (!pool->raw) {
5583                 *pkts = 0;
5584                 *bytes = 0;
5585         } else {
5586                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5587                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5588                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5589         }
5590         rte_spinlock_unlock(&pool->sl);
5591         return 0;
5592 }
5593
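/*
 * Editorial sketch, not part of the driver: reading a counter through
 * the query routine above. In non-fallback mode the returned values are
 * those of the last asynchronous batch query (hypothetical helper name).
 */
static int __rte_unused
example_read_counter(struct rte_eth_dev *dev, uint32_t idx)
{
	uint64_t pkts = 0;
	uint64_t bytes = 0;

	return _flow_dv_query_count(dev, idx, &pkts, &bytes);
}
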
5594 /**
5595  * Create and initialize a new counter pool.
5596  *
5597  * @param[in] dev
5598  *   Pointer to the Ethernet device structure.
5599  * @param[out] dcs
5600  *   The devX counter handle.
5601  * @param[in] age
5602  *   Whether the pool is for counters that were allocated for aging.
5605  *
5606  * @return
5607  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5608  */
5609 static struct mlx5_flow_counter_pool *
5610 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5611                     uint32_t age)
5612 {
5613         struct mlx5_priv *priv = dev->data->dev_private;
5614         struct mlx5_flow_counter_pool *pool;
5615         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5616         bool fallback = priv->sh->cmng.counter_fallback;
5617         uint32_t size = sizeof(*pool);
5618
5619         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5620         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5621         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5622         if (!pool) {
5623                 rte_errno = ENOMEM;
5624                 return NULL;
5625         }
5626         pool->raw = NULL;
5627         pool->is_aged = !!age;
5628         pool->query_gen = 0;
5629         pool->min_dcs = dcs;
5630         rte_spinlock_init(&pool->sl);
5631         rte_spinlock_init(&pool->csl);
5632         TAILQ_INIT(&pool->counters[0]);
5633         TAILQ_INIT(&pool->counters[1]);
5634         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5635         rte_spinlock_lock(&cmng->pool_update_sl);
5636         pool->index = cmng->n_valid;
5637         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5638                 mlx5_free(pool);
5639                 rte_spinlock_unlock(&cmng->pool_update_sl);
5640                 return NULL;
5641         }
5642         cmng->pools[pool->index] = pool;
5643         cmng->n_valid++;
5644         if (unlikely(fallback)) {
5645                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5646
5647                 if (base < cmng->min_id)
5648                         cmng->min_id = base;
5649                 if (base > cmng->max_id)
5650                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5651                 cmng->last_pool_idx = pool->index;
5652         }
5653         rte_spinlock_unlock(&cmng->pool_update_sl);
5654         return pool;
5655 }
5656
5657 /**
5658  * Prepare a new counter and/or a new counter pool.
5659  *
5660  * @param[in] dev
5661  *   Pointer to the Ethernet device structure.
5662  * @param[out] cnt_free
5663  *   Where to put the pointer of a new counter.
5664  * @param[in] age
5665  *   Whether the pool is for counters that were allocated for aging.
5666  *
5667  * @return
5668  *   The counter pool pointer and @p cnt_free is set on success,
5669  *   NULL otherwise and rte_errno is set.
5670  */
5671 static struct mlx5_flow_counter_pool *
5672 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5673                              struct mlx5_flow_counter **cnt_free,
5674                              uint32_t age)
5675 {
5676         struct mlx5_priv *priv = dev->data->dev_private;
5677         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5678         struct mlx5_flow_counter_pool *pool;
5679         struct mlx5_counters tmp_tq;
5680         struct mlx5_devx_obj *dcs = NULL;
5681         struct mlx5_flow_counter *cnt;
5682         enum mlx5_counter_type cnt_type =
5683                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5684         bool fallback = priv->sh->cmng.counter_fallback;
5685         uint32_t i;
5686
5687         if (fallback) {
5688                 /* bulk_bitmap must be 0 for single counter allocation. */
5689                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5690                 if (!dcs)
5691                         return NULL;
5692                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5693                 if (!pool) {
5694                         pool = flow_dv_pool_create(dev, dcs, age);
5695                         if (!pool) {
5696                                 mlx5_devx_cmd_destroy(dcs);
5697                                 return NULL;
5698                         }
5699                 }
5700                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5701                 cnt = MLX5_POOL_GET_CNT(pool, i);
5702                 cnt->pool = pool;
5703                 cnt->dcs_when_free = dcs;
5704                 *cnt_free = cnt;
5705                 return pool;
5706         }
5707         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5708         if (!dcs) {
5709                 rte_errno = ENODATA;
5710                 return NULL;
5711         }
5712         pool = flow_dv_pool_create(dev, dcs, age);
5713         if (!pool) {
5714                 mlx5_devx_cmd_destroy(dcs);
5715                 return NULL;
5716         }
5717         TAILQ_INIT(&tmp_tq);
5718         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5719                 cnt = MLX5_POOL_GET_CNT(pool, i);
5720                 cnt->pool = pool;
5721                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5722         }
5723         rte_spinlock_lock(&cmng->csl[cnt_type]);
5724         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5725         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5726         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5727         (*cnt_free)->pool = pool;
5728         return pool;
5729 }
5730
5731 /**
5732  * Allocate a flow counter.
5733  *
5734  * @param[in] dev
5735  *   Pointer to the Ethernet device structure.
5736  * @param[in] age
5737  *   Whether the counter was allocated for aging.
5738  *
5739  * @return
5740  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5741  */
5742 static uint32_t
5743 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5744 {
5745         struct mlx5_priv *priv = dev->data->dev_private;
5746         struct mlx5_flow_counter_pool *pool = NULL;
5747         struct mlx5_flow_counter *cnt_free = NULL;
5748         bool fallback = priv->sh->cmng.counter_fallback;
5749         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5750         enum mlx5_counter_type cnt_type =
5751                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5752         uint32_t cnt_idx;
5753
5754         if (!priv->config.devx) {
5755                 rte_errno = ENOTSUP;
5756                 return 0;
5757         }
5758         /* Get free counters from container. */
5759         rte_spinlock_lock(&cmng->csl[cnt_type]);
5760         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5761         if (cnt_free)
5762                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5763         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5764         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5765                 goto err;
5766         pool = cnt_free->pool;
5767         if (fallback)
5768                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5769         /* Create a DV counter action only in the first time usage. */
5770         if (!cnt_free->action) {
5771                 uint16_t offset;
5772                 struct mlx5_devx_obj *dcs;
5773                 int ret;
5774
5775                 if (!fallback) {
5776                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5777                         dcs = pool->min_dcs;
5778                 } else {
5779                         offset = 0;
5780                         dcs = cnt_free->dcs_when_free;
5781                 }
5782                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5783                                                             &cnt_free->action);
5784                 if (ret) {
5785                         rte_errno = errno;
5786                         goto err;
5787                 }
5788         }
5789         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5790                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5791         /* Update the counter reset values. */
5792         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5793                                  &cnt_free->bytes))
5794                 goto err;
5795         if (!fallback && !priv->sh->cmng.query_thread_on)
5796                 /* Start the asynchronous batch query by the host thread. */
5797                 mlx5_set_query_alarm(priv->sh);
5798         return cnt_idx;
5799 err:
5800         if (cnt_free) {
5801                 cnt_free->pool = pool;
5802                 if (fallback)
5803                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5804                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5805                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5806                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5807         }
5808         return 0;
5809 }
5810
5811 /**
5812  * Allocate a shared flow counter.
5813  *
5814  * @param[in] ctx
5815  *   Pointer to the shared counter configuration.
5816  * @param[in] data
5817  *   Pointer to save the allocated counter index.
5818  *
5819  * @return
5820  *   0 on success and the allocated counter index is saved in @p data.
5821  */
5823 static int32_t
5824 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5825 {
5826         struct mlx5_shared_counter_conf *conf = ctx;
5827         struct rte_eth_dev *dev = conf->dev;
5828         struct mlx5_flow_counter *cnt;
5829
5830         data->dword = flow_dv_counter_alloc(dev, 0);
5831         data->dword |= MLX5_CNT_SHARED_OFFSET;
5832         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5833         cnt->shared_info.id = conf->id;
5834         return 0;
5835 }
5836
5837 /**
5838  * Get a shared flow counter.
5839  *
5840  * @param[in] dev
5841  *   Pointer to the Ethernet device structure.
5842  * @param[in] id
5843  *   Counter identifier.
5844  *
5845  * @return
5846  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5847  */
5848 static uint32_t
5849 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5850 {
5851         struct mlx5_priv *priv = dev->data->dev_private;
5852         struct mlx5_shared_counter_conf conf = {
5853                 .dev = dev,
5854                 .id = id,
5855         };
5856         union mlx5_l3t_data data = {
5857                 .dword = 0,
5858         };
5859
5860         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5861                                flow_dv_counter_alloc_shared_cb, &conf);
5862         return data.dword;
5863 }
5864
5865 /**
5866  * Get age param from counter index.
5867  *
5868  * @param[in] dev
5869  *   Pointer to the Ethernet device structure.
5870  * @param[in] counter
5871  *   Index to the counter handler.
5872  *
5873  * @return
5874  *   The aging parameter specified for the counter index.
5875  */
5876 static struct mlx5_age_param *
5877 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5878                                 uint32_t counter)
5879 {
5880         struct mlx5_flow_counter *cnt;
5881         struct mlx5_flow_counter_pool *pool = NULL;
5882
5883         flow_dv_counter_get_by_idx(dev, counter, &pool);
5884         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5885         cnt = MLX5_POOL_GET_CNT(pool, counter);
5886         return MLX5_CNT_TO_AGE(cnt);
5887 }
5888
5889 /**
5890  * Remove a flow counter from aged counter list.
5891  *
5892  * @param[in] dev
5893  *   Pointer to the Ethernet device structure.
5894  * @param[in] counter
5895  *   Index to the counter handler.
5896  * @param[in] cnt
5897  *   Pointer to the counter handler.
5898  */
5899 static void
5900 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5901                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5902 {
5903         struct mlx5_age_info *age_info;
5904         struct mlx5_age_param *age_param;
5905         struct mlx5_priv *priv = dev->data->dev_private;
5906         uint16_t expected = AGE_CANDIDATE;
5907
5908         age_info = GET_PORT_AGE_INFO(priv);
5909         age_param = flow_dv_counter_idx_get_age(dev, counter);
5910         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5911                                          AGE_FREE, false, __ATOMIC_RELAXED,
5912                                          __ATOMIC_RELAXED)) {
5913                 /*
5914                  * We need the lock even on age timeout,
5915                  * since the counter may still be in use.
5916                  */
5917                 rte_spinlock_lock(&age_info->aged_sl);
5918                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5919                 rte_spinlock_unlock(&age_info->aged_sl);
5920                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5921         }
5922 }
5923
5924 /**
5925  * Release a flow counter.
5926  *
5927  * @param[in] dev
5928  *   Pointer to the Ethernet device structure.
5929  * @param[in] counter
5930  *   Index to the counter handler.
5931  */
5932 static void
5933 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5934 {
5935         struct mlx5_priv *priv = dev->data->dev_private;
5936         struct mlx5_flow_counter_pool *pool = NULL;
5937         struct mlx5_flow_counter *cnt;
5938         enum mlx5_counter_type cnt_type;
5939
5940         if (!counter)
5941                 return;
5942         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5943         MLX5_ASSERT(pool);
5944         if (IS_SHARED_CNT(counter) &&
5945             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5946                 return;
5947         if (pool->is_aged)
5948                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5949         cnt->pool = pool;
5950         /*
5951          * Put the counter back to the list to be updated in non-fallback
5952          * mode. Two lists are used alternately: while one is being
5953          * queried, freed counters are added to the other one, selected
5954          * by the pool query_gen value. After the query finishes, that
5955          * list is appended to the global container counter list. The
5956          * lists are swapped when a query starts, so the query callback
5957          * and the release function always operate on different lists and
5958          * no extra lock is needed between them.
5959          */
5960         if (!priv->sh->cmng.counter_fallback) {
5961                 rte_spinlock_lock(&pool->csl);
5962                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5963                 rte_spinlock_unlock(&pool->csl);
5964         } else {
5965                 cnt->dcs_when_free = cnt->dcs_when_active;
5966                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5967                                            MLX5_COUNTER_TYPE_ORIGIN;
5968                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5969                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5970                                   cnt, next);
5971                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5972         }
5973 }
5974
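/*
 * Editorial sketch, not part of the driver: the counter lifetime
 * implied by the allocation and release routines above (hypothetical
 * usage, error handling elided).
 */
static void __rte_unused
example_counter_round_trip(struct rte_eth_dev *dev)
{
	uint32_t idx = flow_dv_counter_alloc(dev, 0 /* no aging */);

	if (idx)
		flow_dv_counter_free(dev, idx);
}
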
5975 /**
5976  * Resize a meter id container.
5977  *
5978  * @param[in] dev
5979  *   Pointer to the Ethernet device structure.
5980  *
5981  * @return
5982  *   0 on success, otherwise negative errno value and rte_errno is set.
5983  */
5984 static int
5985 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
5986 {
5987         struct mlx5_priv *priv = dev->data->dev_private;
5988         struct mlx5_aso_mtr_pools_mng *pools_mng =
5989                                 &priv->sh->mtrmng->pools_mng;
5990         void *old_pools = pools_mng->pools;
5991         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
5992         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
5993         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5994
5995         if (!pools) {
5996                 rte_errno = ENOMEM;
5997                 return -ENOMEM;
5998         }
5999         if (!pools_mng->n &&
6000             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6001                 mlx5_free(pools);
6002                 return -ENOMEM;
6003         }
6004         if (old_pools)
6005                 memcpy(pools, old_pools, pools_mng->n *
6006                                        sizeof(struct mlx5_aso_mtr_pool *));
6007         pools_mng->n = resize;
6008         pools_mng->pools = pools;
6009         if (old_pools)
6010                 mlx5_free(old_pools);
6011         return 0;
6012 }
6013
6014 /**
6015  * Prepare a new meter and/or a new meter pool.
6016  *
6017  * @param[in] dev
6018  *   Pointer to the Ethernet device structure.
6019  * @param[out] mtr_free
6020  *   Where to put the pointer of a new meter.
6021  *
6022  * @return
6023  *   The meter pool pointer and @p mtr_free is set on success,
6024  *   NULL otherwise and rte_errno is set.
6025  */
6026 static struct mlx5_aso_mtr_pool *
6027 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6028                              struct mlx5_aso_mtr **mtr_free)
6029 {
6030         struct mlx5_priv *priv = dev->data->dev_private;
6031         struct mlx5_aso_mtr_pools_mng *pools_mng =
6032                                 &priv->sh->mtrmng->pools_mng;
6033         struct mlx5_aso_mtr_pool *pool = NULL;
6034         struct mlx5_devx_obj *dcs = NULL;
6035         uint32_t i;
6036         uint32_t log_obj_size;
6037
6038         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6039         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6040                         priv->sh->pdn, log_obj_size);
6041         if (!dcs) {
6042                 rte_errno = ENODATA;
6043                 return NULL;
6044         }
6045         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6046         if (!pool) {
6047                 rte_errno = ENOMEM;
6048                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6049                 return NULL;
6050         }
6051         pool->devx_obj = dcs;
6052         pool->index = pools_mng->n_valid;
6053         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6054                 mlx5_free(pool);
6055                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6056                 return NULL;
6057         }
6058         pools_mng->pools[pool->index] = pool;
6059         pools_mng->n_valid++;
6060         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6061                 pool->mtrs[i].offset = i;
6062                 LIST_INSERT_HEAD(&pools_mng->meters,
6063                                                 &pool->mtrs[i], next);
6064         }
6065         pool->mtrs[0].offset = 0;
6066         *mtr_free = &pool->mtrs[0];
6067         return pool;
6068 }
6069
6070 /**
6071  * Release a flow meter into pool.
6072  *
6073  * @param[in] dev
6074  *   Pointer to the Ethernet device structure.
6075  * @param[in] mtr_idx
6076  *   Index to the ASO flow meter.
6077  */
6078 static void
6079 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6080 {
6081         struct mlx5_priv *priv = dev->data->dev_private;
6082         struct mlx5_aso_mtr_pools_mng *pools_mng =
6083                                 &priv->sh->mtrmng->pools_mng;
6084         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6085
6086         MLX5_ASSERT(aso_mtr);
6087         rte_spinlock_lock(&pools_mng->mtrsl);
6088         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6089         aso_mtr->state = ASO_METER_FREE;
6090         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6091         rte_spinlock_unlock(&pools_mng->mtrsl);
6092 }
6093
6094 /**
6095  * Allocate an ASO flow meter.
6096  *
6097  * @param[in] dev
6098  *   Pointer to the Ethernet device structure.
6099  *
6100  * @return
6101  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6102  */
6103 static uint32_t
6104 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6105 {
6106         struct mlx5_priv *priv = dev->data->dev_private;
6107         struct mlx5_aso_mtr *mtr_free = NULL;
6108         struct mlx5_aso_mtr_pools_mng *pools_mng =
6109                                 &priv->sh->mtrmng->pools_mng;
6110         struct mlx5_aso_mtr_pool *pool;
6111         uint32_t mtr_idx = 0;
6112
6113         if (!priv->config.devx) {
6114                 rte_errno = ENOTSUP;
6115                 return 0;
6116         }
6117         /* Get a free meter from the pools management; a new pool is
6118          * created if none is available. */
6119         rte_spinlock_lock(&pools_mng->mtrsl);
6120         mtr_free = LIST_FIRST(&pools_mng->meters);
6121         if (mtr_free)
6122                 LIST_REMOVE(mtr_free, next);
6123         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6124                 rte_spinlock_unlock(&pools_mng->mtrsl);
6125                 return 0;
6126         }
6127         mtr_free->state = ASO_METER_WAIT;
6128         rte_spinlock_unlock(&pools_mng->mtrsl);
6129         pool = container_of(mtr_free,
6130                         struct mlx5_aso_mtr_pool,
6131                         mtrs[mtr_free->offset]);
6132         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6133         if (!mtr_free->fm.meter_action) {
6134 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6135                 struct rte_flow_error error;
6136                 uint8_t reg_id;
6137
6138                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6139                 mtr_free->fm.meter_action =
6140                         mlx5_glue->dv_create_flow_action_aso
6141                                                 (priv->sh->rx_domain,
6142                                                  pool->devx_obj->obj,
6143                                                  mtr_free->offset,
6144                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6145                                                  reg_id - REG_C_0);
6146 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6147                 if (!mtr_free->fm.meter_action) {
6148                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6149                         return 0;
6150                 }
6151         }
6152         return mtr_idx;
6153 }
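/*
 * Usage sketch (editor's addition, not driver code): the typical
 * caller-side pairing of flow_dv_mtr_alloc() with the release helper
 * above. Index 0 doubles as the error value and must never be released.
 */
#if 0 /* Example only, never compiled. */
static int
mtr_alloc_release_demo(struct rte_eth_dev *dev)
{
        uint32_t mtr_idx = flow_dv_mtr_alloc(dev);

        if (!mtr_idx)
                return -rte_errno; /* E.g. ENOTSUP when DevX is disabled. */
        /* ... attach the meter to a flow and use it here ... */
        flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
        return 0;
}
#endif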
6154
/**
 * Verify that the @p attributes will be correctly understood by the NIC
 * and store them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, or NULL if the rule is not a
 *   tunnel offload rule.
 * @param[in] attributes
 *   Pointer to the flow attributes.
 * @param[in] grp_info
 *   Pointer to the flow group translation info.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and a non-root table.
 *   - 1 on success and the root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
6173 static int
6174 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6175                             const struct mlx5_flow_tunnel *tunnel,
6176                             const struct rte_flow_attr *attributes,
6177                             const struct flow_grp_info *grp_info,
6178                             struct rte_flow_error *error)
6179 {
6180         struct mlx5_priv *priv = dev->data->dev_private;
6181         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6182         int ret = 0;
6183
6184 #ifndef HAVE_MLX5DV_DR
6185         RTE_SET_USED(tunnel);
6186         RTE_SET_USED(grp_info);
6187         if (attributes->group)
6188                 return rte_flow_error_set(error, ENOTSUP,
6189                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6190                                           NULL,
6191                                           "groups are not supported");
6192 #else
6193         uint32_t table = 0;
6194
6195         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6196                                        grp_info, error);
6197         if (ret)
6198                 return ret;
6199         if (!table)
6200                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6201 #endif
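        /* A non-zero value marks the root table; the caller keeps it as is_root. */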
6202         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6203             attributes->priority > lowest_priority)
6204                 return rte_flow_error_set(error, ENOTSUP,
6205                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6206                                           NULL,
6207                                           "priority out of range");
6208         if (attributes->transfer) {
6209                 if (!priv->config.dv_esw_en)
6210                         return rte_flow_error_set
6211                                 (error, ENOTSUP,
6212                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6213                                  "E-Switch dr is not supported");
6214                 if (!(priv->representor || priv->master))
6215                         return rte_flow_error_set
6216                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6217                                  NULL, "E-Switch configuration can only be"
6218                                  " done by a master or a representor device");
6219                 if (attributes->egress)
6220                         return rte_flow_error_set
6221                                 (error, ENOTSUP,
6222                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6223                                  "egress is not supported");
6224         }
6225         if (!(attributes->egress ^ attributes->ingress))
6226                 return rte_flow_error_set(error, ENOTSUP,
6227                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6228                                           "must specify exactly one of "
6229                                           "ingress or egress");
6230         return ret;
6231 }
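/*
 * Attribute sketch (editor's addition, not driver code): inputs the
 * checks above accept and reject. A plain ingress rule in a non-root
 * group passes with a return value of 0; setting both directions fails
 * the "exactly one of ingress or egress" check.
 */
#if 0 /* Example only, never compiled. */
static const struct rte_flow_attr attr_ok = {
        .group = 1,     /* Non-root table. */
        .priority = 0,
        .ingress = 1,   /* Exactly one direction set. */
};
static const struct rte_flow_attr attr_bad = {
        .ingress = 1,
        .egress = 1,    /* Rejected: ingress ^ egress is false. */
};
#endif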
6232
6233 /**
6234  * Internal validation function. For validating both actions and items.
6235  *
6236  * @param[in] dev
6237  *   Pointer to the rte_eth_dev structure.
6238  * @param[in] attr
6239  *   Pointer to the flow attributes.
6240  * @param[in] items
6241  *   Pointer to the list of items.
6242  * @param[in] actions
6243  *   Pointer to the list of actions.
6244  * @param[in] external
 *   This flow rule is created by a request external to the PMD.
6246  * @param[in] hairpin
6247  *   Number of hairpin TX actions, 0 means classic flow.
6248  * @param[out] error
6249  *   Pointer to the error structure.
6250  *
6251  * @return
6252  *   0 on success, a negative errno value otherwise and rte_errno is set.
6253  */
6254 static int
6255 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6256                  const struct rte_flow_item items[],
6257                  const struct rte_flow_action actions[],
6258                  bool external, int hairpin, struct rte_flow_error *error)
6259 {
6260         int ret;
6261         uint64_t action_flags = 0;
6262         uint64_t item_flags = 0;
6263         uint64_t last_item = 0;
6264         uint8_t next_protocol = 0xff;
6265         uint16_t ether_type = 0;
6266         int actions_n = 0;
6267         uint8_t item_ipv6_proto = 0;
6268         int fdb_mirror_limit = 0;
6269         int modify_after_mirror = 0;
6270         const struct rte_flow_item *geneve_item = NULL;
6271         const struct rte_flow_item *gre_item = NULL;
6272         const struct rte_flow_item *gtp_item = NULL;
6273         const struct rte_flow_action_raw_decap *decap;
6274         const struct rte_flow_action_raw_encap *encap;
6275         const struct rte_flow_action_rss *rss = NULL;
6276         const struct rte_flow_action_rss *sample_rss = NULL;
6277         const struct rte_flow_action_count *count = NULL;
6278         const struct rte_flow_action_count *sample_count = NULL;
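        /*
         * Widest masks this path can match on for the items below; the
         * per-item validators reject any mask exceeding them.
         */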
6279         const struct rte_flow_item_tcp nic_tcp_mask = {
6280                 .hdr = {
6281                         .tcp_flags = 0xFF,
6282                         .src_port = RTE_BE16(UINT16_MAX),
6283                         .dst_port = RTE_BE16(UINT16_MAX),
6284                 }
6285         };
6286         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6287                 .hdr = {
6288                         .src_addr =
6289                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6290                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6291                         .dst_addr =
6292                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6293                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6294                         .vtc_flow = RTE_BE32(0xffffffff),
6295                         .proto = 0xff,
6296                         .hop_limits = 0xff,
6297                 },
6298                 .has_frag_ext = 1,
6299         };
6300         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6301                 .hdr = {
6302                         .common = {
6303                                 .u32 =
6304                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6305                                         .type = 0xFF,
6306                                         }).u32),
6307                         },
6308                         .dummy[0] = 0xffffffff,
6309                 },
6310         };
6311         struct mlx5_priv *priv = dev->data->dev_private;
6312         struct mlx5_dev_config *dev_conf = &priv->config;
6313         uint16_t queue_index = 0xFFFF;
6314         const struct rte_flow_item_vlan *vlan_m = NULL;
6315         uint32_t rw_act_num = 0;
6316         uint64_t is_root;
6317         const struct mlx5_flow_tunnel *tunnel;
6318         struct flow_grp_info grp_info = {
6319                 .external = !!external,
6320                 .transfer = !!attr->transfer,
6321                 .fdb_def_rule = !!priv->fdb_def_rule,
6322         };
6323         const struct rte_eth_hairpin_conf *conf;
6324         bool def_policy = false;
6325
6326         if (items == NULL)
6327                 return -1;
6328         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
6329                 tunnel = flow_items_to_tunnel(items);
6330                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6331                                 MLX5_FLOW_ACTION_DECAP;
6332         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
6333                 tunnel = flow_actions_to_tunnel(actions);
6334                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6335         } else {
6336                 tunnel = NULL;
6337         }
6338         if (tunnel && priv->representor)
6339                 return rte_flow_error_set(error, ENOTSUP,
6340                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6341                                           "decap not supported "
6342                                           "for VF representor");
6343         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6344                                 (dev, tunnel, attr, items, actions);
6345         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6346         if (ret < 0)
6347                 return ret;
6348         is_root = (uint64_t)ret;
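        /*
         * First pass: walk the pattern items, checking each against the
         * layers already seen (item_flags/last_item) and tracking the L3
         * next-protocol so the L4 items can be cross-checked.
         */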
6349         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6350                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6351                 int type = items->type;
6352
6353                 if (!mlx5_flow_os_item_supported(type))
6354                         return rte_flow_error_set(error, ENOTSUP,
6355                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6356                                                   NULL, "item not supported");
6357                 switch (type) {
6358                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
6359                         if (items[0].type != (typeof(items[0].type))
6360                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
6361                                 return rte_flow_error_set
6362                                                 (error, EINVAL,
6363                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                NULL, "MLX5 private items "
                                                "must come first");
6366                         break;
6367                 case RTE_FLOW_ITEM_TYPE_VOID:
6368                         break;
6369                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6370                         ret = flow_dv_validate_item_port_id
6371                                         (dev, items, attr, item_flags, error);
6372                         if (ret < 0)
6373                                 return ret;
6374                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6375                         break;
6376                 case RTE_FLOW_ITEM_TYPE_ETH:
6377                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6378                                                           true, error);
6379                         if (ret < 0)
6380                                 return ret;
6381                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6382                                              MLX5_FLOW_LAYER_OUTER_L2;
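                        /*
                         * Keep the masked EtherType so the following L3 item
                         * can be validated against it.
                         */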
6383                         if (items->mask != NULL && items->spec != NULL) {
6384                                 ether_type =
6385                                         ((const struct rte_flow_item_eth *)
6386                                          items->spec)->type;
6387                                 ether_type &=
6388                                         ((const struct rte_flow_item_eth *)
6389                                          items->mask)->type;
6390                                 ether_type = rte_be_to_cpu_16(ether_type);
6391                         } else {
6392                                 ether_type = 0;
6393                         }
6394                         break;
6395                 case RTE_FLOW_ITEM_TYPE_VLAN:
6396                         ret = flow_dv_validate_item_vlan(items, item_flags,
6397                                                          dev, error);
6398                         if (ret < 0)
6399                                 return ret;
6400                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6401                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6402                         if (items->mask != NULL && items->spec != NULL) {
6403                                 ether_type =
6404                                         ((const struct rte_flow_item_vlan *)
6405                                          items->spec)->inner_type;
6406                                 ether_type &=
6407                                         ((const struct rte_flow_item_vlan *)
6408                                          items->mask)->inner_type;
6409                                 ether_type = rte_be_to_cpu_16(ether_type);
6410                         } else {
6411                                 ether_type = 0;
6412                         }
6413                         /* Store outer VLAN mask for of_push_vlan action. */
6414                         if (!tunnel)
6415                                 vlan_m = items->mask;
6416                         break;
6417                 case RTE_FLOW_ITEM_TYPE_IPV4:
6418                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6419                                                   &item_flags, &tunnel);
6420                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6421                                                          last_item, ether_type,
6422                                                          error);
6423                         if (ret < 0)
6424                                 return ret;
6425                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6426                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6427                         if (items->mask != NULL &&
6428                             ((const struct rte_flow_item_ipv4 *)
6429                              items->mask)->hdr.next_proto_id) {
6430                                 next_protocol =
6431                                         ((const struct rte_flow_item_ipv4 *)
6432                                          (items->spec))->hdr.next_proto_id;
6433                                 next_protocol &=
6434                                         ((const struct rte_flow_item_ipv4 *)
6435                                          (items->mask))->hdr.next_proto_id;
6436                         } else {
6437                                 /* Reset for inner layer. */
6438                                 next_protocol = 0xff;
6439                         }
6440                         break;
6441                 case RTE_FLOW_ITEM_TYPE_IPV6:
6442                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6443                                                   &item_flags, &tunnel);
6444                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6445                                                            last_item,
6446                                                            ether_type,
6447                                                            &nic_ipv6_mask,
6448                                                            error);
6449                         if (ret < 0)
6450                                 return ret;
6451                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6452                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6453                         if (items->mask != NULL &&
6454                             ((const struct rte_flow_item_ipv6 *)
6455                              items->mask)->hdr.proto) {
6456                                 item_ipv6_proto =
6457                                         ((const struct rte_flow_item_ipv6 *)
6458                                          items->spec)->hdr.proto;
6459                                 next_protocol =
6460                                         ((const struct rte_flow_item_ipv6 *)
6461                                          items->spec)->hdr.proto;
6462                                 next_protocol &=
6463                                         ((const struct rte_flow_item_ipv6 *)
6464                                          items->mask)->hdr.proto;
6465                         } else {
6466                                 /* Reset for inner layer. */
6467                                 next_protocol = 0xff;
6468                         }
6469                         break;
6470                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6471                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6472                                                                   item_flags,
6473                                                                   error);
6474                         if (ret < 0)
6475                                 return ret;
6476                         last_item = tunnel ?
6477                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6478                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6479                         if (items->mask != NULL &&
6480                             ((const struct rte_flow_item_ipv6_frag_ext *)
6481                              items->mask)->hdr.next_header) {
6482                                 next_protocol =
6483                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6484                                  items->spec)->hdr.next_header;
6485                                 next_protocol &=
6486                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6487                                  items->mask)->hdr.next_header;
6488                         } else {
6489                                 /* Reset for inner layer. */
6490                                 next_protocol = 0xff;
6491                         }
6492                         break;
6493                 case RTE_FLOW_ITEM_TYPE_TCP:
6494                         ret = mlx5_flow_validate_item_tcp
6495                                                 (items, item_flags,
6496                                                  next_protocol,
6497                                                  &nic_tcp_mask,
6498                                                  error);
6499                         if (ret < 0)
6500                                 return ret;
6501                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6502                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6503                         break;
6504                 case RTE_FLOW_ITEM_TYPE_UDP:
6505                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6506                                                           next_protocol,
6507                                                           error);
6508                         if (ret < 0)
6509                                 return ret;
6510                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6511                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6512                         break;
6513                 case RTE_FLOW_ITEM_TYPE_GRE:
6514                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6515                                                           next_protocol, error);
6516                         if (ret < 0)
6517                                 return ret;
6518                         gre_item = items;
6519                         last_item = MLX5_FLOW_LAYER_GRE;
6520                         break;
6521                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6522                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6523                                                             next_protocol,
6524                                                             error);
6525                         if (ret < 0)
6526                                 return ret;
6527                         last_item = MLX5_FLOW_LAYER_NVGRE;
6528                         break;
6529                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6530                         ret = mlx5_flow_validate_item_gre_key
6531                                 (items, item_flags, gre_item, error);
6532                         if (ret < 0)
6533                                 return ret;
6534                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6535                         break;
6536                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6537                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6538                                                             error);
6539                         if (ret < 0)
6540                                 return ret;
6541                         last_item = MLX5_FLOW_LAYER_VXLAN;
6542                         break;
6543                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6544                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6545                                                                 item_flags, dev,
6546                                                                 error);
6547                         if (ret < 0)
6548                                 return ret;
6549                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6550                         break;
6551                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6552                         ret = mlx5_flow_validate_item_geneve(items,
6553                                                              item_flags, dev,
6554                                                              error);
6555                         if (ret < 0)
6556                                 return ret;
6557                         geneve_item = items;
6558                         last_item = MLX5_FLOW_LAYER_GENEVE;
6559                         break;
6560                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6561                         ret = mlx5_flow_validate_item_geneve_opt(items,
6562                                                                  last_item,
6563                                                                  geneve_item,
6564                                                                  dev,
6565                                                                  error);
6566                         if (ret < 0)
6567                                 return ret;
6568                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6569                         break;
6570                 case RTE_FLOW_ITEM_TYPE_MPLS:
6571                         ret = mlx5_flow_validate_item_mpls(dev, items,
6572                                                            item_flags,
6573                                                            last_item, error);
6574                         if (ret < 0)
6575                                 return ret;
6576                         last_item = MLX5_FLOW_LAYER_MPLS;
6577                         break;
6579                 case RTE_FLOW_ITEM_TYPE_MARK:
6580                         ret = flow_dv_validate_item_mark(dev, items, attr,
6581                                                          error);
6582                         if (ret < 0)
6583                                 return ret;
6584                         last_item = MLX5_FLOW_ITEM_MARK;
6585                         break;
6586                 case RTE_FLOW_ITEM_TYPE_META:
6587                         ret = flow_dv_validate_item_meta(dev, items, attr,
6588                                                          error);
6589                         if (ret < 0)
6590                                 return ret;
6591                         last_item = MLX5_FLOW_ITEM_METADATA;
6592                         break;
6593                 case RTE_FLOW_ITEM_TYPE_ICMP:
6594                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6595                                                            next_protocol,
6596                                                            error);
6597                         if (ret < 0)
6598                                 return ret;
6599                         last_item = MLX5_FLOW_LAYER_ICMP;
6600                         break;
6601                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6602                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6603                                                             next_protocol,
6604                                                             error);
6605                         if (ret < 0)
6606                                 return ret;
6607                         item_ipv6_proto = IPPROTO_ICMPV6;
6608                         last_item = MLX5_FLOW_LAYER_ICMP6;
6609                         break;
6610                 case RTE_FLOW_ITEM_TYPE_TAG:
6611                         ret = flow_dv_validate_item_tag(dev, items,
6612                                                         attr, error);
6613                         if (ret < 0)
6614                                 return ret;
6615                         last_item = MLX5_FLOW_ITEM_TAG;
6616                         break;
6617                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6618                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6619                         break;
6620                 case RTE_FLOW_ITEM_TYPE_GTP:
6621                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
6622                                                         error);
6623                         if (ret < 0)
6624                                 return ret;
6625                         gtp_item = items;
6626                         last_item = MLX5_FLOW_LAYER_GTP;
6627                         break;
6628                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6629                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
6630                                                             gtp_item, attr,
6631                                                             error);
6632                         if (ret < 0)
6633                                 return ret;
6634                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
6635                         break;
6636                 case RTE_FLOW_ITEM_TYPE_ECPRI:
6637                         /* Capacity will be checked in the translate stage. */
6638                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
6639                                                             last_item,
6640                                                             ether_type,
6641                                                             &nic_ecpri_mask,
6642                                                             error);
6643                         if (ret < 0)
6644                                 return ret;
6645                         last_item = MLX5_FLOW_LAYER_ECPRI;
6646                         break;
6647                 default:
6648                         return rte_flow_error_set(error, ENOTSUP,
6649                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6650                                                   NULL, "item not supported");
6651                 }
6652                 item_flags |= last_item;
6653         }
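        /*
         * Second pass: walk the actions, accumulating action_flags, the
         * device action count (actions_n) and the modify-header write
         * count (rw_act_num) to be checked against hardware limits.
         */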
6654         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6655                 int type = actions->type;
6656
6657                 if (!mlx5_flow_os_action_supported(type))
6658                         return rte_flow_error_set(error, ENOTSUP,
6659                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6660                                                   actions,
6661                                                   "action not supported");
6662                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
6663                         return rte_flow_error_set(error, ENOTSUP,
6664                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6665                                                   actions, "too many actions");
6666                 if (action_flags &
6667                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
6668                         return rte_flow_error_set(error, ENOTSUP,
6669                                 RTE_FLOW_ERROR_TYPE_ACTION,
6670                                 NULL, "meter action with policy "
6671                                 "must be the last action");
6672                 switch (type) {
6673                 case RTE_FLOW_ACTION_TYPE_VOID:
6674                         break;
6675                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
6676                         ret = flow_dv_validate_action_port_id(dev,
6677                                                               action_flags,
6678                                                               actions,
6679                                                               attr,
6680                                                               error);
6681                         if (ret)
6682                                 return ret;
6683                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
6684                         ++actions_n;
6685                         break;
6686                 case RTE_FLOW_ACTION_TYPE_FLAG:
6687                         ret = flow_dv_validate_action_flag(dev, action_flags,
6688                                                            attr, error);
6689                         if (ret < 0)
6690                                 return ret;
6691                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6692                                 /* Count all modify-header actions as one. */
6693                                 if (!(action_flags &
6694                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6695                                         ++actions_n;
6696                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
6697                                                 MLX5_FLOW_ACTION_MARK_EXT;
6698                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6699                                         modify_after_mirror = 1;
6700
6701                         } else {
6702                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
6703                                 ++actions_n;
6704                         }
6705                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6706                         break;
6707                 case RTE_FLOW_ACTION_TYPE_MARK:
6708                         ret = flow_dv_validate_action_mark(dev, actions,
6709                                                            action_flags,
6710                                                            attr, error);
6711                         if (ret < 0)
6712                                 return ret;
6713                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6714                                 /* Count all modify-header actions as one. */
6715                                 if (!(action_flags &
6716                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
6717                                         ++actions_n;
6718                                 action_flags |= MLX5_FLOW_ACTION_MARK |
6719                                                 MLX5_FLOW_ACTION_MARK_EXT;
6720                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6721                                         modify_after_mirror = 1;
6722                         } else {
6723                                 action_flags |= MLX5_FLOW_ACTION_MARK;
6724                                 ++actions_n;
6725                         }
6726                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
6727                         break;
6728                 case RTE_FLOW_ACTION_TYPE_SET_META:
6729                         ret = flow_dv_validate_action_set_meta(dev, actions,
6730                                                                action_flags,
6731                                                                attr, error);
6732                         if (ret < 0)
6733                                 return ret;
6734                         /* Count all modify-header actions as one action. */
6735                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6736                                 ++actions_n;
6737                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6738                                 modify_after_mirror = 1;
6739                         action_flags |= MLX5_FLOW_ACTION_SET_META;
6740                         rw_act_num += MLX5_ACT_NUM_SET_META;
6741                         break;
6742                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
6743                         ret = flow_dv_validate_action_set_tag(dev, actions,
6744                                                               action_flags,
6745                                                               attr, error);
6746                         if (ret < 0)
6747                                 return ret;
6748                         /* Count all modify-header actions as one action. */
6749                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6750                                 ++actions_n;
6751                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6752                                 modify_after_mirror = 1;
6753                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6754                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6755                         break;
6756                 case RTE_FLOW_ACTION_TYPE_DROP:
6757                         ret = mlx5_flow_validate_action_drop(action_flags,
6758                                                              attr, error);
6759                         if (ret < 0)
6760                                 return ret;
6761                         action_flags |= MLX5_FLOW_ACTION_DROP;
6762                         ++actions_n;
6763                         break;
6764                 case RTE_FLOW_ACTION_TYPE_QUEUE:
6765                         ret = mlx5_flow_validate_action_queue(actions,
6766                                                               action_flags, dev,
6767                                                               attr, error);
6768                         if (ret < 0)
6769                                 return ret;
6770                         queue_index = ((const struct rte_flow_action_queue *)
6771                                                         (actions->conf))->index;
6772                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
6773                         ++actions_n;
6774                         break;
6775                 case RTE_FLOW_ACTION_TYPE_RSS:
6776                         rss = actions->conf;
6777                         ret = mlx5_flow_validate_action_rss(actions,
6778                                                             action_flags, dev,
6779                                                             attr, item_flags,
6780                                                             error);
6781                         if (ret < 0)
6782                                 return ret;
6783                         if (rss && sample_rss &&
6784                             (sample_rss->level != rss->level ||
6785                             sample_rss->types != rss->types))
6786                                 return rte_flow_error_set(error, ENOTSUP,
6787                                         RTE_FLOW_ERROR_TYPE_ACTION,
6788                                         NULL,
                                        "Cannot use different RSS types "
                                        "or levels in the same flow");
6791                         if (rss != NULL && rss->queue_num)
6792                                 queue_index = rss->queue[0];
6793                         action_flags |= MLX5_FLOW_ACTION_RSS;
6794                         ++actions_n;
6795                         break;
6796                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
                        ret = mlx5_flow_validate_action_default_miss
                                                (action_flags, attr, error);
6800                         if (ret < 0)
6801                                 return ret;
6802                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
6803                         ++actions_n;
6804                         break;
6805                 case RTE_FLOW_ACTION_TYPE_COUNT:
6806                         ret = flow_dv_validate_action_count(dev, actions,
6807                                                             action_flags,
6808                                                             error);
6809                         if (ret < 0)
6810                                 return ret;
6811                         count = actions->conf;
6812                         action_flags |= MLX5_FLOW_ACTION_COUNT;
6813                         ++actions_n;
6814                         break;
6815                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
6816                         if (flow_dv_validate_action_pop_vlan(dev,
6817                                                              action_flags,
6818                                                              actions,
6819                                                              item_flags, attr,
6820                                                              error))
6821                                 return -rte_errno;
6822                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6823                                 modify_after_mirror = 1;
6824                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
6825                         ++actions_n;
6826                         break;
6827                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6828                         ret = flow_dv_validate_action_push_vlan(dev,
6829                                                                 action_flags,
6830                                                                 vlan_m,
6831                                                                 actions, attr,
6832                                                                 error);
6833                         if (ret < 0)
6834                                 return ret;
6835                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6836                                 modify_after_mirror = 1;
6837                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
6838                         ++actions_n;
6839                         break;
6840                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
6841                         ret = flow_dv_validate_action_set_vlan_pcp
6842                                                 (action_flags, actions, error);
6843                         if (ret < 0)
6844                                 return ret;
6845                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6846                                 modify_after_mirror = 1;
6847                         /* Count PCP with push_vlan command. */
6848                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
6849                         break;
6850                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6851                         ret = flow_dv_validate_action_set_vlan_vid
6852                                                 (item_flags, action_flags,
6853                                                  actions, error);
6854                         if (ret < 0)
6855                                 return ret;
6856                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6857                                 modify_after_mirror = 1;
6858                         /* Count VID with push_vlan command. */
6859                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
6860                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
6861                         break;
6862                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6863                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6864                         ret = flow_dv_validate_action_l2_encap(dev,
6865                                                                action_flags,
6866                                                                actions, attr,
6867                                                                error);
6868                         if (ret < 0)
6869                                 return ret;
6870                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
6871                         ++actions_n;
6872                         break;
6873                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6874                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6875                         ret = flow_dv_validate_action_decap(dev, action_flags,
6876                                                             actions, item_flags,
6877                                                             attr, error);
6878                         if (ret < 0)
6879                                 return ret;
6880                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6881                                 modify_after_mirror = 1;
6882                         action_flags |= MLX5_FLOW_ACTION_DECAP;
6883                         ++actions_n;
6884                         break;
6885                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6886                         ret = flow_dv_validate_action_raw_encap_decap
6887                                 (dev, NULL, actions->conf, attr, &action_flags,
6888                                  &actions_n, actions, item_flags, error);
6889                         if (ret < 0)
6890                                 return ret;
6891                         break;
6892                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6893                         decap = actions->conf;
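                        /*
                         * Look ahead past any VOID actions: a raw_decap
                         * immediately followed by raw_encap is validated as
                         * one combined decap/encap action.
                         */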
6894                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
6895                                 ;
6896                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
6897                                 encap = NULL;
6898                                 actions--;
6899                         } else {
6900                                 encap = actions->conf;
6901                         }
6902                         ret = flow_dv_validate_action_raw_encap_decap
6903                                            (dev,
6904                                             decap ? decap : &empty_decap, encap,
6905                                             attr, &action_flags, &actions_n,
6906                                             actions, item_flags, error);
6907                         if (ret < 0)
6908                                 return ret;
6909                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
6910                             (action_flags & MLX5_FLOW_ACTION_DECAP))
6911                                 modify_after_mirror = 1;
6912                         break;
6913                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
6914                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
6915                         ret = flow_dv_validate_action_modify_mac(action_flags,
6916                                                                  actions,
6917                                                                  item_flags,
6918                                                                  error);
6919                         if (ret < 0)
6920                                 return ret;
6921                         /* Count all modify-header actions as one action. */
6922                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6923                                 ++actions_n;
6924                         action_flags |= actions->type ==
6925                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
6926                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
6927                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
6928                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6929                                 modify_after_mirror = 1;
                        /*
                         * Even if the source and destination MAC addresses
                         * overlap in the header with 4-byte alignment, the
                         * conversion function handles them separately, so
                         * four SW actions are created, and two actions are
                         * added each time no matter how many bytes of the
                         * address are set.
                         */
6937                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
6938                         break;
6939                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
6940                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
6941                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
6942                                                                   actions,
6943                                                                   item_flags,
6944                                                                   error);
6945                         if (ret < 0)
6946                                 return ret;
6947                         /* Count all modify-header actions as one action. */
6948                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6949                                 ++actions_n;
6950                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6951                                 modify_after_mirror = 1;
6952                         action_flags |= actions->type ==
6953                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
6954                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
6955                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
6956                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
6957                         break;
6958                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
6959                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
6960                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
6961                                                                   actions,
6962                                                                   item_flags,
6963                                                                   error);
6964                         if (ret < 0)
6965                                 return ret;
6966                         if (item_ipv6_proto == IPPROTO_ICMPV6)
6967                                 return rte_flow_error_set(error, ENOTSUP,
6968                                         RTE_FLOW_ERROR_TYPE_ACTION,
6969                                         actions,
6970                                         "Can't change header "
6971                                         "with ICMPv6 proto");
6972                         /* Count all modify-header actions as one action. */
6973                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6974                                 ++actions_n;
6975                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6976                                 modify_after_mirror = 1;
6977                         action_flags |= actions->type ==
6978                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
6979                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
6980                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
6981                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
6982                         break;
6983                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
6984                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
6985                         ret = flow_dv_validate_action_modify_tp(action_flags,
6986                                                                 actions,
6987                                                                 item_flags,
6988                                                                 error);
6989                         if (ret < 0)
6990                                 return ret;
6991                         /* Count all modify-header actions as one action. */
6992                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6993                                 ++actions_n;
6994                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
6995                                 modify_after_mirror = 1;
6996                         action_flags |= actions->type ==
6997                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
6998                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
6999                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7000                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7001                         break;
7002                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7003                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7004                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7005                                                                  actions,
7006                                                                  item_flags,
7007                                                                  error);
7008                         if (ret < 0)
7009                                 return ret;
7010                         /* Count all modify-header actions as one action. */
7011                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7012                                 ++actions_n;
7013                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7014                                 modify_after_mirror = 1;
7015                         action_flags |= actions->type ==
7016                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7017                                                 MLX5_FLOW_ACTION_SET_TTL :
7018                                                 MLX5_FLOW_ACTION_DEC_TTL;
7019                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7020                         break;
7021                 case RTE_FLOW_ACTION_TYPE_JUMP:
7022                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7023                                                            action_flags,
7024                                                            attr, external,
7025                                                            error);
7026                         if (ret)
7027                                 return ret;
7028                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7029                             fdb_mirror_limit)
7030                                 return rte_flow_error_set(error, EINVAL,
7031                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7032                                                   NULL,
7033                                                   "sample and jump action combination is not supported");
7034                         ++actions_n;
7035                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7036                         break;
7037                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7038                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7039                         ret = flow_dv_validate_action_modify_tcp_seq
7040                                                                 (action_flags,
7041                                                                  actions,
7042                                                                  item_flags,
7043                                                                  error);
7044                         if (ret < 0)
7045                                 return ret;
7046                         /* Count all modify-header actions as one action. */
7047                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7048                                 ++actions_n;
7049                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7050                                 modify_after_mirror = 1;
7051                         action_flags |= actions->type ==
7052                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7053                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7054                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7055                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7056                         break;
7057                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7058                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7059                         ret = flow_dv_validate_action_modify_tcp_ack
7060                                                                 (action_flags,
7061                                                                  actions,
7062                                                                  item_flags,
7063                                                                  error);
7064                         if (ret < 0)
7065                                 return ret;
7066                         /* Count all modify-header actions as one action. */
7067                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7068                                 ++actions_n;
7069                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7070                                 modify_after_mirror = 1;
7071                         action_flags |= actions->type ==
7072                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7073                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7074                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7075                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7076                         break;
7077                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7078                         break;
7079                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7080                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7081                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7082                         break;
7083                 case RTE_FLOW_ACTION_TYPE_METER:
7084                         ret = mlx5_flow_validate_action_meter(dev,
7085                                                               action_flags,
7086                                                               actions, attr,
7087                                                               &def_policy,
7088                                                               error);
7089                         if (ret < 0)
7090                                 return ret;
7091                         action_flags |= MLX5_FLOW_ACTION_METER;
7092                         if (!def_policy)
7093                                 action_flags |=
7094                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7095                         ++actions_n;
7096                         /* Meter action will add one more TAG action. */
7097                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7098                         break;
7099                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7100                         if (!attr->transfer && !attr->group)
7101                                 return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                        "Shared ASO age action is not "
                                        "supported for group 0");
7105                         action_flags |= MLX5_FLOW_ACTION_AGE;
7106                         ++actions_n;
7107                         break;
7108                 case RTE_FLOW_ACTION_TYPE_AGE:
7109                         ret = flow_dv_validate_action_age(action_flags,
7110                                                           actions, dev,
7111                                                           error);
7112                         if (ret < 0)
7113                                 return ret;
7114                         /*
7115                          * Validate mutual exclusion of the regular AGE action
7116                          * (using a counter) with shared counter actions.
7117                          */
7118                         if (!priv->sh->flow_hit_aso_en) {
7119                                 if (count && count->shared)
7120                                         return rte_flow_error_set
7121                                                 (error, EINVAL,
7122                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7123                                                 NULL,
7124                                                 "old age and shared count combination is not supported");
7125                                 if (sample_count)
7126                                         return rte_flow_error_set
7127                                                 (error, EINVAL,
7128                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7129                                                 NULL,
7130                                                 "old age action and count must be in the same sub flow");
7131                         }
7132                         action_flags |= MLX5_FLOW_ACTION_AGE;
7133                         ++actions_n;
7134                         break;
7135                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7136                         ret = flow_dv_validate_action_modify_ipv4_dscp
7137                                                          (action_flags,
7138                                                           actions,
7139                                                           item_flags,
7140                                                           error);
7141                         if (ret < 0)
7142                                 return ret;
7143                         /* Count all modify-header actions as one action. */
7144                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7145                                 ++actions_n;
7146                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7147                                 modify_after_mirror = 1;
7148                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7149                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7150                         break;
7151                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7152                         ret = flow_dv_validate_action_modify_ipv6_dscp
7153                                                                 (action_flags,
7154                                                                  actions,
7155                                                                  item_flags,
7156                                                                  error);
7157                         if (ret < 0)
7158                                 return ret;
7159                         /* Count all modify-header actions as one action. */
7160                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7161                                 ++actions_n;
7162                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7163                                 modify_after_mirror = 1;
7164                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7165                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7166                         break;
7167                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7168                         ret = flow_dv_validate_action_sample(&action_flags,
7169                                                              actions, dev,
7170                                                              attr, item_flags,
7171                                                              rss, &sample_rss,
7172                                                              &sample_count,
7173                                                              &fdb_mirror_limit,
7174                                                              error);
7175                         if (ret < 0)
7176                                 return ret;
7177                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7178                         ++actions_n;
7179                         break;
7180                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7181                         if (actions[0].type != (typeof(actions[0].type))
7182                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
7183                                 return rte_flow_error_set
7184                                                 (error, EINVAL,
7185                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7186                                                 NULL, "MLX5 private action "
7187                                                 "must be the first");
7189                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
7190                         break;
7191                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7192                         ret = flow_dv_validate_action_modify_field(dev,
7193                                                                    action_flags,
7194                                                                    actions,
7195                                                                    attr,
7196                                                                    error);
7197                         if (ret < 0)
7198                                 return ret;
7199                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7200                                 modify_after_mirror = 1;
7201                         /* Count all modify-header actions as one action. */
7202                         if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
7203                                 ++actions_n;
7204                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7205                         rw_act_num += ret;
7206                         break;
7207                 default:
7208                         return rte_flow_error_set(error, ENOTSUP,
7209                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7210                                                   actions,
7211                                                   "action not supported");
7212                 }
7213         }
7214         /*
7215          * Validate actions in flow rules
7216          * - Explicit decap action is prohibited by the tunnel offload API.
7217          * - Drop action in tunnel steer rule is prohibited by the API.
7218                          * - Application cannot use MARK action because its value can mask
7219                          *   tunnel default miss notification.
7220          * - JUMP in tunnel match rule has no support in current PMD
7221          *   implementation.
7222          * - TAG & META are reserved for future uses.
7223          */
7224         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7225                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7226                                             MLX5_FLOW_ACTION_MARK     |
7227                                             MLX5_FLOW_ACTION_SET_TAG  |
7228                                             MLX5_FLOW_ACTION_SET_META |
7229                                             MLX5_FLOW_ACTION_DROP;
7230
7231                 if (action_flags & bad_actions_mask)
7232                         return rte_flow_error_set
7233                                         (error, EINVAL,
7234                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7235                                         "Invalid RTE action in tunnel "
7236                                         "set decap rule");
7237                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7238                         return rte_flow_error_set
7239                                         (error, EINVAL,
7240                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7241                                         "tunnel set decap rule must terminate "
7242                                         "with JUMP");
7243                 if (!attr->ingress)
7244                         return rte_flow_error_set
7245                                         (error, EINVAL,
7246                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7247                                         "tunnel flows for ingress traffic only");
7248         }
7249         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7250                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7251                                             MLX5_FLOW_ACTION_MARK    |
7252                                             MLX5_FLOW_ACTION_SET_TAG |
7253                                             MLX5_FLOW_ACTION_SET_META;
7254
7255                 if (action_flags & bad_actions_mask)
7256                         return rte_flow_error_set
7257                                         (error, EINVAL,
7258                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7259                                         "Invalid RTE action in tunnel "
7260                                         "match rule");
7261         }
7262         /*
7263          * Validate the drop action mutual exclusion with other actions.
7264          * Drop action is mutually-exclusive with any other action, except for
7265          * Count action.
7266          * Drop action compatibility with tunnel offload was already validated.
7267          */
7268         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7269                             MLX5_FLOW_ACTION_TUNNEL_SET));
7270         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7271             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7272                 return rte_flow_error_set(error, EINVAL,
7273                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7274                                           "Drop action is mutually-exclusive "
7275                                           "with any other action, except for "
7276                                           "Count action");
7277         /* Eswitch has a few restrictions on using items and actions. */
7278         if (attr->transfer) {
7279                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7280                     action_flags & MLX5_FLOW_ACTION_FLAG)
7281                         return rte_flow_error_set(error, ENOTSUP,
7282                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7283                                                   NULL,
7284                                                   "unsupported action FLAG");
7285                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7286                     action_flags & MLX5_FLOW_ACTION_MARK)
7287                         return rte_flow_error_set(error, ENOTSUP,
7288                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7289                                                   NULL,
7290                                                   "unsupported action MARK");
7291                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7292                         return rte_flow_error_set(error, ENOTSUP,
7293                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7294                                                   NULL,
7295                                                   "unsupported action QUEUE");
7296                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7297                         return rte_flow_error_set(error, ENOTSUP,
7298                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7299                                                   NULL,
7300                                                   "unsupported action RSS");
7301                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7302                         return rte_flow_error_set(error, EINVAL,
7303                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7304                                                   actions,
7305                                                   "no fate action is found");
7306         } else {
7307                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7308                         return rte_flow_error_set(error, EINVAL,
7309                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7310                                                   actions,
7311                                                   "no fate action is found");
7312         }
7313         /*
7314          * Continue validation for Xcap and VLAN actions.
7315          * If hairpin works in explicit TX rule mode, there is no action
7316          * splitting and the validation of a hairpin ingress flow is the
7317          * same as for other standard flows.
7318          */
7319         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7320                              MLX5_FLOW_VLAN_ACTIONS)) &&
7321             (queue_index == 0xFFFF ||
7322              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7323              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7324              conf->tx_explicit != 0))) {
7325                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7326                     MLX5_FLOW_XCAP_ACTIONS)
7327                         return rte_flow_error_set(error, ENOTSUP,
7328                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7329                                                   NULL, "encap and decap "
7330                                                   "combination is not supported");
7331                 if (!attr->transfer && attr->ingress) {
7332                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7333                                 return rte_flow_error_set
7334                                                 (error, ENOTSUP,
7335                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7336                                                  NULL, "encap is not supported"
7337                                                  " for ingress traffic");
7338                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7339                                 return rte_flow_error_set
7340                                                 (error, ENOTSUP,
7341                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7342                                                  NULL, "push VLAN action not "
7343                                                  "supported for ingress");
7344                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7345                                         MLX5_FLOW_VLAN_ACTIONS)
7346                                 return rte_flow_error_set
7347                                                 (error, ENOTSUP,
7348                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7349                                                  NULL, "no support for "
7350                                                  "multiple VLAN actions");
7351                 }
7352         }
7353         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7354                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7355                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7356                         attr->ingress)
7357                         return rte_flow_error_set
7358                                 (error, ENOTSUP,
7359                                 RTE_FLOW_ERROR_TYPE_ACTION,
7360                                 NULL, "fate action not supported for "
7361                                 "meter with policy");
7362                 if (attr->egress) {
7363                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7364                                 return rte_flow_error_set
7365                                         (error, ENOTSUP,
7366                                         RTE_FLOW_ERROR_TYPE_ACTION,
7367                                         NULL, "modify header action in egress "
7368                                         "cannot be done before meter action");
7369                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7370                                 return rte_flow_error_set
7371                                         (error, ENOTSUP,
7372                                         RTE_FLOW_ERROR_TYPE_ACTION,
7373                                         NULL, "encap action in egress "
7374                                         "cannot be done before meter action");
7375                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7376                                 return rte_flow_error_set
7377                                         (error, ENOTSUP,
7378                                         RTE_FLOW_ERROR_TYPE_ACTION,
7379                                         NULL, "push vlan action in egress "
7380                                         "cannot be done before meter action");
7381                 }
7382         }
7383         /*
7384          * Hairpin flow will add one more TAG action in TX implicit mode.
7385          * In TX explicit mode, there will be no hairpin flow ID.
7386          */
7387         if (hairpin > 0)
7388                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7389         /* Extra metadata enabled: one more TAG action will be added. */
7390         if (dev_conf->dv_flow_en &&
7391             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7392             mlx5_flow_ext_mreg_supported(dev))
7393                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7394         if (rw_act_num >
7395                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7396                 return rte_flow_error_set(error, ENOTSUP,
7397                                           RTE_FLOW_ERROR_TYPE_ACTION,
7398                                           NULL, "too many header modify"
7399                                           " actions to support");
7400         }
7401         /* Eswitch egress mirror and modify flows have a limitation on CX5. */
7402         if (fdb_mirror_limit && modify_after_mirror)
7403                 return rte_flow_error_set(error, EINVAL,
7404                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7405                                 "sample before modify action is not supported");
7406         return 0;
7407 }
7408
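/*
 * Illustration (not part of the driver): how the validation above accounts
 * for modify-header actions. All modify-header actions share a single HW
 * action slot ("actions_n"), while "rw_act_num" accumulates the per-field
 * read-write operations, bounded by flow_dv_modify_hdr_action_max().
 * A hypothetical rule with SET_IPV4_DSCP followed by INC_TCP_SEQ:
 *
 *	uint64_t action_flags = 0;
 *	int actions_n = 0, rw_act_num = 0;
 *
 *	// SET_IPV4_DSCP: first modify-header action, takes the slot.
 *	if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 *		++actions_n;				// actions_n == 1
 *	action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
 *	rw_act_num += MLX5_ACT_NUM_SET_DSCP;
 *
 *	// INC_TCP_SEQ: same slot, one more read-write operation.
 *	if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
 *		++actions_n;				// not taken, still 1
 *	action_flags |= MLX5_FLOW_ACTION_INC_TCP_SEQ;
 *	rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
 */
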
7409 /**
7410  * Internal preparation function. Allocates the DV flow structure;
7411  * its size is constant.
7412  *
7413  * @param[in] dev
7414  *   Pointer to the rte_eth_dev structure.
7415  * @param[in] attr
7416  *   Pointer to the flow attributes.
7417  * @param[in] items
7418  *   Pointer to the list of items.
7419  * @param[in] actions
7420  *   Pointer to the list of actions.
7421  * @param[out] error
7422  *   Pointer to the error structure.
7423  *
7424  * @return
7425  *   Pointer to mlx5_flow object on success,
7426  *   otherwise NULL and rte_errno is set.
7427  */
7428 static struct mlx5_flow *
7429 flow_dv_prepare(struct rte_eth_dev *dev,
7430                 const struct rte_flow_attr *attr __rte_unused,
7431                 const struct rte_flow_item items[] __rte_unused,
7432                 const struct rte_flow_action actions[] __rte_unused,
7433                 struct rte_flow_error *error)
7434 {
7435         uint32_t handle_idx = 0;
7436         struct mlx5_flow *dev_flow;
7437         struct mlx5_flow_handle *dev_handle;
7438         struct mlx5_priv *priv = dev->data->dev_private;
7439         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7440
7441         MLX5_ASSERT(wks);
7442         /* Guard against overflowing the per-thread flow workspace. */
7443         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7444                 rte_flow_error_set(error, ENOSPC,
7445                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7446                                    "no free temporary device flow");
7447                 return NULL;
7448         }
7449         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7450                                    &handle_idx);
7451         if (!dev_handle) {
7452                 rte_flow_error_set(error, ENOMEM,
7453                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7454                                    "not enough memory to create flow handle");
7455                 return NULL;
7456         }
7457         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7458         dev_flow = &wks->flows[wks->flow_idx++];
7459         memset(dev_flow, 0, sizeof(*dev_flow));
7460         dev_flow->handle = dev_handle;
7461         dev_flow->handle_idx = handle_idx;
7462         /*
7463          * Some old rdma-core releases check the length of the matching
7464          * parameter before continuing, so start with the length that
7465          * excludes the misc4 param. If the flow actually matches on misc4
7466          * fields, the length is adjusted accordingly later. Each param
7467          * member is naturally aligned on a 64B boundary.
7468          */
7469         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7470                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7471         dev_flow->ingress = attr->ingress;
7472         dev_flow->dv.transfer = attr->transfer;
7473         return dev_flow;
7474 }
7475
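/*
 * Size sketch (illustrative, with a hypothetical "uses_misc4" flag): the
 * initial match value excludes the trailing misc4 section; translation
 * code that matches on misc4 fields is expected to grow it back:
 *
 *	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
 *				  MLX5_ST_SZ_BYTES(fte_match_set_misc4);
 *	if (uses_misc4)
 *		dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
 */
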
7476 #ifdef RTE_LIBRTE_MLX5_DEBUG
7477 /**
7478  * Sanity check for match mask and value. Similar to check_valid_spec() in
7479  * kernel driver. If an unmasked bit is present in the value, return failure.
7480  *
7481  * @param match_mask
7482  *   pointer to match mask buffer.
7483  * @param match_value
7484  *   pointer to match value buffer.
7485  *
7486  * @return
7487  *   0 if valid, -EINVAL otherwise.
7488  */
7489 static int
7490 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7491 {
7492         uint8_t *m = match_mask;
7493         uint8_t *v = match_value;
7494         unsigned int i;
7495
7496         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7497                 if (v[i] & ~m[i]) {
7498                         DRV_LOG(ERR,
7499                                 "match_value differs from match_criteria"
7500                                 " %p[%u] != %p[%u]",
7501                                 match_value, i, match_mask, i);
7502                         return -EINVAL;
7503                 }
7504         }
7505         return 0;
7506 }
7507 #endif
7508
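/*
 * Example (illustrative): the debug check above rejects any value byte
 * carrying bits outside its mask:
 *
 *	uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)]  = { 0x0f };
 *	uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x1f };
 *	// value[0] & ~mask[0] == 0x10, so
 *	// flow_dv_check_valid_spec(mask, value) returns -EINVAL.
 */
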
7509 /**
7510  * Add match of ip_version.
7511  *
7512  * @param[in] group
7513  *   Flow group.
7514  * @param[in] headers_v
7515  *   Values header pointer.
7516  * @param[in] headers_m
7517  *   Masks header pointer.
7518  * @param[in] ip_version
7519  *   The IP version to set.
7520  */
7521 static inline void
7522 flow_dv_set_match_ip_version(uint32_t group,
7523                              void *headers_v,
7524                              void *headers_m,
7525                              uint8_t ip_version)
7526 {
7527         if (group == 0)
7528                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7529         else
7530                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7531                          ip_version);
7532         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7533         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7534         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7535 }
7536
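/*
 * Behavior sketch: on group 0 the mask is widened to 0xf, so only the
 * exact IP version in the value can match:
 *
 *	flow_dv_set_match_ip_version(0, headers_v, headers_m, 4);
 *	// mask: ip_version = 0xf, value: ip_version = 4, ethertype cleared
 *	flow_dv_set_match_ip_version(1, headers_v, headers_m, 4);
 *	// mask: ip_version = 4 (as given), value: ip_version = 4
 */
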
7537 /**
7538  * Add Ethernet item to matcher and to the value.
7539  *
7540  * @param[in, out] matcher
7541  *   Flow matcher.
7542  * @param[in, out] key
7543  *   Flow matcher value.
7544  * @param[in] item
7545  *   Flow pattern to translate.
7546  * @param[in] inner
7547  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
7548  */
7549 static void
7550 flow_dv_translate_item_eth(void *matcher, void *key,
7551                            const struct rte_flow_item *item, int inner,
7552                            uint32_t group)
7553 {
7554         const struct rte_flow_item_eth *eth_m = item->mask;
7555         const struct rte_flow_item_eth *eth_v = item->spec;
7556         const struct rte_flow_item_eth nic_mask = {
7557                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7558                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7559                 .type = RTE_BE16(0xffff),
7560                 .has_vlan = 0,
7561         };
7562         void *hdrs_m;
7563         void *hdrs_v;
7564         char *l24_v;
7565         unsigned int i;
7566
7567         if (!eth_v)
7568                 return;
7569         if (!eth_m)
7570                 eth_m = &nic_mask;
7571         if (inner) {
7572                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7573                                          inner_headers);
7574                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7575         } else {
7576                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7577                                          outer_headers);
7578                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7579         }
7580         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7581                &eth_m->dst, sizeof(eth_m->dst));
7582         /* The value must be in the range of the mask. */
7583         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7584         for (i = 0; i < sizeof(eth_m->dst); ++i)
7585                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7586         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7587                &eth_m->src, sizeof(eth_m->src));
7588         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7589         /* The value must be in the range of the mask. */
7590         for (i = 0; i < sizeof(eth_m->src); ++i)
7591                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7592         /*
7593          * HW supports match on one Ethertype, the Ethertype following the last
7594          * VLAN tag of the packet (see PRM).
7595          * Set match on ethertype only if ETH header is not followed by VLAN.
7596          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7597          * ethertype, and use ip_version field instead.
7598          * eCPRI over Ether layer will use type value 0xAEFE.
7599          */
7600         if (eth_m->type == 0xFFFF) {
7601                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
7602                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7603                 switch (eth_v->type) {
7604                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7605                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7606                         return;
7607                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7608                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7609                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7610                         return;
7611                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7612                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7613                         return;
7614                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7615                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7616                         return;
7617                 default:
7618                         break;
7619                 }
7620         }
7621         if (eth_m->has_vlan) {
7622                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7623                 if (eth_v->has_vlan) {
7624                         /*
7625                          * Here, if the has_more_vlan field in the VLAN item is
7626                          * also not set, only single-tagged packets are matched.
7627                          */
7628                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7629                         return;
7630                 }
7631         }
7632         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7633                  rte_be_to_cpu_16(eth_m->type));
7634         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
7635         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
7636 }
7637
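/*
 * Translation sketch (illustrative): with a fully-masked type, the code
 * above prefers dedicated HW fields over a raw ethertype match:
 *
 *	const struct rte_flow_item_eth spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	const struct rte_flow_item_eth mask = {
 *		.type = RTE_BE16(0xffff),
 *	};
 *	// Translated as ip_version == 4, not ethertype == 0x0800.
 *	// RTE_ETHER_TYPE_VLAN would instead set only cvlan_tag = 1,
 *	// leaving the rest to the following VLAN item.
 */
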
7638 /**
7639  * Add VLAN item to matcher and to the value.
7640  *
7641  * @param[in, out] dev_flow
7642  *   Flow descriptor.
7643  * @param[in, out] matcher
7644  *   Flow matcher.
7645  * @param[in, out] key
7646  *   Flow matcher value.
7647  * @param[in] item
7648  *   Flow pattern to translate.
7649  * @param[in] inner
7650  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
7651  */
7652 static void
7653 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
7654                             void *matcher, void *key,
7655                             const struct rte_flow_item *item,
7656                             int inner, uint32_t group)
7657 {
7658         const struct rte_flow_item_vlan *vlan_m = item->mask;
7659         const struct rte_flow_item_vlan *vlan_v = item->spec;
7660         void *hdrs_m;
7661         void *hdrs_v;
7662         uint16_t tci_m;
7663         uint16_t tci_v;
7664
7665         if (inner) {
7666                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7667                                          inner_headers);
7668                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7669         } else {
7670                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7671                                          outer_headers);
7672                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7673                 /*
7674                  * This is a workaround; masks are not supported here
7675                  * and have been pre-validated.
7676                  */
7677                 if (vlan_v)
7678                         dev_flow->handle->vf_vlan.tag =
7679                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
7680         }
7681         /*
7682          * When a VLAN item exists in the flow, mark the packet as tagged,
7683          * even if TCI is not specified.
7684          */
7685         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
7686                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7687                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7688         }
7689         if (!vlan_v)
7690                 return;
7691         if (!vlan_m)
7692                 vlan_m = &rte_flow_item_vlan_mask;
7693         tci_m = rte_be_to_cpu_16(vlan_m->tci);
7694         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
7695         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
7696         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
7697         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
7698         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
7699         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
7700         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
7701         /*
7702          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7703          * ethertype, and use ip_version field instead.
7704          */
7705         if (vlan_m->inner_type == 0xFFFF) {
7706                 switch (vlan_v->inner_type) {
7707                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7708                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7709                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7710                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7711                         return;
7712                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7713                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7714                         return;
7715                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7716                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7717                         return;
7718                 default:
7719                         break;
7720                 }
7721         }
7722         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
7723                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7724                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7725                 /* Only one vlan_tag bit can be set. */
7726                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
7727                 return;
7728         }
7729         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7730                  rte_be_to_cpu_16(vlan_m->inner_type));
7731         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
7732                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
7733 }
7734
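/*
 * TCI decomposition sketch: a fully-masked TCI of 0xa00a (PCP 5, DEI 0,
 * VID 0x00a) is split into the three PRM fields; MLX5_SET() truncates
 * each value to the field width:
 *
 *	uint16_t tci = 0xa00a;
 *	// first_prio <- tci >> 13          == 5
 *	// first_cfi  <- tci >> 12 (1 bit)  == 0
 *	// first_vid  <- tci      (12 bits) == 0x00a
 */
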
7735 /**
7736  * Add IPV4 item to matcher and to the value.
7737  *
7738  * @param[in, out] matcher
7739  *   Flow matcher.
7740  * @param[in, out] key
7741  *   Flow matcher value.
7742  * @param[in] item
7743  *   Flow pattern to translate.
7744  * @param[in] inner
7745  *   Item is inner pattern.
7746  * @param[in] group
7747  *   The group to insert the rule.
7748  */
7749 static void
7750 flow_dv_translate_item_ipv4(void *matcher, void *key,
7751                             const struct rte_flow_item *item,
7752                             int inner, uint32_t group)
7753 {
7754         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
7755         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
7756         const struct rte_flow_item_ipv4 nic_mask = {
7757                 .hdr = {
7758                         .src_addr = RTE_BE32(0xffffffff),
7759                         .dst_addr = RTE_BE32(0xffffffff),
7760                         .type_of_service = 0xff,
7761                         .next_proto_id = 0xff,
7762                         .time_to_live = 0xff,
7763                 },
7764         };
7765         void *headers_m;
7766         void *headers_v;
7767         char *l24_m;
7768         char *l24_v;
7769         uint8_t tos;
7770
7771         if (inner) {
7772                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7773                                          inner_headers);
7774                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7775         } else {
7776                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7777                                          outer_headers);
7778                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7779         }
7780         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
7781         if (!ipv4_v)
7782                 return;
7783         if (!ipv4_m)
7784                 ipv4_m = &nic_mask;
7785         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7786                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7787         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7788                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
7789         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
7790         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
7791         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7792                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7793         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7794                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
7795         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
7796         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
7797         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
7798         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
7799                  ipv4_m->hdr.type_of_service);
7800         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
7801         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
7802                  ipv4_m->hdr.type_of_service >> 2);
7803         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
7804         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7805                  ipv4_m->hdr.next_proto_id);
7806         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7807                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
7808         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7809                  ipv4_m->hdr.time_to_live);
7810         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7811                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
7812         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7813                  !!(ipv4_m->hdr.fragment_offset));
7814         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7815                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
7816 }
7817
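/*
 * TOS split sketch: the 8-bit type_of_service is written as two PRM
 * fields, e.g. for a fully-masked TOS of 0xba (DSCP 46/EF, ECN 2):
 *
 *	uint8_t tos = 0xba;
 *	// ip_ecn  <- tos      (2 bits) == 2
 *	// ip_dscp <- tos >> 2          == 46
 */
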
7818 /**
7819  * Add IPV6 item to matcher and to the value.
7820  *
7821  * @param[in, out] matcher
7822  *   Flow matcher.
7823  * @param[in, out] key
7824  *   Flow matcher value.
7825  * @param[in] item
7826  *   Flow pattern to translate.
7827  * @param[in] inner
7828  *   Item is inner pattern.
7829  * @param[in] group
7830  *   The group to insert the rule.
7831  */
7832 static void
7833 flow_dv_translate_item_ipv6(void *matcher, void *key,
7834                             const struct rte_flow_item *item,
7835                             int inner, uint32_t group)
7836 {
7837         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
7838         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
7839         const struct rte_flow_item_ipv6 nic_mask = {
7840                 .hdr = {
7841                         .src_addr =
7842                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7843                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7844                         .dst_addr =
7845                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
7846                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
7847                         .vtc_flow = RTE_BE32(0xffffffff),
7848                         .proto = 0xff,
7849                         .hop_limits = 0xff,
7850                 },
7851         };
7852         void *headers_m;
7853         void *headers_v;
7854         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7855         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7856         char *l24_m;
7857         char *l24_v;
7858         uint32_t vtc_m;
7859         uint32_t vtc_v;
7860         int i;
7861         int size;
7862
7863         if (inner) {
7864                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7865                                          inner_headers);
7866                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7867         } else {
7868                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7869                                          outer_headers);
7870                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7871         }
7872         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
7873         if (!ipv6_v)
7874                 return;
7875         if (!ipv6_m)
7876                 ipv6_m = &nic_mask;
7877         size = sizeof(ipv6_m->hdr.dst_addr);
7878         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7879                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7880         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7881                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
7882         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
7883         for (i = 0; i < size; ++i)
7884                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
7885         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
7886                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7887         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
7888                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
7889         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
7890         for (i = 0; i < size; ++i)
7891                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
7892         /* TOS. */
7893         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
7894         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
7895         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
7896         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
7897         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
7898         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
7899         /* Label. */
7900         if (inner) {
7901                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
7902                          vtc_m);
7903                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
7904                          vtc_v);
7905         } else {
7906                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
7907                          vtc_m);
7908                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
7909                          vtc_v);
7910         }
7911         /* Protocol. */
7912         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7913                  ipv6_m->hdr.proto);
7914         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7915                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
7916         /* Hop limit. */
7917         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
7918                  ipv6_m->hdr.hop_limits);
7919         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
7920                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
7921         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
7922                  !!(ipv6_m->has_frag_ext));
7923         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
7924                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
7925 }
7926
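/*
 * vtc_flow split sketch: the 32-bit version/TC/flow-label word is
 * decomposed as follows (hypothetical fully-masked value):
 *
 *	uint32_t vtc = 0x6228abcd;	// version 6, TC 0x22, label 0x8abcd
 *	// ip_ecn                        <- vtc >> 20 (2 bits)  == 2
 *	// ip_dscp                       <- vtc >> 22 (6 bits)  == 8
 *	// inner/outer_ipv6_flow_label   <- vtc       (20 bits) == 0x8abcd
 */
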
7927 /**
7928  * Add IPV6 fragment extension item to matcher and to the value.
7929  *
7930  * @param[in, out] matcher
7931  *   Flow matcher.
7932  * @param[in, out] key
7933  *   Flow matcher value.
7934  * @param[in] item
7935  *   Flow pattern to translate.
7936  * @param[in] inner
7937  *   Item is inner pattern.
7938  */
7939 static void
7940 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
7941                                      const struct rte_flow_item *item,
7942                                      int inner)
7943 {
7944         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
7945         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
7946         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
7947                 .hdr = {
7948                         .next_header = 0xff,
7949                         .frag_data = RTE_BE16(0xffff),
7950                 },
7951         };
7952         void *headers_m;
7953         void *headers_v;
7954
7955         if (inner) {
7956                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7957                                          inner_headers);
7958                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7959         } else {
7960                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7961                                          outer_headers);
7962                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7963         }
7964         /* IPv6 fragment extension item exists, so packet is IP fragment. */
7965         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7966         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
7967         if (!ipv6_frag_ext_v)
7968                 return;
7969         if (!ipv6_frag_ext_m)
7970                 ipv6_frag_ext_m = &nic_mask;
7971         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
7972                  ipv6_frag_ext_m->hdr.next_header);
7973         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7974                  ipv6_frag_ext_v->hdr.next_header &
7975                  ipv6_frag_ext_m->hdr.next_header);
7976 }
7977
7978 /**
7979  * Add TCP item to matcher and to the value.
7980  *
7981  * @param[in, out] matcher
7982  *   Flow matcher.
7983  * @param[in, out] key
7984  *   Flow matcher value.
7985  * @param[in] item
7986  *   Flow pattern to translate.
7987  * @param[in] inner
7988  *   Item is inner pattern.
7989  */
7990 static void
7991 flow_dv_translate_item_tcp(void *matcher, void *key,
7992                            const struct rte_flow_item *item,
7993                            int inner)
7994 {
7995         const struct rte_flow_item_tcp *tcp_m = item->mask;
7996         const struct rte_flow_item_tcp *tcp_v = item->spec;
7997         void *headers_m;
7998         void *headers_v;
7999
8000         if (inner) {
8001                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8002                                          inner_headers);
8003                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8004         } else {
8005                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8006                                          outer_headers);
8007                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8008         }
8009         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8010         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8011         if (!tcp_v)
8012                 return;
8013         if (!tcp_m)
8014                 tcp_m = &rte_flow_item_tcp_mask;
8015         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8016                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8017         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8018                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8019         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8020                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8021         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8022                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8023         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8024                  tcp_m->hdr.tcp_flags);
8025         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8026                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8027 }
8028
8029 /**
8030  * Add UDP item to matcher and to the value.
8031  *
8032  * @param[in, out] matcher
8033  *   Flow matcher.
8034  * @param[in, out] key
8035  *   Flow matcher value.
8036  * @param[in] item
8037  *   Flow pattern to translate.
8038  * @param[in] inner
8039  *   Item is inner pattern.
8040  */
8041 static void
8042 flow_dv_translate_item_udp(void *matcher, void *key,
8043                            const struct rte_flow_item *item,
8044                            int inner)
8045 {
8046         const struct rte_flow_item_udp *udp_m = item->mask;
8047         const struct rte_flow_item_udp *udp_v = item->spec;
8048         void *headers_m;
8049         void *headers_v;
8050
8051         if (inner) {
8052                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8053                                          inner_headers);
8054                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8055         } else {
8056                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8057                                          outer_headers);
8058                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8059         }
8060         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8061         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8062         if (!udp_v)
8063                 return;
8064         if (!udp_m)
8065                 udp_m = &rte_flow_item_udp_mask;
8066         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8067                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8068         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8069                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8070         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8071                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8072         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8073                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8074 }
8075
8076 /**
8077  * Add GRE optional Key item to matcher and to the value.
8078  *
8079  * @param[in, out] matcher
8080  *   Flow matcher.
8081  * @param[in, out] key
8082  *   Flow matcher value.
8083  * @param[in] item
8084  *   Flow pattern to translate.
8087  */
8088 static void
8089 flow_dv_translate_item_gre_key(void *matcher, void *key,
8090                                    const struct rte_flow_item *item)
8091 {
8092         const rte_be32_t *key_m = item->mask;
8093         const rte_be32_t *key_v = item->spec;
8094         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8095         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8096         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8097
8098         /* GRE K bit must be on and should already be validated */
8099         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8100         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8101         if (!key_v)
8102                 return;
8103         if (!key_m)
8104                 key_m = &gre_key_default_mask;
8105         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8106                  rte_be_to_cpu_32(*key_m) >> 8);
8107         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8108                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8109         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8110                  rte_be_to_cpu_32(*key_m) & 0xFF);
8111         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8112                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8113 }
8114
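/*
 * GRE key split sketch: the 32-bit key is stored as a 24-bit high part
 * and an 8-bit low part, e.g. for key 0x00abcdef fully masked:
 *
 *	// gre_key_h <- 0x00abcdef >> 8   == 0x00abcd
 *	// gre_key_l <- 0x00abcdef & 0xff == 0xef
 */
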
8115 /**
8116  * Add GRE item to matcher and to the value.
8117  *
8118  * @param[in, out] matcher
8119  *   Flow matcher.
8120  * @param[in, out] key
8121  *   Flow matcher value.
8122  * @param[in] item
8123  *   Flow pattern to translate.
8124  * @param[in] inner
8125  *   Item is inner pattern.
8126  */
8127 static void
8128 flow_dv_translate_item_gre(void *matcher, void *key,
8129                            const struct rte_flow_item *item,
8130                            int inner)
8131 {
8132         const struct rte_flow_item_gre *gre_m = item->mask;
8133         const struct rte_flow_item_gre *gre_v = item->spec;
8134         void *headers_m;
8135         void *headers_v;
8136         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8137         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8138         struct {
8139                 union {
8140                         __extension__
8141                         struct {
8142                                 uint16_t version:3;
8143                                 uint16_t rsvd0:9;
8144                                 uint16_t s_present:1;
8145                                 uint16_t k_present:1;
8146                                 uint16_t rsvd_bit1:1;
8147                                 uint16_t c_present:1;
8148                         };
8149                         uint16_t value;
8150                 };
8151         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8152
8153         if (inner) {
8154                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8155                                          inner_headers);
8156                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8157         } else {
8158                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8159                                          outer_headers);
8160                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8161         }
8162         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8163         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8164         if (!gre_v)
8165                 return;
8166         if (!gre_m)
8167                 gre_m = &rte_flow_item_gre_mask;
8168         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8169                  rte_be_to_cpu_16(gre_m->protocol));
8170         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8171                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8172         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8173         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8174         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8175                  gre_crks_rsvd0_ver_m.c_present);
8176         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8177                  gre_crks_rsvd0_ver_v.c_present &
8178                  gre_crks_rsvd0_ver_m.c_present);
8179         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8180                  gre_crks_rsvd0_ver_m.k_present);
8181         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8182                  gre_crks_rsvd0_ver_v.k_present &
8183                  gre_crks_rsvd0_ver_m.k_present);
8184         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8185                  gre_crks_rsvd0_ver_m.s_present);
8186         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8187                  gre_crks_rsvd0_ver_v.s_present &
8188                  gre_crks_rsvd0_ver_m.s_present);
8189 }
8190
8191 /**
8192  * Add NVGRE item to matcher and to the value.
8193  *
8194  * @param[in, out] matcher
8195  *   Flow matcher.
8196  * @param[in, out] key
8197  *   Flow matcher value.
8198  * @param[in] item
8199  *   Flow pattern to translate.
8200  * @param[in] inner
8201  *   Item is inner pattern.
8202  */
8203 static void
8204 flow_dv_translate_item_nvgre(void *matcher, void *key,
8205                              const struct rte_flow_item *item,
8206                              int inner)
8207 {
8208         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8209         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8210         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8211         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8212         const char *tni_flow_id_m;
8213         const char *tni_flow_id_v;
8214         char *gre_key_m;
8215         char *gre_key_v;
8216         int size;
8217         int i;
8218
8219         /* For NVGRE, GRE header fields must be set with defined values. */
8220         const struct rte_flow_item_gre gre_spec = {
8221                 .c_rsvd0_ver = RTE_BE16(0x2000),
8222                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8223         };
8224         const struct rte_flow_item_gre gre_mask = {
8225                 .c_rsvd0_ver = RTE_BE16(0xB000),
8226                 .protocol = RTE_BE16(UINT16_MAX),
8227         };
8228         const struct rte_flow_item gre_item = {
8229                 .spec = &gre_spec,
8230                 .mask = &gre_mask,
8231                 .last = NULL,
8232         };
8233         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8234         if (!nvgre_v)
8235                 return;
8236         if (!nvgre_m)
8237                 nvgre_m = &rte_flow_item_nvgre_mask;
8238         tni_flow_id_m = (const char *)nvgre_m->tni;
8239         tni_flow_id_v = (const char *)nvgre_v->tni;
8240         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8241         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8242         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8243         memcpy(gre_key_m, tni_flow_id_m, size);
8244         for (i = 0; i < size; ++i)
8245                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8246 }
8247
8248 /**
8249  * Add VXLAN item to matcher and to the value.
8250  *
8251  * @param[in, out] matcher
8252  *   Flow matcher.
8253  * @param[in, out] key
8254  *   Flow matcher value.
8255  * @param[in] item
8256  *   Flow pattern to translate.
8257  * @param[in] inner
8258  *   Item is inner pattern.
8259  */
8260 static void
8261 flow_dv_translate_item_vxlan(void *matcher, void *key,
8262                              const struct rte_flow_item *item,
8263                              int inner)
8264 {
8265         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8266         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8267         void *headers_m;
8268         void *headers_v;
8269         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8270         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8271         char *vni_m;
8272         char *vni_v;
8273         uint16_t dport;
8274         int size;
8275         int i;
8276
8277         if (inner) {
8278                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8279                                          inner_headers);
8280                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8281         } else {
8282                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8283                                          outer_headers);
8284                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8285         }
8286         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8287                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8288         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8289                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8290                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8291         }
8292         if (!vxlan_v)
8293                 return;
8294         if (!vxlan_m)
8295                 vxlan_m = &rte_flow_item_vxlan_mask;
8296         size = sizeof(vxlan_m->vni);
8297         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8298         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8299         memcpy(vni_m, vxlan_m->vni, size);
8300         for (i = 0; i < size; ++i)
8301                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8302 }
8303
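/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * translators above share a "value = spec & mask" convention so the
 * matcher value never carries bits the mask does not cover; with a
 * 3-byte VNI the masking loop reduces to the helper below.
 */
static inline void
example_mask_vni(uint8_t vni_v[3], const uint8_t spec[3],
		 const uint8_t mask[3])
{
	int i;

	/* A zero mask byte wildcards the corresponding VNI byte. */
	for (i = 0; i < 3; ++i)
		vni_v[i] = spec[i] & mask[i];
}
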
8304 /**
8305  * Add VXLAN-GPE item to matcher and to the value.
8306  *
8307  * @param[in, out] matcher
8308  *   Flow matcher.
8309  * @param[in, out] key
8310  *   Flow matcher value.
8311  * @param[in] item
8312  *   Flow pattern to translate.
8313  * @param[in] inner
8314  *   Item is inner pattern.
8315  */
8317 static void
8318 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8319                                  const struct rte_flow_item *item, int inner)
8320 {
8321         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8322         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8323         void *headers_m;
8324         void *headers_v;
8325         void *misc_m =
8326                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8327         void *misc_v =
8328                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8329         char *vni_m;
8330         char *vni_v;
8331         uint16_t dport;
8332         int size;
8333         int i;
8334         uint8_t flags_m = 0xff;
8335         uint8_t flags_v = 0xc;
8336
8337         if (inner) {
8338                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8339                                          inner_headers);
8340                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8341         } else {
8342                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8343                                          outer_headers);
8344                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8345         }
8346         /* VXLAN-GPE is always matched on its own UDP destination port. */
8347         dport = MLX5_UDP_PORT_VXLAN_GPE;
8348         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8349                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8350                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8351         }
8352         if (!vxlan_v)
8353                 return;
8354         if (!vxlan_m)
8355                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8356         size = sizeof(vxlan_m->vni);
8357         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8358         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8359         memcpy(vni_m, vxlan_m->vni, size);
8360         for (i = 0; i < size; ++i)
8361                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8362         if (vxlan_m->flags) {
8363                 flags_m = vxlan_m->flags;
8364                 flags_v = vxlan_v->flags;
8365         }
8366         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8367         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8368         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8369                  vxlan_m->protocol);
8370         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8371                  vxlan_v->protocol);
8372 }
8373
8374 /**
8375  * Add Geneve item to matcher and to the value.
8376  *
8377  * @param[in, out] matcher
8378  *   Flow matcher.
8379  * @param[in, out] key
8380  *   Flow matcher value.
8381  * @param[in] item
8382  *   Flow pattern to translate.
8383  * @param[in] inner
8384  *   Item is inner pattern.
8385  */
8387 static void
8388 flow_dv_translate_item_geneve(void *matcher, void *key,
8389                               const struct rte_flow_item *item, int inner)
8390 {
8391         const struct rte_flow_item_geneve *geneve_m = item->mask;
8392         const struct rte_flow_item_geneve *geneve_v = item->spec;
8393         void *headers_m;
8394         void *headers_v;
8395         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8396         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8397         uint16_t dport;
8398         uint16_t gbhdr_m;
8399         uint16_t gbhdr_v;
8400         char *vni_m;
8401         char *vni_v;
8402         size_t size, i;
8403
8404         if (inner) {
8405                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8406                                          inner_headers);
8407                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8408         } else {
8409                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8410                                          outer_headers);
8411                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8412         }
8413         dport = MLX5_UDP_PORT_GENEVE;
8414         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8415                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8416                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8417         }
8418         if (!geneve_v)
8419                 return;
8420         if (!geneve_m)
8421                 geneve_m = &rte_flow_item_geneve_mask;
8422         size = sizeof(geneve_m->vni);
8423         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8424         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8425         memcpy(vni_m, geneve_m->vni, size);
8426         for (i = 0; i < size; ++i)
8427                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8428         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8429                  rte_be_to_cpu_16(geneve_m->protocol));
8430         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8431                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8432         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8433         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8434         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8435                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8436         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8437                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8438         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8439                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8440         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8441                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8442                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8443 }
8444
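/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * be16 ver_opt_len_o_c_rsvd0 word packs Ver(2) | Opt Len(6) | O(1) |
 * C(1) | Rsvd(6), so after rte_be_to_cpu_16() the OAM flag and option
 * length fall out with plain shifts. The shift/mask values below are
 * assumed to mirror what the MLX5_GENEVE_OAMF_VAL() and
 * MLX5_GENEVE_OPTLEN_VAL() macros do.
 */
static inline void
example_geneve_base_fields(uint16_t gbhdr, uint8_t *oam, uint8_t *opt_len)
{
	*oam = (gbhdr >> 7) & 0x1;	/* O (OAM) flag. */
	*opt_len = (gbhdr >> 8) & 0x3f;	/* Options length, 4-byte words. */
}
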
8445 /**
8446  * Create Geneve TLV option resource.
8447  *
8448  * @param[in, out] dev
8449  *   Pointer to rte_eth_dev structure.
8450  * @param[in] item
8451  *   Pointer to the GENEVE TLV option flow item; its class,
8452  *   type and length select or create the shared resource.
8453  * @param[out] error
8454  *   Pointer to error structure.
8455  *
8456  * @return
8457  *   0 on success, a negative errno value otherwise.
8459  */
8461 int
8462 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8463                                              const struct rte_flow_item *item,
8464                                              struct rte_flow_error *error)
8465 {
8466         struct mlx5_priv *priv = dev->data->dev_private;
8467         struct mlx5_dev_ctx_shared *sh = priv->sh;
8468         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8469                         sh->geneve_tlv_option_resource;
8470         struct mlx5_devx_obj *obj;
8471         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8472         int ret = 0;
8473
8474         if (!geneve_opt_v)
8475                 return -1;
8476         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8477         if (geneve_opt_resource != NULL) {
8478                 if (geneve_opt_resource->option_class ==
8479                         geneve_opt_v->option_class &&
8480                         geneve_opt_resource->option_type ==
8481                         geneve_opt_v->option_type &&
8482                         geneve_opt_resource->length ==
8483                         geneve_opt_v->option_len) {
8484                         /* We already have GENEVE TLV option obj allocated. */
8485                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8486                                            __ATOMIC_RELAXED);
8487                 } else {
8488                         ret = rte_flow_error_set(error, ENOMEM,
8489                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8490                                 "Only one GENEVE TLV option supported");
8491                         goto exit;
8492                 }
8493         } else {
8494                 /* Create a GENEVE TLV object and resource. */
8495                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8496                                 geneve_opt_v->option_class,
8497                                 geneve_opt_v->option_type,
8498                                 geneve_opt_v->option_len);
8499                 if (!obj) {
8500                         ret = rte_flow_error_set(error, ENODATA,
8501                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8502                                 "Failed to create GENEVE TLV Devx object");
8503                         goto exit;
8504                 }
8505                 sh->geneve_tlv_option_resource =
8506                                 mlx5_malloc(MLX5_MEM_ZERO,
8507                                                 sizeof(*geneve_opt_resource),
8508                                                 0, SOCKET_ID_ANY);
8509                 if (!sh->geneve_tlv_option_resource) {
8510                         claim_zero(mlx5_devx_cmd_destroy(obj));
8511                         ret = rte_flow_error_set(error, ENOMEM,
8512                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8513                                 "GENEVE TLV object memory allocation failed");
8514                         goto exit;
8515                 }
8516                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8517                 geneve_opt_resource->obj = obj;
8518                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8519                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8520                 geneve_opt_resource->length = geneve_opt_v->option_len;
8521                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8522                                 __ATOMIC_RELAXED);
8523         }
8524 exit:
8525         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8526         return ret;
8527 }
8528
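/*
 * Illustrative sketch (hypothetical types, not in the driver): the
 * registration above is a "single shared object plus refcount" pattern
 * under a spinlock -- the first caller creates the DevX object,
 * identical later requests only take a reference, and a request with a
 * different class/type/length is rejected because the device exposes a
 * single TLV option slot.
 */
struct example_tlv_res {
	uint32_t refcnt;
	uint8_t opt_class, opt_type, opt_len;
};

static inline int
example_tlv_register(struct example_tlv_res *slot,
		     uint8_t opt_class, uint8_t opt_type, uint8_t opt_len)
{
	if (slot->refcnt) {
		if (slot->opt_class != opt_class ||
		    slot->opt_type != opt_type || slot->opt_len != opt_len)
			return -1; /* Only one option is supported. */
		__atomic_fetch_add(&slot->refcnt, 1, __ATOMIC_RELAXED);
		return 0;
	}
	slot->opt_class = opt_class;
	slot->opt_type = opt_type;
	slot->opt_len = opt_len;
	/* First user: create the object, then publish with refcnt = 1. */
	__atomic_store_n(&slot->refcnt, 1, __ATOMIC_RELAXED);
	return 0;
}
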
8529 /**
8530  * Add Geneve TLV option item to matcher.
8531  *
8532  * @param[in, out] dev
8533  *   Pointer to rte_eth_dev structure.
8534  * @param[in, out] matcher
8535  *   Flow matcher.
8536  * @param[in, out] key
8537  *   Flow matcher value.
8538  * @param[in] item
8539  *   Flow pattern to translate.
8540  * @param[out] error
8541  *   Pointer to error structure.
8542  *
8542  * @return
8542  *   0 on success, a negative errno value otherwise.
8542  */
8543 static int
8544 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8545                                   void *key, const struct rte_flow_item *item,
8546                                   struct rte_flow_error *error)
8547 {
8548         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8549         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8550         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8551         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8552         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8553                         misc_parameters_3);
8554         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8555         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8556         int ret = 0;
8557
8558         if (!geneve_opt_v)
8559                 return -1;
8560         if (!geneve_opt_m)
8561                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8562         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8563                                                            error);
8564         if (ret) {
8565                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8566                 return ret;
8567         }
8568         /*
8569          * Set the option length in the GENEVE header if not requested.
8570          * The GENEVE TLV option length is expressed by the option length
8571          * field in the GENEVE header.
8572          * If the option length was not requested but the GENEVE TLV option
8573          * item is present, we set the option length field implicitly.
8574          */
8575         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8576                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8577                          MLX5_GENEVE_OPTLEN_MASK);
8578                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8579                          geneve_opt_v->option_len + 1);
8580         }
8581         /* Set the data. */
8582         if (geneve_opt_v->data) {
8583                 memcpy(&opt_data_key, geneve_opt_v->data,
8584                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8585                                 sizeof(opt_data_key)));
8586                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8587                                 sizeof(opt_data_key));
8588                 memcpy(&opt_data_mask, geneve_opt_m->data,
8589                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8590                                 sizeof(opt_data_mask)));
8591                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8592                                 sizeof(opt_data_mask));
8593                 MLX5_SET(fte_match_set_misc3, misc3_m,
8594                                 geneve_tlv_option_0_data,
8595                                 rte_be_to_cpu_32(opt_data_mask));
8596                 MLX5_SET(fte_match_set_misc3, misc3_v,
8597                                 geneve_tlv_option_0_data,
8598                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8599         }
8600         return ret;
8601 }
8602
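/*
 * Illustrative sketch (hypothetical helper, not in the driver):
 * option_len counts 4-byte data words, and the base header opt_len
 * also covers the 4-byte option header itself -- hence the
 * "option_len + 1" programmed above. Only the first data word can be
 * matched, so the copy is clamped to one dword.
 */
static inline uint32_t
example_opt_first_dw(const uint32_t *data, uint8_t option_len)
{
	uint32_t dw = 0;
	uint32_t len = (uint32_t)option_len * 4; /* Bytes of option data. */

	memcpy(&dw, data, len < sizeof(dw) ? len : sizeof(dw));
	return dw;
}
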
8603 /**
8604  * Add MPLS item to matcher and to the value.
8605  *
8606  * @param[in, out] matcher
8607  *   Flow matcher.
8608  * @param[in, out] key
8609  *   Flow matcher value.
8610  * @param[in] item
8611  *   Flow pattern to translate.
8612  * @param[in] prev_layer
8613  *   The protocol layer indicated in previous item.
8614  * @param[in] inner
8615  *   Item is inner pattern.
8616  */
8617 static void
8618 flow_dv_translate_item_mpls(void *matcher, void *key,
8619                             const struct rte_flow_item *item,
8620                             uint64_t prev_layer,
8621                             int inner)
8622 {
8623         const uint32_t *in_mpls_m = item->mask;
8624         const uint32_t *in_mpls_v = item->spec;
8625         uint32_t *out_mpls_m = NULL;
8626         uint32_t *out_mpls_v = NULL;
8627         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8628         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8629         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
8630                                      misc_parameters_2);
8631         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8632         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8633         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8634
8635         switch (prev_layer) {
8636         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8637                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
8638                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8639                          MLX5_UDP_PORT_MPLS);
8640                 break;
8641         case MLX5_FLOW_LAYER_GRE:
8642                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
8643                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8644                          RTE_ETHER_TYPE_MPLS);
8645                 break;
8646         default:
8647                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8648                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8649                          IPPROTO_MPLS);
8650                 break;
8651         }
8652         if (!in_mpls_v)
8653                 return;
8654         if (!in_mpls_m)
8655                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
8656         switch (prev_layer) {
8657         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
8658                 out_mpls_m =
8659                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8660                                                  outer_first_mpls_over_udp);
8661                 out_mpls_v =
8662                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8663                                                  outer_first_mpls_over_udp);
8664                 break;
8665         case MLX5_FLOW_LAYER_GRE:
8666                 out_mpls_m =
8667                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
8668                                                  outer_first_mpls_over_gre);
8669                 out_mpls_v =
8670                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
8671                                                  outer_first_mpls_over_gre);
8672                 break;
8673         default:
8674                 /* Inner MPLS not over GRE is not supported. */
8675                 if (!inner) {
8676                         out_mpls_m =
8677                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8678                                                          misc2_m,
8679                                                          outer_first_mpls);
8680                         out_mpls_v =
8681                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
8682                                                          misc2_v,
8683                                                          outer_first_mpls);
8684                 }
8685                 break;
8686         }
8687         if (out_mpls_m && out_mpls_v) {
8688                 *out_mpls_m = *in_mpls_m;
8689                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
8690         }
8691 }
8692
8693 /**
8694  * Add metadata register item to matcher.
8695  *
8696  * @param[in, out] matcher
8697  *   Flow matcher.
8698  * @param[in, out] key
8699  *   Flow matcher value.
8700  * @param[in] reg_type
8701  *   Type of device metadata register.
8702  * @param[in] data
8703  *   Register value.
8704  * @param[in] mask
8705  *   Register mask.
8706  */
8707 static void
8708 flow_dv_match_meta_reg(void *matcher, void *key,
8709                        enum modify_reg reg_type,
8710                        uint32_t data, uint32_t mask)
8711 {
8712         void *misc2_m =
8713                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
8714         void *misc2_v =
8715                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
8716         uint32_t temp;
8717
8718         data &= mask;
8719         switch (reg_type) {
8720         case REG_A:
8721                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
8722                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
8723                 break;
8724         case REG_B:
8725                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
8726                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
8727                 break;
8728         case REG_C_0:
8729                 /*
8730                  * The metadata register C0 field might be divided into
8731                  * source vport index and META item value, we should set
8732                  * this field according to specified mask, not as whole one.
8733                  */
8734                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
8735                 temp |= mask;
8736                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
8737                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
8738                 temp &= ~mask;
8739                 temp |= data;
8740                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
8741                 break;
8742         case REG_C_1:
8743                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
8744                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
8745                 break;
8746         case REG_C_2:
8747                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
8748                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
8749                 break;
8750         case REG_C_3:
8751                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
8752                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
8753                 break;
8754         case REG_C_4:
8755                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
8756                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
8757                 break;
8758         case REG_C_5:
8759                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
8760                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
8761                 break;
8762         case REG_C_6:
8763                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
8764                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
8765                 break;
8766         case REG_C_7:
8767                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
8768                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
8769                 break;
8770         default:
8771                 MLX5_ASSERT(false);
8772                 break;
8773         }
8774 }
8775
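/*
 * Illustrative sketch (hypothetical helper, not in the driver): since
 * REG_C_0 may be shared between the vport metadata and the META/MARK
 * items, the REG_C_0 case above merges the new bits into whatever is
 * already programmed instead of overwriting the whole register,
 * equivalent to:
 */
static inline void
example_merge_reg_c0(uint32_t *reg_m, uint32_t *reg_v,
		     uint32_t data, uint32_t mask)
{
	*reg_m |= mask;				   /* Extend the mask. */
	*reg_v = (*reg_v & ~mask) | (data & mask); /* Replace masked bits. */
}
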
8776 /**
8777  * Add MARK item to matcher.
8778  *
8779  * @param[in] dev
8780  *   The device to configure through.
8781  * @param[in, out] matcher
8782  *   Flow matcher.
8783  * @param[in, out] key
8784  *   Flow matcher value.
8785  * @param[in] item
8786  *   Flow pattern to translate.
8787  */
8788 static void
8789 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
8790                             void *matcher, void *key,
8791                             const struct rte_flow_item *item)
8792 {
8793         struct mlx5_priv *priv = dev->data->dev_private;
8794         const struct rte_flow_item_mark *mark;
8795         uint32_t value;
8796         uint32_t mask;
8797
8798         mark = item->mask ? (const void *)item->mask :
8799                             &rte_flow_item_mark_mask;
8800         mask = mark->id & priv->sh->dv_mark_mask;
8801         mark = (const void *)item->spec;
8802         MLX5_ASSERT(mark);
8803         value = mark->id & priv->sh->dv_mark_mask & mask;
8804         if (mask) {
8805                 enum modify_reg reg;
8806
8807                 /* Get the metadata register index for the mark. */
8808                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
8809                 MLX5_ASSERT(reg > 0);
8810                 if (reg == REG_C_0) {
8811                         struct mlx5_priv *priv = dev->data->dev_private;
8812                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8813                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8814
8815                         mask &= msk_c0;
8816                         mask <<= shl_c0;
8817                         value <<= shl_c0;
8818                 }
8819                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8820         }
8821 }
8822
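/*
 * Illustrative sketch (hypothetical helper, not in the driver): when
 * MARK lands in REG_C_0, only the sub-field selected by dv_regc0_mask
 * belongs to the application, so value and mask are moved into position
 * with a shift derived from the first set bit of that mask
 * (rte_bsf32() above; __builtin_ctz() is its plain-C equivalent).
 * msk_c0 is assumed non-zero here, as the driver asserts elsewhere.
 */
static inline void
example_align_to_c0(uint32_t *value, uint32_t *mask, uint32_t msk_c0)
{
	unsigned int shl = __builtin_ctz(msk_c0);

	*mask = (*mask & msk_c0) << shl;
	*value <<= shl;
}
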
8823 /**
8824  * Add META item to matcher.
8825  *
8826  * @param[in] dev
8827  *   The device to configure through.
8828  * @param[in, out] matcher
8829  *   Flow matcher.
8830  * @param[in, out] key
8831  *   Flow matcher value.
8832  * @param[in] attr
8833  *   Attributes of flow that includes this item.
8834  * @param[in] item
8835  *   Flow pattern to translate.
8836  */
8837 static void
8838 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
8839                             void *matcher, void *key,
8840                             const struct rte_flow_attr *attr,
8841                             const struct rte_flow_item *item)
8842 {
8843         const struct rte_flow_item_meta *meta_m;
8844         const struct rte_flow_item_meta *meta_v;
8845
8846         meta_m = (const void *)item->mask;
8847         if (!meta_m)
8848                 meta_m = &rte_flow_item_meta_mask;
8849         meta_v = (const void *)item->spec;
8850         if (meta_v) {
8851                 int reg;
8852                 uint32_t value = meta_v->data;
8853                 uint32_t mask = meta_m->data;
8854
8855                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
8856                 if (reg < 0)
8857                         return;
8858                 MLX5_ASSERT(reg != REG_NON);
8859                 /*
8860                  * In datapath code there are no endianness
8861                  * conversions for performance reasons; all
8862                  * pattern conversions are done in rte_flow.
8863                  */
8864                 value = rte_cpu_to_be_32(value);
8865                 mask = rte_cpu_to_be_32(mask);
8866                 if (reg == REG_C_0) {
8867                         struct mlx5_priv *priv = dev->data->dev_private;
8868                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8869                         uint32_t shl_c0 = rte_bsf32(msk_c0);
8870 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
8871                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
8872
8873                         value >>= shr_c0;
8874                         mask >>= shr_c0;
8875 #endif
8876                         value <<= shl_c0;
8877                         mask <<= shl_c0;
8878                         MLX5_ASSERT(msk_c0);
8879                         MLX5_ASSERT(!(~msk_c0 & mask));
8880                 }
8881                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
8882         }
8883 }
8884
8885 /**
8886  * Add vport metadata Reg C0 item to matcher.
8887  *
8888  * @param[in, out] matcher
8889  *   Flow matcher.
8890  * @param[in, out] key
8891  *   Flow matcher value.
8892  * @param[in] value
8893  *   Vport metadata register value to match.
8894  * @param[in] mask
8895  *   Vport metadata register mask.
8896  */
8895 static void
8896 flow_dv_translate_item_meta_vport(void *matcher, void *key,
8897                                   uint32_t value, uint32_t mask)
8898 {
8899         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
8900 }
8901
8902 /**
8903  * Add tag item to matcher.
8904  *
8905  * @param[in] dev
8906  *   The device to configure through.
8907  * @param[in, out] matcher
8908  *   Flow matcher.
8909  * @param[in, out] key
8910  *   Flow matcher value.
8911  * @param[in] item
8912  *   Flow pattern to translate.
8913  */
8914 static void
8915 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
8916                                 void *matcher, void *key,
8917                                 const struct rte_flow_item *item)
8918 {
8919         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
8920         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
8921         uint32_t mask, value;
8922
8923         MLX5_ASSERT(tag_v);
8924         value = tag_v->data;
8925         mask = tag_m ? tag_m->data : UINT32_MAX;
8926         if (tag_v->id == REG_C_0) {
8927                 struct mlx5_priv *priv = dev->data->dev_private;
8928                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
8929                 uint32_t shl_c0 = rte_bsf32(msk_c0);
8930
8931                 mask &= msk_c0;
8932                 mask <<= shl_c0;
8933                 value <<= shl_c0;
8934         }
8935         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
8936 }
8937
8938 /**
8939  * Add TAG item to matcher.
8940  *
8941  * @param[in] dev
8942  *   The device to configure through.
8943  * @param[in, out] matcher
8944  *   Flow matcher.
8945  * @param[in, out] key
8946  *   Flow matcher value.
8947  * @param[in] item
8948  *   Flow pattern to translate.
8949  */
8950 static void
8951 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
8952                            void *matcher, void *key,
8953                            const struct rte_flow_item *item)
8954 {
8955         const struct rte_flow_item_tag *tag_v = item->spec;
8956         const struct rte_flow_item_tag *tag_m = item->mask;
8957         enum modify_reg reg;
8958
8959         MLX5_ASSERT(tag_v);
8960         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
8961         /* Get the metadata register index for the tag. */
8962         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
8963         MLX5_ASSERT(reg > 0);
8964         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
8965 }
8966
8967 /**
8968  * Add source vport match to the specified matcher.
8969  *
8970  * @param[in, out] matcher
8971  *   Flow matcher.
8972  * @param[in, out] key
8973  *   Flow matcher value.
8974  * @param[in] port
8975  *   Source vport value to match
8976  * @param[in] mask
8977  *   Mask
8978  */
8979 static void
8980 flow_dv_translate_item_source_vport(void *matcher, void *key,
8981                                     int16_t port, uint16_t mask)
8982 {
8983         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8984         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8985
8986         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
8987         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
8988 }
8989
8990 /**
8991  * Translate port-id item to eswitch match on port-id.
8992  *
8993  * @param[in] dev
8994  *   The device to configure through.
8995  * @param[in, out] matcher
8996  *   Flow matcher.
8997  * @param[in, out] key
8998  *   Flow matcher value.
8999  * @param[in] item
9000  *   Flow pattern to translate.
9001  * @param[in] attr
9002  *   Flow attributes.
9003  *
9004  * @return
9005  *   0 on success, a negative errno value otherwise.
9006  */
9007 static int
9008 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9009                                void *key, const struct rte_flow_item *item,
9010                                const struct rte_flow_attr *attr)
9011 {
9012         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9013         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9014         struct mlx5_priv *priv;
9015         uint16_t mask, id;
9016
9017         mask = pid_m ? pid_m->id : 0xffff;
9018         id = pid_v ? pid_v->id : dev->data->port_id;
9019         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9020         if (!priv)
9021                 return -rte_errno;
9022         /*
9023          * Translate to vport field or to metadata, depending on mode.
9024          * Kernel can use either misc.source_port or half of C0 metadata
9025          * register.
9026          */
9027         if (priv->vport_meta_mask) {
9028                 /*
9029                  * Provide the hint for SW steering library
9030                  * to insert the flow into ingress domain and
9031                  * save the extra vport match.
9032                  */
9033                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9034                     priv->pf_bond < 0 && attr->transfer)
9035                         flow_dv_translate_item_source_vport
9036                                 (matcher, key, priv->vport_id, mask);
9037                 /*
9038                  * We should always set the vport metadata register,
9039                  * otherwise the SW steering library can drop
9040                  * the rule if wire vport metadata value is not zero,
9041                  * it depends on kernel configuration.
9042                  */
9043                 flow_dv_translate_item_meta_vport(matcher, key,
9044                                                   priv->vport_meta_tag,
9045                                                   priv->vport_meta_mask);
9046         } else {
9047                 flow_dv_translate_item_source_vport(matcher, key,
9048                                                     priv->vport_id, mask);
9049         }
9050         return 0;
9051 }
9052
9053 /**
9054  * Add ICMP6 item to matcher and to the value.
9055  *
9056  * @param[in, out] matcher
9057  *   Flow matcher.
9058  * @param[in, out] key
9059  *   Flow matcher value.
9060  * @param[in] item
9061  *   Flow pattern to translate.
9062  * @param[in] inner
9063  *   Item is inner pattern.
9064  */
9065 static void
9066 flow_dv_translate_item_icmp6(void *matcher, void *key,
9067                               const struct rte_flow_item *item,
9068                               int inner)
9069 {
9070         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9071         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9072         void *headers_m;
9073         void *headers_v;
9074         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9075                                      misc_parameters_3);
9076         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9077         if (inner) {
9078                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9079                                          inner_headers);
9080                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9081         } else {
9082                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9083                                          outer_headers);
9084                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9085         }
9086         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9087         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9088         if (!icmp6_v)
9089                 return;
9090         if (!icmp6_m)
9091                 icmp6_m = &rte_flow_item_icmp6_mask;
9092         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9093         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9094                  icmp6_v->type & icmp6_m->type);
9095         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9096         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9097                  icmp6_v->code & icmp6_m->code);
9098 }
9099
9100 /**
9101  * Add ICMP item to matcher and to the value.
9102  *
9103  * @param[in, out] matcher
9104  *   Flow matcher.
9105  * @param[in, out] key
9106  *   Flow matcher value.
9107  * @param[in] item
9108  *   Flow pattern to translate.
9109  * @param[in] inner
9110  *   Item is inner pattern.
9111  */
9112 static void
9113 flow_dv_translate_item_icmp(void *matcher, void *key,
9114                             const struct rte_flow_item *item,
9115                             int inner)
9116 {
9117         const struct rte_flow_item_icmp *icmp_m = item->mask;
9118         const struct rte_flow_item_icmp *icmp_v = item->spec;
9119         uint32_t icmp_header_data_m = 0;
9120         uint32_t icmp_header_data_v = 0;
9121         void *headers_m;
9122         void *headers_v;
9123         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9124                                      misc_parameters_3);
9125         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9126         if (inner) {
9127                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9128                                          inner_headers);
9129                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9130         } else {
9131                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9132                                          outer_headers);
9133                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9134         }
9135         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9136         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9137         if (!icmp_v)
9138                 return;
9139         if (!icmp_m)
9140                 icmp_m = &rte_flow_item_icmp_mask;
9141         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9142                  icmp_m->hdr.icmp_type);
9143         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9144                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9145         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9146                  icmp_m->hdr.icmp_code);
9147         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9148                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9149         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9150         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9151         if (icmp_header_data_m) {
9152                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9153                 icmp_header_data_v |=
9154                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9155                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9156                          icmp_header_data_m);
9157                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9158                          icmp_header_data_v & icmp_header_data_m);
9159         }
9160 }
9161
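/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * ICMP identifier and sequence number share the single 32-bit
 * icmp_header_data matcher field, packed as (ident << 16) | seq_nb in
 * host order, as the translation above does for both mask and value.
 */
static inline uint32_t
example_icmp_header_data(rte_be16_t ident, rte_be16_t seq_nb)
{
	return ((uint32_t)rte_be_to_cpu_16(ident) << 16) |
	       rte_be_to_cpu_16(seq_nb);
}
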
9162 /**
9163  * Add GTP item to matcher and to the value.
9164  *
9165  * @param[in, out] matcher
9166  *   Flow matcher.
9167  * @param[in, out] key
9168  *   Flow matcher value.
9169  * @param[in] item
9170  *   Flow pattern to translate.
9171  * @param[in] inner
9172  *   Item is inner pattern.
9173  */
9174 static void
9175 flow_dv_translate_item_gtp(void *matcher, void *key,
9176                            const struct rte_flow_item *item, int inner)
9177 {
9178         const struct rte_flow_item_gtp *gtp_m = item->mask;
9179         const struct rte_flow_item_gtp *gtp_v = item->spec;
9180         void *headers_m;
9181         void *headers_v;
9182         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9183                                      misc_parameters_3);
9184         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9185         uint16_t dport = RTE_GTPU_UDP_PORT;
9186
9187         if (inner) {
9188                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9189                                          inner_headers);
9190                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9191         } else {
9192                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9193                                          outer_headers);
9194                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9195         }
9196         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9197                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9198                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9199         }
9200         if (!gtp_v)
9201                 return;
9202         if (!gtp_m)
9203                 gtp_m = &rte_flow_item_gtp_mask;
9204         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9205                  gtp_m->v_pt_rsv_flags);
9206         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9207                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9208         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9209         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9210                  gtp_v->msg_type & gtp_m->msg_type);
9211         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9212                  rte_be_to_cpu_32(gtp_m->teid));
9213         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9214                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9215 }
9216
9217 /**
9218  * Add GTP PSC item to matcher.
9219  *
9220  * @param[in, out] matcher
9221  *   Flow matcher.
9222  * @param[in, out] key
9223  *   Flow matcher value.
9224  * @param[in] item
9225  *   Flow pattern to translate.
9226  */
9227 static int
9228 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9229                                const struct rte_flow_item *item)
9230 {
9231         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9232         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9233         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9234                         misc_parameters_3);
9235         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9236         union {
9237                 uint32_t w32;
9238                 struct {
9239                         uint16_t seq_num;
9240                         uint8_t npdu_num;
9241                         uint8_t next_ext_header_type;
9242                 };
9243         } dw_2;
9244         uint8_t gtp_flags;
9245
9246         /* Always set E-flag match on one, regardless of GTP item settings. */
9247         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9248         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9249         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9250         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9251         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9252         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9253         /* Set next extension header type. */
9254         dw_2.seq_num = 0;
9255         dw_2.npdu_num = 0;
9256         dw_2.next_ext_header_type = 0xff;
9257         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9258                  rte_cpu_to_be_32(dw_2.w32));
9259         dw_2.seq_num = 0;
9260         dw_2.npdu_num = 0;
9261         dw_2.next_ext_header_type = 0x85;
9262         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9263                  rte_cpu_to_be_32(dw_2.w32));
9264         if (gtp_psc_v) {
9265                 union {
9266                         uint32_t w32;
9267                         struct {
9268                                 uint8_t len;
9269                                 uint8_t type_flags;
9270                                 uint8_t qfi;
9271                                 uint8_t reserved;
9272                         };
9273                 } dw_0;
9274
9275                 /* Set extension header PDU type and QoS. */
9276                 if (!gtp_psc_m)
9277                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9278                 dw_0.w32 = 0;
9279                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9280                 dw_0.qfi = gtp_psc_m->qfi;
9281                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9282                          rte_cpu_to_be_32(dw_0.w32));
9283                 dw_0.w32 = 0;
9284                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9285                                                         gtp_psc_m->pdu_type);
9286                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9287                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9288                          rte_cpu_to_be_32(dw_0.w32));
9289         }
9290         return 0;
9291 }
9292
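/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * dword is assembled through a union so the 8-bit "next extension
 * header type" lands in the right byte of gtpu_dw_2; type 0x85 is the
 * PDU Session Container extension header, which the translation above
 * matches unconditionally whenever a GTP PSC item is present.
 */
static inline uint32_t
example_gtp_dw2(uint8_t next_ext_type)
{
	union {
		uint32_t w32;
		struct {
			uint16_t seq_num;
			uint8_t npdu_num;
			uint8_t next_ext_header_type;
		};
	} dw_2 = { .w32 = 0 };

	dw_2.next_ext_header_type = next_ext_type;
	return rte_cpu_to_be_32(dw_2.w32); /* As programmed into misc3. */
}
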
9293 /**
9294  * Add eCPRI item to matcher and to the value.
9295  *
9296  * @param[in] dev
9297  *   The device to configure through.
9298  * @param[in, out] matcher
9299  *   Flow matcher.
9300  * @param[in, out] key
9301  *   Flow matcher value.
9302  * @param[in] item
9303  *   Flow pattern to translate.
9304  * @param[in] samples
9305  *   Sample IDs to be used in the matching.
9306  */
9307 static void
9308 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9309                              void *key, const struct rte_flow_item *item)
9310 {
9311         struct mlx5_priv *priv = dev->data->dev_private;
9312         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9313         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9314         struct rte_ecpri_common_hdr common;
9315         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9316                                      misc_parameters_4);
9317         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9318         uint32_t *samples;
9319         void *dw_m;
9320         void *dw_v;
9321
9322         if (!ecpri_v)
9323                 return;
9324         if (!ecpri_m)
9325                 ecpri_m = &rte_flow_item_ecpri_mask;
9326         /*
9327          * At most four DW samples are supported in a single matcher now.
9328          * Two are currently used for eCPRI matching:
9329          * 1. Type: one byte, mask should be 0x00ff0000 in network order
9330          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
9331          *    if any.
9332          */
9333         if (!ecpri_m->hdr.common.u32)
9334                 return;
9335         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9336         /* Need to take the whole DW as the mask to fill the entry. */
9337         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9338                             prog_sample_field_value_0);
9339         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9340                             prog_sample_field_value_0);
9341         /* Already big endian (network order) in the header. */
9342         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9343         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9344         /* Sample#0, used for matching type, offset 0. */
9345         MLX5_SET(fte_match_set_misc4, misc4_m,
9346                  prog_sample_field_id_0, samples[0]);
9347         /* It makes no sense to set the sample ID in the mask field. */
9348         MLX5_SET(fte_match_set_misc4, misc4_v,
9349                  prog_sample_field_id_0, samples[0]);
9350         /*
9351          * Checking if message body part needs to be matched.
9352          * Some wildcard rules only matching type field should be supported.
9353          */
9354         if (ecpri_m->hdr.dummy[0]) {
9355                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9356                 switch (common.type) {
9357                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9358                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9359                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9360                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9361                                             prog_sample_field_value_1);
9362                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9363                                             prog_sample_field_value_1);
9364                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9365                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9366                                             ecpri_m->hdr.dummy[0];
9367                         /* Sample#1, to match message body, offset 4. */
9368                         MLX5_SET(fte_match_set_misc4, misc4_m,
9369                                  prog_sample_field_id_1, samples[1]);
9370                         MLX5_SET(fte_match_set_misc4, misc4_v,
9371                                  prog_sample_field_id_1, samples[1]);
9372                         break;
9373                 default:
9374                         /* Others, do not match any sample ID. */
9375                         break;
9376                 }
9377         }
9378 }
9379
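/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * eCPRI common header dword is revision/flags (byte 0), message type
 * (byte 1) and payload size (bytes 2-3), so a type-only match keeps
 * byte 1 in the network-order mask, i.e. the 0x00ff0000 noted above.
 */
static inline rte_be32_t
example_ecpri_type_mask(void)
{
	return rte_cpu_to_be_32(0x00ff0000);
}
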
9380 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9381
9382 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9383         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9384                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9385
9386 /**
9387  * Calculate flow matcher enable bitmap.
9388  *
9389  * @param match_criteria
9390  *   Pointer to flow matcher criteria.
9391  *
9392  * @return
9393  *   Bitmap of enabled fields.
9394  */
9395 static uint8_t
9396 flow_dv_matcher_enable(uint32_t *match_criteria)
9397 {
9398         uint8_t match_criteria_enable;
9399
9400         match_criteria_enable =
9401                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9402                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9403         match_criteria_enable |=
9404                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9405                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9406         match_criteria_enable |=
9407                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9408                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9409         match_criteria_enable |=
9410                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9411                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9412         match_criteria_enable |=
9413                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9414                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9415         match_criteria_enable |=
9416                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9417                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9418         return match_criteria_enable;
9419 }
9420
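/*
 * Illustrative sketch (hypothetical helper, not in the driver): each
 * non-zero criteria block contributes one enable bit, so a matcher
 * touching only the outer headers reports just the OUTER bit.
 */
static inline uint8_t
example_outer_only_enable(void)
{
	uint32_t matcher[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
	void *headers = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);

	MLX5_SET(fte_match_set_lyr_2_4, headers, ip_protocol, 0xff);
	/* Only MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT will be set. */
	return flow_dv_matcher_enable(matcher);
}
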
9421 struct mlx5_hlist_entry *
9422 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9423 {
9424         struct mlx5_dev_ctx_shared *sh = list->ctx;
9425         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9426         struct rte_eth_dev *dev = ctx->dev;
9427         struct mlx5_flow_tbl_data_entry *tbl_data;
9428         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9429         struct rte_flow_error *error = ctx->error;
9430         union mlx5_flow_tbl_key key = { .v64 = key64 };
9431         struct mlx5_flow_tbl_resource *tbl;
9432         void *domain;
9433         uint32_t idx = 0;
9434         int ret;
9435
9436         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9437         if (!tbl_data) {
9438                 rte_flow_error_set(error, ENOMEM,
9439                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9440                                    NULL,
9441                                    "cannot allocate flow table data entry");
9442                 return NULL;
9443         }
9444         tbl_data->idx = idx;
9445         tbl_data->tunnel = tt_prm->tunnel;
9446         tbl_data->group_id = tt_prm->group_id;
9447         tbl_data->external = !!tt_prm->external;
9448         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9449         tbl_data->is_egress = !!key.is_egress;
9450         tbl_data->is_transfer = !!key.is_fdb;
9451         tbl_data->dummy = !!key.dummy;
9452         tbl_data->level = key.level;
9453         tbl_data->id = key.id;
9454         tbl = &tbl_data->tbl;
9455         if (key.dummy)
9456                 return &tbl_data->entry;
9457         if (key.is_fdb)
9458                 domain = sh->fdb_domain;
9459         else if (key.is_egress)
9460                 domain = sh->tx_domain;
9461         else
9462                 domain = sh->rx_domain;
9463         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
9464         if (ret) {
9465                 rte_flow_error_set(error, ENOMEM,
9466                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9467                                    NULL, "cannot create flow table object");
9468                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9469                 return NULL;
9470         }
9471         if (key.level != 0) {
9472                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9473                                         (tbl->obj, &tbl_data->jump.action);
9474                 if (ret) {
9475                         rte_flow_error_set(error, ENOMEM,
9476                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9477                                            NULL,
9478                                            "cannot create flow jump action");
9479                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9480                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9481                         return NULL;
9482                 }
9483         }
9484         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
9485               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
9486               key.level, key.id);
9487         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9488                              flow_dv_matcher_create_cb,
9489                              flow_dv_matcher_match_cb,
9490                              flow_dv_matcher_remove_cb);
9491         return &tbl_data->entry;
9492 }
9493
9494 int
9495 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9496                      struct mlx5_hlist_entry *entry, uint64_t key64,
9497                      void *cb_ctx __rte_unused)
9498 {
9499         struct mlx5_flow_tbl_data_entry *tbl_data =
9500                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9501         union mlx5_flow_tbl_key key = { .v64 = key64 };
9502
9503         return tbl_data->level != key.level ||
9504                tbl_data->id != key.id ||
9505                tbl_data->dummy != key.dummy ||
9506                tbl_data->is_transfer != !!key.is_fdb ||
9507                tbl_data->is_egress != !!key.is_egress;
9508 }
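
For orientation, the 64-bit hash key and the fields compared above are two views of the same mlx5_flow_tbl_key union; a minimal round-trip sketch (field values illustrative, exact bit layout defined in mlx5_flow.h):

        /* Pack the lookup fields, then read them back via the 64-bit view. */
        union mlx5_flow_tbl_key k = {
                {
                        .level = 1,
                        .id = 0,
                        .reserved = 0,
                        .dummy = 0,
                        .is_fdb = 1,    /* FDB (transfer) table */
                        .is_egress = 0,
                }
        };
        uint64_t key64 = k.v64;  /* the value handed to mlx5_hlist_register() */
        union mlx5_flow_tbl_key back = { .v64 = key64 };

        MLX5_ASSERT(back.level == 1 && back.is_fdb && !back.is_egress);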
9509
9510 /**
9511  * Get a flow table.
9512  *
9513  * @param[in, out] dev
9514  *   Pointer to rte_eth_dev structure.
9515  * @param[in] table_level
9516  *   Table level to use.
9517  * @param[in] egress
9518  *   Direction of the table.
9519  * @param[in] transfer
9520  *   E-Switch or NIC flow.
9521  * @param[in] dummy
9522  *   Dummy entry for dv API.
9523  * @param[in] table_id
9524  *   Table id to use.
9525  * @param[out] error
9526  *   Pointer to error structure.
9527  *
9528  * @return
9529  *   Returns the table resource based on the index, NULL in case of failure.
9530  */
9531 struct mlx5_flow_tbl_resource *
9532 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
9533                          uint32_t table_level, uint8_t egress,
9534                          uint8_t transfer,
9535                          bool external,
9536                          const struct mlx5_flow_tunnel *tunnel,
9537                          uint32_t group_id, uint8_t dummy,
9538                          uint32_t table_id,
9539                          struct rte_flow_error *error)
9540 {
9541         struct mlx5_priv *priv = dev->data->dev_private;
9542         union mlx5_flow_tbl_key table_key = {
9543                 {
9544                         .level = table_level,
9545                         .id = table_id,
9546                         .reserved = 0,
9547                         .dummy = !!dummy,
9548                         .is_fdb = !!transfer,
9549                         .is_egress = !!egress,
9550                 }
9551         };
9552         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
9553                 .tunnel = tunnel,
9554                 .group_id = group_id,
9555                 .external = external,
9556         };
9557         struct mlx5_flow_cb_ctx ctx = {
9558                 .dev = dev,
9559                 .error = error,
9560                 .data = &tt_prm,
9561         };
9562         struct mlx5_hlist_entry *entry;
9563         struct mlx5_flow_tbl_data_entry *tbl_data;
9564
9565         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
9566         if (!entry) {
9567                 rte_flow_error_set(error, ENOMEM,
9568                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9569                                    "cannot get table");
9570                 return NULL;
9571         }
9572         DRV_LOG(DEBUG, "table_level %u table_id %u "
9573                 "tunnel %u group %u registered.",
9574                 table_level, table_id,
9575                 tunnel ? tunnel->tunnel_id : 0, group_id);
9576         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9577         return &tbl_data->tbl;
9578 }
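
A caller-side sketch, assuming the driver-internal context used throughout this file (dev and error in scope): every successful get takes a hash-list reference that must be paired with a release, as the matcher registration below also does:

        struct mlx5_flow_tbl_resource *tbl;

        /* Acquire (or create) the NIC Rx table at level 1, group 0. */
        tbl = flow_dv_tbl_resource_get(dev, 1 /* table_level */, 0 /* egress */,
                                       0 /* transfer */, false /* external */,
                                       NULL /* tunnel */, 0 /* group_id */,
                                       0 /* dummy */, 0 /* table_id */, error);
        if (!tbl)
                return -rte_errno;
        /* ... use tbl->obj, e.g. as a jump destination ... */
        /* Drop the reference; the DR table is destroyed on the last release. */
        flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);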
9579
9580 void
9581 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
9582                       struct mlx5_hlist_entry *entry)
9583 {
9584         struct mlx5_dev_ctx_shared *sh = list->ctx;
9585         struct mlx5_flow_tbl_data_entry *tbl_data =
9586                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9587
9588         MLX5_ASSERT(entry && sh);
9589         if (tbl_data->jump.action)
9590                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
9591         if (tbl_data->tbl.obj)
9592                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
9593         if (tbl_data->tunnel_offload && tbl_data->external) {
9594                 struct mlx5_hlist_entry *he;
9595                 struct mlx5_hlist *tunnel_grp_hash;
9596                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
9597                 union tunnel_tbl_key tunnel_key = {
9598                         .tunnel_id = tbl_data->tunnel ?
9599                                         tbl_data->tunnel->tunnel_id : 0,
9600                         .group = tbl_data->group_id
9601                 };
9602                 uint32_t table_level = tbl_data->level;
9603
9604                 tunnel_grp_hash = tbl_data->tunnel ?
9605                                         tbl_data->tunnel->groups :
9606                                         thub->groups;
9607                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
9608                 if (he)
9609                         mlx5_hlist_unregister(tunnel_grp_hash, he);
9610                 DRV_LOG(DEBUG,
9611                         "table_level %u id %u tunnel %u group %u released.",
9612                         table_level,
9613                         tbl_data->id,
9614                         tbl_data->tunnel ?
9615                         tbl_data->tunnel->tunnel_id : 0,
9616                         tbl_data->group_id);
9617         }
9618         mlx5_cache_list_destroy(&tbl_data->matchers);
9619         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
9620 }
9621
9622 /**
9623  * Release a flow table.
9624  *
9625  * @param[in] sh
9626  *   Pointer to device shared structure.
9627  * @param[in] tbl
9628  *   Table resource to be released.
9629  *
9630  * @return
9631  *   Returns 0 if the table was released, 1 otherwise.
9632  */
9633 static int
9634 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
9635                              struct mlx5_flow_tbl_resource *tbl)
9636 {
9637         struct mlx5_flow_tbl_data_entry *tbl_data =
9638                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9639
9640         if (!tbl)
9641                 return 0;
9642         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
9643 }
9644
9645 int
9646 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
9647                          struct mlx5_cache_entry *entry, void *cb_ctx)
9648 {
9649         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9650         struct mlx5_flow_dv_matcher *ref = ctx->data;
9651         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
9652                                                         entry);
9653
9654         return cur->crc != ref->crc ||
9655                cur->priority != ref->priority ||
9656                memcmp((const void *)cur->mask.buf,
9657                       (const void *)ref->mask.buf, ref->mask.size);
9658 }
9659
9660 struct mlx5_cache_entry *
9661 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
9662                           struct mlx5_cache_entry *entry __rte_unused,
9663                           void *cb_ctx)
9664 {
9665         struct mlx5_dev_ctx_shared *sh = list->ctx;
9666         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9667         struct mlx5_flow_dv_matcher *ref = ctx->data;
9668         struct mlx5_flow_dv_matcher *cache;
9669         struct mlx5dv_flow_matcher_attr dv_attr = {
9670                 .type = IBV_FLOW_ATTR_NORMAL,
9671                 .match_mask = (void *)&ref->mask,
9672         };
9673         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
9674                                                             typeof(*tbl), tbl);
9675         int ret;
9676
9677         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
9678         if (!cache) {
9679                 rte_flow_error_set(ctx->error, ENOMEM,
9680                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9681                                    "cannot create matcher");
9682                 return NULL;
9683         }
9684         *cache = *ref;
9685         dv_attr.match_criteria_enable =
9686                 flow_dv_matcher_enable(cache->mask.buf);
9687         dv_attr.priority = ref->priority;
9688         if (tbl->is_egress)
9689                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
9690         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
9691                                                &cache->matcher_object);
9692         if (ret) {
9693                 mlx5_free(cache);
9694                 rte_flow_error_set(ctx->error, ENOMEM,
9695                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9696                                    "cannot create matcher");
9697                 return NULL;
9698         }
9699         return &cache->entry;
9700 }
9701
9702 /**
9703  * Register the flow matcher.
9704  *
9705  * @param[in, out] dev
9706  *   Pointer to rte_eth_dev structure.
9707  * @param[in, out] matcher
9708  *   Pointer to flow matcher.
9709  * @param[in, out] key
9710  *   Pointer to flow table key.
9711  * @param[in, out] dev_flow
9712  *   Pointer to the dev_flow.
9713  * @param[out] error
9714  *   Pointer to the error structure.
9715  *
9716  * @return
9717  *   0 on success, a negative errno value otherwise and rte_errno is set.
9718  */
9719 static int
9720 flow_dv_matcher_register(struct rte_eth_dev *dev,
9721                          struct mlx5_flow_dv_matcher *ref,
9722                          union mlx5_flow_tbl_key *key,
9723                          struct mlx5_flow *dev_flow,
9724                          const struct mlx5_flow_tunnel *tunnel,
9725                          uint32_t group_id,
9726                          struct rte_flow_error *error)
9727 {
9728         struct mlx5_cache_entry *entry;
9729         struct mlx5_flow_dv_matcher *cache;
9730         struct mlx5_flow_tbl_resource *tbl;
9731         struct mlx5_flow_tbl_data_entry *tbl_data;
9732         struct mlx5_flow_cb_ctx ctx = {
9733                 .error = error,
9734                 .data = ref,
9735         };
9736
9737         /*
9738          * The tunnel offload API requires this registration for cases
9739          * when a tunnel match rule is inserted before the tunnel set rule.
9740          */
9741         tbl = flow_dv_tbl_resource_get(dev, key->level,
9742                                        key->is_egress, key->is_fdb,
9743                                        dev_flow->external, tunnel,
9744                                        group_id, 0, key->id, error);
9745         if (!tbl)
9746                 return -rte_errno;      /* No need to refill the error info */
9747         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
9748         ref->tbl = tbl;
9749         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
9750         if (!entry) {
9751                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9752                 return rte_flow_error_set(error, ENOMEM,
9753                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9754                                           "cannot allocate ref memory");
9755         }
9756         cache = container_of(entry, typeof(*cache), entry);
9757         dev_flow->handle->dvh.matcher = cache;
9758         return 0;
9759 }
9760
9761 struct mlx5_hlist_entry *
9762 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
9763 {
9764         struct mlx5_dev_ctx_shared *sh = list->ctx;
9765         struct rte_flow_error *error = ctx;
9766         struct mlx5_flow_dv_tag_resource *entry;
9767         uint32_t idx = 0;
9768         int ret;
9769
9770         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
9771         if (!entry) {
9772                 rte_flow_error_set(error, ENOMEM,
9773                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9774                                    "cannot allocate resource memory");
9775                 return NULL;
9776         }
9777         entry->idx = idx;
9778         entry->tag_id = key;
9779         ret = mlx5_flow_os_create_flow_action_tag(key,
9780                                                   &entry->action);
9781         if (ret) {
9782                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
9783                 rte_flow_error_set(error, ENOMEM,
9784                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9785                                    NULL, "cannot create action");
9786                 return NULL;
9787         }
9788         return &entry->entry;
9789 }
9790
9791 int
9792 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
9793                      struct mlx5_hlist_entry *entry, uint64_t key,
9794                      void *cb_ctx __rte_unused)
9795 {
9796         struct mlx5_flow_dv_tag_resource *tag =
9797                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9798
9799         return key != tag->tag_id;
9800 }
9801
9802 /**
9803  * Find existing tag resource or create and register a new one.
9804  *
9805  * @param[in, out] dev
9806  *   Pointer to rte_eth_dev structure.
9807  * @param[in, out] tag_be24
9808  *   Tag value in big endian, right-shifted by 8 bits.
9809  * @param[in, out] dev_flow
9810  *   Pointer to the dev_flow.
9811  * @param[out] error
9812  *   Pointer to the error structure.
9813  *
9814  * @return
9815  *   0 on success, a negative errno value otherwise and rte_errno is set.
9816  */
9817 static int
9818 flow_dv_tag_resource_register
9819                         (struct rte_eth_dev *dev,
9820                          uint32_t tag_be24,
9821                          struct mlx5_flow *dev_flow,
9822                          struct rte_flow_error *error)
9823 {
9824         struct mlx5_priv *priv = dev->data->dev_private;
9825         struct mlx5_flow_dv_tag_resource *cache_resource;
9826         struct mlx5_hlist_entry *entry;
9827
9828         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
9829         if (entry) {
9830                 cache_resource = container_of
9831                         (entry, struct mlx5_flow_dv_tag_resource, entry);
9832                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
9833                 dev_flow->dv.tag_resource = cache_resource;
9834                 return 0;
9835         }
9836         return -rte_errno;
9837 }
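
A usage sketch mirroring the MARK handling in the sample translation later in this section (mark_id is illustrative): the mark value is converted with mlx5_flow_mark_set() before registration, and the resulting DR action is published through dev_flow->dv.tag_resource:

        uint32_t tag_be = mlx5_flow_mark_set(mark_id);

        if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
                return -rte_errno;
        MLX5_ASSERT(dev_flow->dv.tag_resource);
        /* dev_flow->dv.tag_resource->action is the DR tag action to attach. */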
9838
9839 void
9840 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
9841                       struct mlx5_hlist_entry *entry)
9842 {
9843         struct mlx5_dev_ctx_shared *sh = list->ctx;
9844         struct mlx5_flow_dv_tag_resource *tag =
9845                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
9846
9847         MLX5_ASSERT(tag && sh && tag->action);
9848         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
9849         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
9850         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
9851 }
9852
9853 /**
9854  * Release the tag.
9855  *
9856  * @param dev
9857  *   Pointer to Ethernet device.
9858  * @param tag_idx
9859  *   Tag index.
9860  *
9861  * @return
9862  *   1 while a reference on it exists, 0 when freed.
9863  */
9864 static int
9865 flow_dv_tag_release(struct rte_eth_dev *dev,
9866                     uint32_t tag_idx)
9867 {
9868         struct mlx5_priv *priv = dev->data->dev_private;
9869         struct mlx5_flow_dv_tag_resource *tag;
9870
9871         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
9872         if (!tag)
9873                 return 0;
9874         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
9875                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
9876         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
9877 }
9878
9879 /**
9880  * Translate port ID action to vport.
9881  *
9882  * @param[in] dev
9883  *   Pointer to rte_eth_dev structure.
9884  * @param[in] action
9885  *   Pointer to the port ID action.
9886  * @param[out] dst_port_id
9887  *   The target port ID.
9888  * @param[out] error
9889  *   Pointer to the error structure.
9890  *
9891  * @return
9892  *   0 on success, a negative errno value otherwise and rte_errno is set.
9893  */
9894 static int
9895 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
9896                                  const struct rte_flow_action *action,
9897                                  uint32_t *dst_port_id,
9898                                  struct rte_flow_error *error)
9899 {
9900         uint32_t port;
9901         struct mlx5_priv *priv;
9902         const struct rte_flow_action_port_id *conf =
9903                         (const struct rte_flow_action_port_id *)action->conf;
9904
9905         port = conf->original ? dev->data->port_id : conf->id;
9906         priv = mlx5_port_to_eswitch_info(port, false);
9907         if (!priv)
9908                 return rte_flow_error_set(error, -rte_errno,
9909                                           RTE_FLOW_ERROR_TYPE_ACTION,
9910                                           NULL,
9911                                           "No eswitch info was found for port");
9912 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
9913         /*
9914          * This parameter is transferred to
9915          * mlx5dv_dr_action_create_dest_ib_port().
9916          */
9917         *dst_port_id = priv->dev_port;
9918 #else
9919         /*
9920  * Legacy mode, no LAG configuration is supported.
9921          * This parameter is transferred to
9922          * mlx5dv_dr_action_create_dest_vport().
9923          */
9924         *dst_port_id = priv->vport_id;
9925 #endif
9926         return 0;
9927 }
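
For reference, the configuration consumed here is the public PORT_ID action; a minimal application-side example (port number illustrative):

        struct rte_flow_action_port_id port_conf = {
                .original = 0,  /* 0: use .id below; 1: use the incoming port */
                .id = 1,        /* target DPDK ethdev port */
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
                .conf = &port_conf,
        };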
9928
9929 /**
9930  * Create a counter with aging configuration.
9931  *
9932  * @param[in] dev
9933  *   Pointer to rte_eth_dev structure.
9934  * @param[out] count
9935  *   Pointer to the counter action configuration.
9936  * @param[in] age
9937  *   Pointer to the aging action configuration.
9938  *
9939  * @return
9940  *   Index to flow counter on success, 0 otherwise.
9941  */
9942 static uint32_t
9943 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
9944                                 struct mlx5_flow *dev_flow,
9945                                 const struct rte_flow_action_count *count,
9946                                 const struct rte_flow_action_age *age)
9947 {
9948         uint32_t counter;
9949         struct mlx5_age_param *age_param;
9950
9951         if (count && count->shared)
9952                 counter = flow_dv_counter_get_shared(dev, count->id);
9953         else
9954                 counter = flow_dv_counter_alloc(dev, !!age);
9955         if (!counter || !age)
9956                 return counter;
9957         age_param = flow_dv_counter_idx_get_age(dev, counter);
9958         age_param->context = age->context ? age->context :
9959                 (void *)(uintptr_t)(dev_flow->flow_idx);
9960         age_param->timeout = age->timeout;
9961         age_param->port_id = dev->data->port_id;
9962         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
9963         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
9964         return counter;
9965 }
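
On the API side, aging rides on a counter: COUNT provides the hit statistics and AGE the timeout. A minimal pair of action configurations feeding this helper (values illustrative):

        struct rte_flow_action_count count_conf = {
                .shared = 0,    /* private counter for this flow */
                .id = 0,        /* only meaningful for shared counters */
        };
        struct rte_flow_action_age age_conf = {
                .timeout = 10,   /* seconds without traffic before AGE fires */
                .context = NULL, /* NULL: the flow index is used as context */
        };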
9966
9967 /**
9968  * Add Tx queue matcher.
9969  *
9970  * @param[in] dev
9971  *   Pointer to the dev struct.
9972  * @param[in, out] matcher
9973  *   Flow matcher.
9974  * @param[in, out] key
9975  *   Flow matcher value.
9976  * @param[in] item
9977  *   Flow pattern to translate.
9980  */
9981 static void
9982 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
9983                                 void *matcher, void *key,
9984                                 const struct rte_flow_item *item)
9985 {
9986         const struct mlx5_rte_flow_item_tx_queue *queue_m;
9987         const struct mlx5_rte_flow_item_tx_queue *queue_v;
9988         void *misc_m =
9989                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9990         void *misc_v =
9991                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9992         struct mlx5_txq_ctrl *txq;
9993         uint32_t queue;
9994
9996         queue_m = (const void *)item->mask;
9997         if (!queue_m)
9998                 return;
9999         queue_v = (const void *)item->spec;
10000         if (!queue_v)
10001                 return;
10002         txq = mlx5_txq_get(dev, queue_v->queue);
10003         if (!txq)
10004                 return;
10005         queue = txq->obj->sq->id;
10006         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10007         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10008                  queue & queue_m->queue);
10009         mlx5_txq_release(dev, queue_v->queue);
10010 }
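
The item handled here is PMD-private, built by the driver itself when it splits flows per Tx queue; a hedged sketch of such an item (queue index illustrative, type taken from mlx5's private item enum):

        struct mlx5_rte_flow_item_tx_queue queue_spec = { .queue = 3 };
        struct mlx5_rte_flow_item_tx_queue queue_mask = { .queue = UINT32_MAX };
        struct rte_flow_item item = {
                .type = (enum rte_flow_item_type)
                        MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
                .spec = &queue_spec,
                .mask = &queue_mask,
        };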
10011
10012 /**
10013  * Set the hash fields according to the @p flow information.
10014  *
10015  * @param[in] dev_flow
10016  *   Pointer to the mlx5_flow.
10017  * @param[in] rss_desc
10018  *   Pointer to the mlx5_flow_rss_desc.
10019  */
10020 static void
10021 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10022                        struct mlx5_flow_rss_desc *rss_desc)
10023 {
10024         uint64_t items = dev_flow->handle->layers;
10025         int rss_inner = 0;
10026         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10027
10028         dev_flow->hash_fields = 0;
10029 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10030         if (rss_desc->level >= 2) {
10031                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10032                 rss_inner = 1;
10033         }
10034 #endif
10035         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10036             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10037                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10038                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10039                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10040                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10041                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10042                         else
10043                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10044                 }
10045         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10046                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10047                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10048                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10049                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10050                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10051                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10052                         else
10053                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10054                 }
10055         }
10056         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10057             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10058                 if (rss_types & ETH_RSS_UDP) {
10059                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10060                                 dev_flow->hash_fields |=
10061                                                 IBV_RX_HASH_SRC_PORT_UDP;
10062                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10063                                 dev_flow->hash_fields |=
10064                                                 IBV_RX_HASH_DST_PORT_UDP;
10065                         else
10066                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10067                 }
10068         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10069                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10070                 if (rss_types & ETH_RSS_TCP) {
10071                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10072                                 dev_flow->hash_fields |=
10073                                                 IBV_RX_HASH_SRC_PORT_TCP;
10074                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10075                                 dev_flow->hash_fields |=
10076                                                 IBV_RX_HASH_DST_PORT_TCP;
10077                         else
10078                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10079                 }
10080         }
10081 }
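
A worked example of the selection logic above, for an outer IPv4/UDP flow without inner RSS (level < 2):

        /*
         * rss->types = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_L4_SRC_ONLY
         *
         * L3 branch: IPv4 layer present, neither ETH_RSS_L3_SRC_ONLY nor
         *            ETH_RSS_L3_DST_ONLY set -> MLX5_IPV4_IBV_RX_HASH
         *            (both source and destination addresses are hashed).
         * L4 branch: UDP layer present, ETH_RSS_L4_SRC_ONLY set
         *            -> IBV_RX_HASH_SRC_PORT_UDP only.
         */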
10082
10083 /**
10084  * Prepare an Rx Hash queue.
10085  *
10086  * @param dev
10087  *   Pointer to Ethernet device.
10088  * @param[in] dev_flow
10089  *   Pointer to the mlx5_flow.
10090  * @param[in] rss_desc
10091  *   Pointer to the mlx5_flow_rss_desc.
10092  * @param[out] hrxq_idx
10093  *   Hash Rx queue index.
10094  *
10095  * @return
10096  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10097  */
10098 static struct mlx5_hrxq *
10099 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10100                      struct mlx5_flow *dev_flow,
10101                      struct mlx5_flow_rss_desc *rss_desc,
10102                      uint32_t *hrxq_idx)
10103 {
10104         struct mlx5_priv *priv = dev->data->dev_private;
10105         struct mlx5_flow_handle *dh = dev_flow->handle;
10106         struct mlx5_hrxq *hrxq;
10107
10108         MLX5_ASSERT(rss_desc->queue_num);
10109         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10110         rss_desc->hash_fields = dev_flow->hash_fields;
10111         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10112         rss_desc->shared_rss = 0;
10113         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10114         if (!*hrxq_idx)
10115                 return NULL;
10116         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10117                               *hrxq_idx);
10118         return hrxq;
10119 }
10120
10121 /**
10122  * Release sample sub action resource.
10123  *
10124  * @param[in, out] dev
10125  *   Pointer to rte_eth_dev structure.
10126  * @param[in] act_res
10127  *   Pointer to sample sub action resource.
10128  */
10129 static void
10130 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10131                                    struct mlx5_flow_sub_actions_idx *act_res)
10132 {
10133         if (act_res->rix_hrxq) {
10134                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10135                 act_res->rix_hrxq = 0;
10136         }
10137         if (act_res->rix_encap_decap) {
10138                 flow_dv_encap_decap_resource_release(dev,
10139                                                      act_res->rix_encap_decap);
10140                 act_res->rix_encap_decap = 0;
10141         }
10142         if (act_res->rix_port_id_action) {
10143                 flow_dv_port_id_action_resource_release(dev,
10144                                                 act_res->rix_port_id_action);
10145                 act_res->rix_port_id_action = 0;
10146         }
10147         if (act_res->rix_tag) {
10148                 flow_dv_tag_release(dev, act_res->rix_tag);
10149                 act_res->rix_tag = 0;
10150         }
10151         if (act_res->rix_jump) {
10152                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10153                 act_res->rix_jump = 0;
10154         }
10155 }
10156
10157 int
10158 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10159                         struct mlx5_cache_entry *entry, void *cb_ctx)
10160 {
10161         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10162         struct rte_eth_dev *dev = ctx->dev;
10163         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10164         struct mlx5_flow_dv_sample_resource *cache_resource =
10165                         container_of(entry, typeof(*cache_resource), entry);
10166
10167         if (resource->ratio == cache_resource->ratio &&
10168             resource->ft_type == cache_resource->ft_type &&
10169             resource->ft_id == cache_resource->ft_id &&
10170             resource->set_action == cache_resource->set_action &&
10171             !memcmp((void *)&resource->sample_act,
10172                     (void *)&cache_resource->sample_act,
10173                     sizeof(struct mlx5_flow_sub_actions_list))) {
10174                 /*
10175                  * Existing sample action should release the prepared
10176                  * sub-actions reference counter.
10177                  */
10178                 flow_dv_sample_sub_actions_release(dev,
10179                                                 &resource->sample_idx);
10180                 return 0;
10181         }
10182         return 1;
10183 }
10184
10185 struct mlx5_cache_entry *
10186 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
10187                          struct mlx5_cache_entry *entry __rte_unused,
10188                          void *cb_ctx)
10189 {
10190         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10191         struct rte_eth_dev *dev = ctx->dev;
10192         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10193         void **sample_dv_actions = resource->sub_actions;
10194         struct mlx5_flow_dv_sample_resource *cache_resource;
10195         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10196         struct mlx5_priv *priv = dev->data->dev_private;
10197         struct mlx5_dev_ctx_shared *sh = priv->sh;
10198         struct mlx5_flow_tbl_resource *tbl;
10199         uint32_t idx = 0;
10200         const uint32_t next_ft_step = 1;
10201         uint32_t next_ft_id = resource->ft_id + next_ft_step;
10202         uint8_t is_egress = 0;
10203         uint8_t is_transfer = 0;
10204         struct rte_flow_error *error = ctx->error;
10205
10206         /* Register new sample resource. */
10207         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10208         if (!cache_resource) {
10209                 rte_flow_error_set(error, ENOMEM,
10210                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10211                                           NULL,
10212                                           "cannot allocate resource memory");
10213                 return NULL;
10214         }
10215         *cache_resource = *resource;
10216         /* Create normal path table level */
10217         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10218                 is_transfer = 1;
10219         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10220                 is_egress = 1;
10221         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10222                                         is_egress, is_transfer,
10223                                         true, NULL, 0, 0, 0, error);
10224         if (!tbl) {
10225                 rte_flow_error_set(error, ENOMEM,
10226                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10227                                           NULL,
10228                                           "fail to create normal path table "
10229                                           "for sample");
10230                 goto error;
10231         }
10232         cache_resource->normal_path_tbl = tbl;
10233         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10234                 if (!sh->default_miss_action) {
10235                         rte_flow_error_set(error, ENOMEM,
10236                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10237                                                 NULL,
10238                                                 "default miss action was not "
10239                                                 "created");
10240                         goto error;
10241                 }
10242                 sample_dv_actions[resource->sample_act.actions_num++] =
10243                                                 sh->default_miss_action;
10244         }
10245         /* Create a DR sample action */
10246         sampler_attr.sample_ratio = cache_resource->ratio;
10247         sampler_attr.default_next_table = tbl->obj;
10248         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
10249         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
10250                                                         &sample_dv_actions[0];
10251         sampler_attr.action = cache_resource->set_action;
10252         if (mlx5_os_flow_dr_create_flow_action_sampler
10253                         (&sampler_attr, &cache_resource->verbs_action)) {
10254                 rte_flow_error_set(error, ENOMEM,
10255                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10256                                         NULL, "cannot create sample action");
10257                 goto error;
10258         }
10259         cache_resource->idx = idx;
10260         cache_resource->dev = dev;
10261         return &cache_resource->entry;
10262 error:
10263         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
10264                 flow_dv_sample_sub_actions_release(dev,
10265                                                    &cache_resource->sample_idx);
10266         if (cache_resource->normal_path_tbl)
10267                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10268                                 cache_resource->normal_path_tbl);
10269         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10270         return NULL;
10272 }
10273
10274 /**
10275  * Find existing sample resource or create and register a new one.
10276  *
10277  * @param[in, out] dev
10278  *   Pointer to rte_eth_dev structure.
10279  * @param[in] resource
10280  *   Pointer to sample resource.
10281  * @param[in, out] dev_flow
10282  *   Pointer to the dev_flow.
10283  * @param[out] error
10284  *   Pointer to the error structure.
10285  *
10286  * @return
10287  *   0 on success, a negative errno value otherwise and rte_errno is set.
10288  */
10289 static int
10290 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10291                          struct mlx5_flow_dv_sample_resource *resource,
10292                          struct mlx5_flow *dev_flow,
10293                          struct rte_flow_error *error)
10294 {
10295         struct mlx5_flow_dv_sample_resource *cache_resource;
10296         struct mlx5_cache_entry *entry;
10297         struct mlx5_priv *priv = dev->data->dev_private;
10298         struct mlx5_flow_cb_ctx ctx = {
10299                 .dev = dev,
10300                 .error = error,
10301                 .data = resource,
10302         };
10303
10304         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10305         if (!entry)
10306                 return -rte_errno;
10307         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10308         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10309         dev_flow->dv.sample_res = cache_resource;
10310         return 0;
10311 }
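
A registration sketch following the same pattern as the other *_resource_register() helpers in this file (the resource fields are assumed to have been filled by the translation step shown below):

        struct mlx5_flow_dv_sample_resource res = { .ratio = 2, };

        /* ... res.ft_type / ft_id / set_action / sample_act filled in ... */
        if (flow_dv_sample_resource_register(dev, &res, dev_flow, error))
                return -rte_errno;
        /* dev_flow->dv.sample_res now points at the cached, shared resource. */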
10312
10313 int
10314 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10315                             struct mlx5_cache_entry *entry, void *cb_ctx)
10316 {
10317         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10318         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10319         struct rte_eth_dev *dev = ctx->dev;
10320         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10321                         container_of(entry, typeof(*cache_resource), entry);
10322         uint32_t idx = 0;
10323
10324         if (resource->num_of_dest == cache_resource->num_of_dest &&
10325             resource->ft_type == cache_resource->ft_type &&
10326             !memcmp((void *)cache_resource->sample_act,
10327                     (void *)resource->sample_act,
10328                    (resource->num_of_dest *
10329                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10330                 /*
10331                  * Existing sample action should release the prepared
10332                  * sub-actions reference counter.
10333                  */
10334                 for (idx = 0; idx < resource->num_of_dest; idx++)
10335                         flow_dv_sample_sub_actions_release(dev,
10336                                         &resource->sample_idx[idx]);
10337                 return 0;
10338         }
10339         return 1;
10340 }
10341
10342 struct mlx5_cache_entry *
10343 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10344                          struct mlx5_cache_entry *entry __rte_unused,
10345                          void *cb_ctx)
10346 {
10347         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10348         struct rte_eth_dev *dev = ctx->dev;
10349         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10350         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10351         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10352         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10353         struct mlx5_priv *priv = dev->data->dev_private;
10354         struct mlx5_dev_ctx_shared *sh = priv->sh;
10355         struct mlx5_flow_sub_actions_list *sample_act;
10356         struct mlx5dv_dr_domain *domain;
10357         uint32_t idx = 0, res_idx = 0;
10358         struct rte_flow_error *error = ctx->error;
10359         uint64_t action_flags;
10360         int ret;
10361
10362         /* Register new destination array resource. */
10363         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10364                                             &res_idx);
10365         if (!cache_resource) {
10366                 rte_flow_error_set(error, ENOMEM,
10367                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10368                                           NULL,
10369                                           "cannot allocate resource memory");
10370                 return NULL;
10371         }
10372         *cache_resource = *resource;
10373         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10374                 domain = sh->fdb_domain;
10375         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10376                 domain = sh->rx_domain;
10377         else
10378                 domain = sh->tx_domain;
10379         for (idx = 0; idx < resource->num_of_dest; idx++) {
10380                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10381                                  mlx5_malloc(MLX5_MEM_ZERO,
10382                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10383                                  0, SOCKET_ID_ANY);
10384                 if (!dest_attr[idx]) {
10385                         rte_flow_error_set(error, ENOMEM,
10386                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10387                                            NULL,
10388                                            "cannot allocate resource memory");
10389                         goto error;
10390                 }
10391                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10392                 sample_act = &resource->sample_act[idx];
10393                 action_flags = sample_act->action_flags;
10394                 switch (action_flags) {
10395                 case MLX5_FLOW_ACTION_QUEUE:
10396                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10397                         break;
10398                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10399                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10400                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10401                         dest_attr[idx]->dest_reformat->reformat =
10402                                         sample_act->dr_encap_action;
10403                         dest_attr[idx]->dest_reformat->dest =
10404                                         sample_act->dr_port_id_action;
10405                         break;
10406                 case MLX5_FLOW_ACTION_PORT_ID:
10407                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10408                         break;
10409                 case MLX5_FLOW_ACTION_JUMP:
10410                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10411                         break;
10412                 default:
10413                         rte_flow_error_set(error, EINVAL,
10414                                            RTE_FLOW_ERROR_TYPE_ACTION,
10415                                            NULL,
10416                                            "unsupported actions type");
10417                         goto error;
10418                 }
10419         }
10420         /* Create a dest array action. */
10421         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10422                                                 (domain,
10423                                                  cache_resource->num_of_dest,
10424                                                  dest_attr,
10425                                                  &cache_resource->action);
10426         if (ret) {
10427                 rte_flow_error_set(error, ENOMEM,
10428                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10429                                    NULL,
10430                                    "cannot create destination array action");
10431                 goto error;
10432         }
10433         cache_resource->idx = res_idx;
10434         cache_resource->dev = dev;
10435         for (idx = 0; idx < resource->num_of_dest; idx++)
10436                 mlx5_free(dest_attr[idx]);
10437         return &cache_resource->entry;
10438 error:
10439         for (idx = 0; idx < resource->num_of_dest; idx++) {
10440                 flow_dv_sample_sub_actions_release(dev,
10441                                 &cache_resource->sample_idx[idx]);
10442                 if (dest_attr[idx])
10443                         mlx5_free(dest_attr[idx]);
10444         }
10445
10446         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10447         return NULL;
10448 }
10449
10450 /**
10451  * Find existing destination array resource or create and register a new one.
10452  *
10453  * @param[in, out] dev
10454  *   Pointer to rte_eth_dev structure.
10455  * @param[in] resource
10456  *   Pointer to destination array resource.
10457  * @param[in, out] dev_flow
10458  *   Pointer to the dev_flow.
10459  * @param[out] error
10460  *   Pointer to the error structure.
10461  *
10462  * @return
10463  *   0 on success, a negative errno value otherwise and rte_errno is set.
10464  */
10465 static int
10466 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10467                          struct mlx5_flow_dv_dest_array_resource *resource,
10468                          struct mlx5_flow *dev_flow,
10469                          struct rte_flow_error *error)
10470 {
10471         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10472         struct mlx5_priv *priv = dev->data->dev_private;
10473         struct mlx5_cache_entry *entry;
10474         struct mlx5_flow_cb_ctx ctx = {
10475                 .dev = dev,
10476                 .error = error,
10477                 .data = resource,
10478         };
10479
10480         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10481         if (!entry)
10482                 return -rte_errno;
10483         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10484         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10485         dev_flow->dv.dest_array_res = cache_resource;
10486         return 0;
10487 }
10488
10489 /**
10490  * Convert Sample action to DV specification.
10491  *
10492  * @param[in] dev
10493  *   Pointer to rte_eth_dev structure.
10494  * @param[in] action
10495  *   Pointer to sample action structure.
10496  * @param[in, out] dev_flow
10497  *   Pointer to the mlx5_flow.
10498  * @param[in] attr
10499  *   Pointer to the flow attributes.
10500  * @param[in, out] num_of_dest
10501  *   Pointer to the num of destination.
10502  * @param[in, out] sample_actions
10503  *   Pointer to sample actions list.
10504  * @param[in, out] res
10505  *   Pointer to sample resource.
10506  * @param[out] error
10507  *   Pointer to the error structure.
10508  *
10509  * @return
10510  *   0 on success, a negative errno value otherwise and rte_errno is set.
10511  */
10512 static int
10513 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10514                                 const struct rte_flow_action_sample *action,
10515                                 struct mlx5_flow *dev_flow,
10516                                 const struct rte_flow_attr *attr,
10517                                 uint32_t *num_of_dest,
10518                                 void **sample_actions,
10519                                 struct mlx5_flow_dv_sample_resource *res,
10520                                 struct rte_flow_error *error)
10521 {
10522         struct mlx5_priv *priv = dev->data->dev_private;
10523         const struct rte_flow_action *sub_actions;
10524         struct mlx5_flow_sub_actions_list *sample_act;
10525         struct mlx5_flow_sub_actions_idx *sample_idx;
10526         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10527         struct rte_flow *flow = dev_flow->flow;
10528         struct mlx5_flow_rss_desc *rss_desc;
10529         uint64_t action_flags = 0;
10530
10531         MLX5_ASSERT(wks);
10532         rss_desc = &wks->rss_desc;
10533         sample_act = &res->sample_act;
10534         sample_idx = &res->sample_idx;
10535         res->ratio = action->ratio;
10536         sub_actions = action->actions;
10537         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
10538                 int type = sub_actions->type;
10539                 uint32_t pre_rix = 0;
10540                 void *pre_r;
10541                 switch (type) {
10542                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10543                 {
10544                         const struct rte_flow_action_queue *queue;
10545                         struct mlx5_hrxq *hrxq;
10546                         uint32_t hrxq_idx;
10547
10548                         queue = sub_actions->conf;
10549                         rss_desc->queue_num = 1;
10550                         rss_desc->queue[0] = queue->index;
10551                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10552                                                     rss_desc, &hrxq_idx);
10553                         if (!hrxq)
10554                                 return rte_flow_error_set
10555                                         (error, rte_errno,
10556                                          RTE_FLOW_ERROR_TYPE_ACTION,
10557                                          NULL,
10558                                          "cannot create fate queue");
10559                         sample_act->dr_queue_action = hrxq->action;
10560                         sample_idx->rix_hrxq = hrxq_idx;
10561                         sample_actions[sample_act->actions_num++] =
10562                                                 hrxq->action;
10563                         (*num_of_dest)++;
10564                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
10565                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10566                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10567                         dev_flow->handle->fate_action =
10568                                         MLX5_FLOW_FATE_QUEUE;
10569                         break;
10570                 }
10571                 case RTE_FLOW_ACTION_TYPE_RSS:
10572                 {
10573                         struct mlx5_hrxq *hrxq;
10574                         uint32_t hrxq_idx;
10575                         const struct rte_flow_action_rss *rss;
10576                         const uint8_t *rss_key;
10577
10578                         rss = sub_actions->conf;
10579                         memcpy(rss_desc->queue, rss->queue,
10580                                rss->queue_num * sizeof(uint16_t));
10581                         rss_desc->queue_num = rss->queue_num;
10582                         /* NULL RSS key indicates default RSS key. */
10583                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
10584                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
10585                         /*
10586                          * rss->level and rss.types should be set in advance
10587                          * when expanding items for RSS.
10588                          */
10589                         flow_dv_hashfields_set(dev_flow, rss_desc);
10590                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10591                                                     rss_desc, &hrxq_idx);
10592                         if (!hrxq)
10593                                 return rte_flow_error_set
10594                                         (error, rte_errno,
10595                                          RTE_FLOW_ERROR_TYPE_ACTION,
10596                                          NULL,
10597                                          "cannot create fate queue");
10598                         sample_act->dr_queue_action = hrxq->action;
10599                         sample_idx->rix_hrxq = hrxq_idx;
10600                         sample_actions[sample_act->actions_num++] =
10601                                                 hrxq->action;
10602                         (*num_of_dest)++;
10603                         action_flags |= MLX5_FLOW_ACTION_RSS;
10604                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10605                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10606                         dev_flow->handle->fate_action =
10607                                         MLX5_FLOW_FATE_QUEUE;
10608                         break;
10609                 }
10610                 case RTE_FLOW_ACTION_TYPE_MARK:
10611                 {
10612                         uint32_t tag_be = mlx5_flow_mark_set
10613                                 (((const struct rte_flow_action_mark *)
10614                                 (sub_actions->conf))->id);
10615
10616                         dev_flow->handle->mark = 1;
10617                         pre_rix = dev_flow->handle->dvh.rix_tag;
10618                         /* Save the mark resource before sample */
10619                         pre_r = dev_flow->dv.tag_resource;
10620                         if (flow_dv_tag_resource_register(dev, tag_be,
10621                                                   dev_flow, error))
10622                                 return -rte_errno;
10623                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10624                         sample_act->dr_tag_action =
10625                                 dev_flow->dv.tag_resource->action;
10626                         sample_idx->rix_tag =
10627                                 dev_flow->handle->dvh.rix_tag;
10628                         sample_actions[sample_act->actions_num++] =
10629                                                 sample_act->dr_tag_action;
10630                         /* Recover the mark resource after sample */
10631                         dev_flow->dv.tag_resource = pre_r;
10632                         dev_flow->handle->dvh.rix_tag = pre_rix;
10633                         action_flags |= MLX5_FLOW_ACTION_MARK;
10634                         break;
10635                 }
10636                 case RTE_FLOW_ACTION_TYPE_COUNT:
10637                 {
10638                         if (!flow->counter) {
10639                                 flow->counter =
10640                                         flow_dv_translate_create_counter(dev,
10641                                                 dev_flow, sub_actions->conf,
10642                                                 0);
10643                                 if (!flow->counter)
10644                                         return rte_flow_error_set
10645                                                 (error, rte_errno,
10646                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10647                                                 NULL,
10648                                                 "cannot create counter"
10649                                                 " object.");
10650                         }
10651                         sample_act->dr_cnt_action =
10652                                   (flow_dv_counter_get_by_idx(dev,
10653                                   flow->counter, NULL))->action;
10654                         sample_actions[sample_act->actions_num++] =
10655                                                 sample_act->dr_cnt_action;
10656                         action_flags |= MLX5_FLOW_ACTION_COUNT;
10657                         break;
10658                 }
10659                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
10660                 {
10661                         struct mlx5_flow_dv_port_id_action_resource
10662                                         port_id_resource;
10663                         uint32_t port_id = 0;
10664
10665                         memset(&port_id_resource, 0, sizeof(port_id_resource));
10666                         /* Save the port id resource before sample */
10667                         pre_rix = dev_flow->handle->rix_port_id_action;
10668                         pre_r = dev_flow->dv.port_id_action;
10669                         if (flow_dv_translate_action_port_id(dev, sub_actions,
10670                                                              &port_id, error))
10671                                 return -rte_errno;
10672                         port_id_resource.port_id = port_id;
10673                         if (flow_dv_port_id_action_resource_register
10674                             (dev, &port_id_resource, dev_flow, error))
10675                                 return -rte_errno;
10676                         sample_act->dr_port_id_action =
10677                                 dev_flow->dv.port_id_action->action;
10678                         sample_idx->rix_port_id_action =
10679                                 dev_flow->handle->rix_port_id_action;
10680                         sample_actions[sample_act->actions_num++] =
10681                                                 sample_act->dr_port_id_action;
10682                         /* Recover the port id resource after sample */
10683                         dev_flow->dv.port_id_action = pre_r;
10684                         dev_flow->handle->rix_port_id_action = pre_rix;
10685                         (*num_of_dest)++;
10686                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10687                         break;
10688                 }
10689                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
10690                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
10691                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10692                         /* Save the encap resource before sample */
10693                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
10694                         pre_r = dev_flow->dv.encap_decap;
10695                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
10696                                                            dev_flow,
10697                                                            attr->transfer,
10698                                                            error))
10699                                 return -rte_errno;
10700                         sample_act->dr_encap_action =
10701                                 dev_flow->dv.encap_decap->action;
10702                         sample_idx->rix_encap_decap =
10703                                 dev_flow->handle->dvh.rix_encap_decap;
10704                         sample_actions[sample_act->actions_num++] =
10705                                                 sample_act->dr_encap_action;
10706                         /* Recover the encap resource after sample */
10707                         dev_flow->dv.encap_decap = pre_r;
10708                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
10709                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10710                         break;
10711                 default:
10712                         return rte_flow_error_set(error, EINVAL,
10713                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10714                                 NULL,
10715                                 "unsupported sampler action");
10716                 }
10717         }
10718         sample_act->action_flags = action_flags;
10719         res->ft_id = dev_flow->dv.group;
10720         if (attr->transfer) {
10721                 union {
10722                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
10723                         uint64_t set_action;
10724                 } action_ctx = { .set_action = 0 };
10725
10726                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10727                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
10728                          MLX5_MODIFICATION_TYPE_SET);
10729                 MLX5_SET(set_action_in, action_ctx.action_in, field,
10730                          MLX5_MODI_META_REG_C_0);
10731                 MLX5_SET(set_action_in, action_ctx.action_in, data,
10732                          priv->vport_meta_tag);
10733                 res->set_action = action_ctx.set_action;
10734         } else if (attr->ingress) {
10735                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10736         } else {
10737                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
10738         }
10739         return 0;
10740 }
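
/*
 * A minimal sketch of why the union above is safe (assuming the PRM
 * definition of set_action_in from mlx5_prm.h): set_action_in is one
 * 64-bit modify-header command, so MLX5_ST_SZ_DW(set_action_in) is two
 * dwords and the aliased uint64_t captures the whole packed command.
 * A field written with MLX5_SET() reads back with MLX5_GET():
 *
 *	union {
 *		uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
 *		uint64_t set_action;
 *	} ctx = { .set_action = 0 };
 *
 *	MLX5_SET(set_action_in, ctx.action_in, field,
 *		 MLX5_MODI_META_REG_C_0);
 *	MLX5_ASSERT(MLX5_GET(set_action_in, ctx.action_in, field) ==
 *		    MLX5_MODI_META_REG_C_0);
 */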
10741
10742 /**
10743  * Convert Sample action to DV specification.
10744  *
10745  * @param[in] dev
10746  *   Pointer to rte_eth_dev structure.
10747  * @param[in, out] dev_flow
10748  *   Pointer to the mlx5_flow.
10749  * @param[in] num_of_dest
10750  *   The number of destinations.
10751  * @param[in, out] res
10752  *   Pointer to sample resource.
10753  * @param[in, out] mdest_res
10754  *   Pointer to destination array resource.
10755  * @param[in] sample_actions
10756  *   Pointer to sample path actions list.
10757  * @param[in] action_flags
10758  *   Holds the actions detected until now.
10759  * @param[out] error
10760  *   Pointer to the error structure.
10761  *
10762  * @return
10763  *   0 on success, a negative errno value otherwise and rte_errno is set.
10764  */
10765 static int
10766 flow_dv_create_action_sample(struct rte_eth_dev *dev,
10767                              struct mlx5_flow *dev_flow,
10768                              uint32_t num_of_dest,
10769                              struct mlx5_flow_dv_sample_resource *res,
10770                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
10771                              void **sample_actions,
10772                              uint64_t action_flags,
10773                              struct rte_flow_error *error)
10774 {
10775         /* Update the normal path action resource in the last index of the array. */
10776         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
10777         struct mlx5_flow_sub_actions_list *sample_act =
10778                                         &mdest_res->sample_act[dest_index];
10779         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10780         struct mlx5_flow_rss_desc *rss_desc;
10781         uint32_t normal_idx = 0;
10782         struct mlx5_hrxq *hrxq;
10783         uint32_t hrxq_idx;
10784
10785         MLX5_ASSERT(wks);
10786         rss_desc = &wks->rss_desc;
10787         if (num_of_dest > 1) {
10788                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
10789                         /* Handle QP action for mirroring */
10790                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10791                                                     rss_desc, &hrxq_idx);
10792                         if (!hrxq)
10793                                 return rte_flow_error_set
10794                                      (error, rte_errno,
10795                                       RTE_FLOW_ERROR_TYPE_ACTION,
10796                                       NULL,
10797                                       "cannot create rx queue");
10798                         normal_idx++;
10799                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
10800                         sample_act->dr_queue_action = hrxq->action;
10801                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10802                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10803                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
10804                 }
10805                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
10806                         normal_idx++;
10807                         mdest_res->sample_idx[dest_index].rix_encap_decap =
10808                                 dev_flow->handle->dvh.rix_encap_decap;
10809                         sample_act->dr_encap_action =
10810                                 dev_flow->dv.encap_decap->action;
10811                         dev_flow->handle->dvh.rix_encap_decap = 0;
10812                 }
10813                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
10814                         normal_idx++;
10815                         mdest_res->sample_idx[dest_index].rix_port_id_action =
10816                                 dev_flow->handle->rix_port_id_action;
10817                         sample_act->dr_port_id_action =
10818                                 dev_flow->dv.port_id_action->action;
10819                         dev_flow->handle->rix_port_id_action = 0;
10820                 }
10821                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
10822                         normal_idx++;
10823                         mdest_res->sample_idx[dest_index].rix_jump =
10824                                 dev_flow->handle->rix_jump;
10825                         sample_act->dr_jump_action =
10826                                 dev_flow->dv.jump->action;
10827                         dev_flow->handle->rix_jump = 0;
10828                 }
10829                 sample_act->actions_num = normal_idx;
10830                 /* update sample action resource into first index of array */
10831                 mdest_res->ft_type = res->ft_type;
10832                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
10833                                 sizeof(struct mlx5_flow_sub_actions_idx));
10834                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
10835                                 sizeof(struct mlx5_flow_sub_actions_list));
10836                 mdest_res->num_of_dest = num_of_dest;
10837                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
10838                                                          dev_flow, error))
10839                         return rte_flow_error_set(error, EINVAL,
10840                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10841                                                   NULL, "can't create sample "
10842                                                   "action");
10843         } else {
10844                 res->sub_actions = sample_actions;
10845                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
10846                         return rte_flow_error_set(error, EINVAL,
10847                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10848                                                   NULL,
10849                                                   "can't create sample action");
10850         }
10851         return 0;
10852 }
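
/*
 * Destination-array layout sketch for the mirroring case above
 * (num_of_dest > 1): the sample-path sub-actions are copied into slot
 * 0 and the normal-path fate action (queue/port_id/jump) stays in the
 * last slot, so a two-destination mirror ends up as:
 *
 *	mdest_res->sample_idx[0]                     = res->sample_idx;
 *	mdest_res->sample_act[0]                     = res->sample_act;
 *	mdest_res->sample_act[MLX5_MAX_DEST_NUM - 1] = normal path;
 *	mdest_res->num_of_dest                       = num_of_dest;
 */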
10853
10854 /**
10855  * Remove an ASO age action from age actions list.
10856  *
10857  * @param[in] dev
10858  *   Pointer to the Ethernet device structure.
10859  * @param[in] age
10860  *   Pointer to the aso age action handler.
10861  */
10862 static void
10863 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
10864                                 struct mlx5_aso_age_action *age)
10865 {
10866         struct mlx5_age_info *age_info;
10867         struct mlx5_age_param *age_param = &age->age_params;
10868         struct mlx5_priv *priv = dev->data->dev_private;
10869         uint16_t expected = AGE_CANDIDATE;
10870
10871         age_info = GET_PORT_AGE_INFO(priv);
10872         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
10873                                          AGE_FREE, false, __ATOMIC_RELAXED,
10874                                          __ATOMIC_RELAXED)) {
10875                 /*
10876                  * We need the lock even if it is an age timeout,
10877                  * since the age action may still be in process.
10878                  */
10879                 rte_spinlock_lock(&age_info->aged_sl);
10880                 LIST_REMOVE(age, next);
10881                 rte_spinlock_unlock(&age_info->aged_sl);
10882                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
10883         }
10884 }
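
/*
 * Lifecycle sketch (assuming the AGE_* states defined in mlx5.h): an
 * age parameter moves AGE_FREE -> AGE_CANDIDATE on allocation, the
 * aging thread moves it AGE_CANDIDATE -> AGE_TMOUT and links it on
 * age_info->aged_sl on timeout, and the removal above returns it to
 * AGE_FREE from either state:
 *
 *	AGE_CANDIDATE --(cmpxchg succeeds)----------------> AGE_FREE
 *	AGE_TMOUT -----(LIST_REMOVE under aged_sl lock)---> AGE_FREE
 */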
10885
10886 /**
10887  * Release an ASO age action.
10888  *
10889  * @param[in] dev
10890  *   Pointer to the Ethernet device structure.
10891  * @param[in] age_idx
10892  *   Index of ASO age action to release.
10896  *
10897  * @return
10898  *   0 when age action was removed, otherwise the number of references.
10899  */
10900 static int
10901 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
10902 {
10903         struct mlx5_priv *priv = dev->data->dev_private;
10904         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10905         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
10906         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
10907
10908         if (!ret) {
10909                 flow_dv_aso_age_remove_from_age(dev, age);
10910                 rte_spinlock_lock(&mng->free_sl);
10911                 LIST_INSERT_HEAD(&mng->free, age, next);
10912                 rte_spinlock_unlock(&mng->free_sl);
10913         }
10914         return ret;
10915 }
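
/*
 * Usage sketch (hypothetical caller): the return value is the number
 * of references still held, so the action is recycled onto the free
 * list only when the last reference is dropped:
 *
 *	if (flow->age) {
 *		flow_dv_aso_age_release(dev, flow->age);
 *		flow->age = 0;
 *	}
 */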
10916
10917 /**
10918  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
10919  *
10920  * @param[in] dev
10921  *   Pointer to the Ethernet device structure.
10922  *
10923  * @return
10924  *   0 on success, otherwise negative errno value and rte_errno is set.
10925  */
10926 static int
10927 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
10928 {
10929         struct mlx5_priv *priv = dev->data->dev_private;
10930         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10931         void *old_pools = mng->pools;
10932         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
10933         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
10934         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
10935
10936         if (!pools) {
10937                 rte_errno = ENOMEM;
10938                 return -ENOMEM;
10939         }
10940         if (old_pools) {
10941                 memcpy(pools, old_pools,
10942                        mng->n * sizeof(struct mlx5_aso_age_pool *));
10943                 mlx5_free(old_pools);
10944         } else {
10945                 /* First ASO flow hit allocation - starting ASO data-path. */
10946                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
10947
10948                 if (ret) {
10949                         mlx5_free(pools);
10950                         return ret;
10951                 }
10952         }
10953         mng->n = resize;
10954         mng->pools = pools;
10955         return 0;
10956 }
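
/*
 * Growth sketch: the pools array grows in fixed quanta, so after k
 * successful resizes it indexes k * MLX5_CNT_CONTAINER_RESIZE pool
 * pointers; the old pointers are copied over and the stale array is
 * freed. The very first resize (no old pools yet) also starts the ASO
 * flow-hit polling datapath via mlx5_aso_flow_hit_queue_poll_start():
 *
 *	resize   = mng->n + MLX5_CNT_CONTAINER_RESIZE;
 *	mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
 */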
10957
10958 /**
10959  * Create and initialize a new ASO aging pool.
10960  *
10961  * @param[in] dev
10962  *   Pointer to the Ethernet device structure.
10963  * @param[out] age_free
10964  *   Where to put the pointer of a new age action.
10965  *
10966  * @return
10967  *   The age actions pool pointer and @p age_free is set on success,
10968  *   NULL otherwise and rte_errno is set.
10969  */
10970 static struct mlx5_aso_age_pool *
10971 flow_dv_age_pool_create(struct rte_eth_dev *dev,
10972                         struct mlx5_aso_age_action **age_free)
10973 {
10974         struct mlx5_priv *priv = dev->data->dev_private;
10975         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
10976         struct mlx5_aso_age_pool *pool = NULL;
10977         struct mlx5_devx_obj *obj = NULL;
10978         uint32_t i;
10979
10980         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
10981                                                     priv->sh->pdn);
10982         if (!obj) {
10983                 rte_errno = ENODATA;
10984                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
10985                 return NULL;
10986         }
10987         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
10988         if (!pool) {
10989                 claim_zero(mlx5_devx_cmd_destroy(obj));
10990                 rte_errno = ENOMEM;
10991                 return NULL;
10992         }
10993         pool->flow_hit_aso_obj = obj;
10994         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
10995         rte_spinlock_lock(&mng->resize_sl);
10996         pool->index = mng->next;
10997         /* Resize pools array if there is no room for the new pool in it. */
10998         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
10999                 claim_zero(mlx5_devx_cmd_destroy(obj));
11000                 mlx5_free(pool);
11001                 rte_spinlock_unlock(&mng->resize_sl);
11002                 return NULL;
11003         }
11004         mng->pools[pool->index] = pool;
11005         mng->next++;
11006         rte_spinlock_unlock(&mng->resize_sl);
11007         /* Assign the first action in the new pool, the rest go to free list. */
11008         *age_free = &pool->actions[0];
11009         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11010                 pool->actions[i].offset = i;
11011                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11012         }
11013         return pool;
11014 }
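
/*
 * Pool seeding sketch: action 0 is handed to the caller while actions
 * 1..MLX5_ASO_AGE_ACTIONS_PER_POOL - 1 go to the free list with their
 * offset recorded, which lets a later allocation recover the owning
 * pool from any action pointer, conceptually:
 *
 *	struct mlx5_aso_age_action *first = age - age->offset;
 *	pool = container_of(first, struct mlx5_aso_age_pool, actions[0]);
 *
 * (a simplified form of the array-typed container_of() cast used in
 * flow_dv_aso_age_alloc() below).
 */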
11015
11016 /**
11017  * Allocate an ASO aging bit.
11018  *
11019  * @param[in] dev
11020  *   Pointer to the Ethernet device structure.
11021  * @param[out] error
11022  *   Pointer to the error structure.
11023  *
11024  * @return
11025  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11026  */
11027 static uint32_t
11028 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11029 {
11030         struct mlx5_priv *priv = dev->data->dev_private;
11031         const struct mlx5_aso_age_pool *pool;
11032         struct mlx5_aso_age_action *age_free = NULL;
11033         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11034
11035         MLX5_ASSERT(mng);
11036         /* Try to get the next free age action bit. */
11037         rte_spinlock_lock(&mng->free_sl);
11038         age_free = LIST_FIRST(&mng->free);
11039         if (age_free) {
11040                 LIST_REMOVE(age_free, next);
11041         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11042                 rte_spinlock_unlock(&mng->free_sl);
11043                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11044                                    NULL, "failed to create ASO age pool");
11045                 return 0; /* 0 is an error. */
11046         }
11047         rte_spinlock_unlock(&mng->free_sl);
11048         pool = container_of
11049           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11050                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11051                                                                        actions);
11052         if (!age_free->dr_action) {
11053                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11054                                                  error);
11055
11056                 if (reg_c < 0) {
11057                         rte_flow_error_set(error, rte_errno,
11058                                            RTE_FLOW_ERROR_TYPE_ACTION,
11059                                            NULL, "failed to get reg_c "
11060                                            "for ASO flow hit");
11061                         return 0; /* 0 is an error. */
11062                 }
11063 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11064                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11065                                 (priv->sh->rx_domain,
11066                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11067                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11068                                  (reg_c - REG_C_0));
11069 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11070                 if (!age_free->dr_action) {
11071                         rte_errno = errno;
11072                         rte_spinlock_lock(&mng->free_sl);
11073                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11074                         rte_spinlock_unlock(&mng->free_sl);
11075                         rte_flow_error_set(error, rte_errno,
11076                                            RTE_FLOW_ERROR_TYPE_ACTION,
11077                                            NULL, "failed to create ASO "
11078                                            "flow hit action");
11079                         return 0; /* 0 is an error. */
11080                 }
11081         }
11082         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11083         return pool->index | ((age_free->offset + 1) << 16);
11084 }
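
/*
 * Index-encoding sketch: the returned handle packs the pool index in
 * the low 16 bits and offset + 1 in the high 16 bits, keeping 0 free
 * as the error value. A hypothetical decoder mirroring
 * flow_aso_age_get_by_idx():
 *
 *	static inline void
 *	aso_age_idx_decode(uint32_t idx, uint16_t *pool, uint16_t *offset)
 *	{
 *		*pool = idx & UINT16_MAX;
 *		*offset = (idx >> 16) - 1;
 *	}
 */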
11085
11086 /**
11087  * Create an age action using the ASO mechanism.
11088  *
11089  * @param[in] dev
11090  *   Pointer to rte_eth_dev structure.
11091  * @param[in] age
11092  *   Pointer to the aging action configuration.
11093  * @param[out] error
11094  *   Pointer to the error structure.
11095  *
11096  * @return
11097  *   Index to ASO age action on success, 0 otherwise.
11098  */
11099 static uint32_t
11100 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
11101                                  const struct rte_flow_action_age *age,
11102                                  struct rte_flow_error *error)
11103 {
11104         uint32_t age_idx = 0;
11105         struct mlx5_aso_age_action *aso_age;
11106
11107         age_idx = flow_dv_aso_age_alloc(dev, error);
11108         if (!age_idx)
11109                 return 0;
11110         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11111         aso_age->age_params.context = age->context;
11112         aso_age->age_params.timeout = age->timeout;
11113         aso_age->age_params.port_id = dev->data->port_id;
11114         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11115                          __ATOMIC_RELAXED);
11116         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11117                          __ATOMIC_RELAXED);
11118         return age_idx;
11119 }
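
/*
 * Usage sketch (hypothetical configuration): one ASO age action is
 * created per rte_flow and shared by all its sub-flows; the DR action
 * is then fetched by index, as done in flow_dv_translate() below:
 *
 *	const struct rte_flow_action_age age_conf = {
 *		.timeout = 10,
 *		.context = NULL,
 *	};
 *	uint32_t idx;
 *
 *	idx = flow_dv_translate_create_aso_age(dev, &age_conf, error);
 *	if (idx)
 *		action = flow_aso_age_get_by_idx(dev, idx)->dr_action;
 */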
11120
11121 /**
11122  * Fill the flow with DV spec, lock free
11123  * (the required mutex must be acquired by the caller).
11124  *
11125  * @param[in] dev
11126  *   Pointer to rte_eth_dev structure.
11127  * @param[in, out] dev_flow
11128  *   Pointer to the sub flow.
11129  * @param[in] attr
11130  *   Pointer to the flow attributes.
11131  * @param[in] items
11132  *   Pointer to the list of items.
11133  * @param[in] actions
11134  *   Pointer to the list of actions.
11135  * @param[out] error
11136  *   Pointer to the error structure.
11137  *
11138  * @return
11139  *   0 on success, a negative errno value otherwise and rte_errno is set.
11140  */
11141 static int
11142 flow_dv_translate(struct rte_eth_dev *dev,
11143                   struct mlx5_flow *dev_flow,
11144                   const struct rte_flow_attr *attr,
11145                   const struct rte_flow_item items[],
11146                   const struct rte_flow_action actions[],
11147                   struct rte_flow_error *error)
11148 {
11149         struct mlx5_priv *priv = dev->data->dev_private;
11150         struct mlx5_dev_config *dev_conf = &priv->config;
11151         struct rte_flow *flow = dev_flow->flow;
11152         struct mlx5_flow_handle *handle = dev_flow->handle;
11153         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11154         struct mlx5_flow_rss_desc *rss_desc;
11155         uint64_t item_flags = 0;
11156         uint64_t last_item = 0;
11157         uint64_t action_flags = 0;
11158         struct mlx5_flow_dv_matcher matcher = {
11159                 .mask = {
11160                         .size = sizeof(matcher.mask.buf) -
11161                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
11162                 },
11163         };
11164         int actions_n = 0;
11165         bool actions_end = false;
11166         union {
11167                 struct mlx5_flow_dv_modify_hdr_resource res;
11168                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
11169                             sizeof(struct mlx5_modification_cmd) *
11170                             (MLX5_MAX_MODIFY_NUM + 1)];
11171         } mhdr_dummy;
11172         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
11173         const struct rte_flow_action_count *count = NULL;
11174         const struct rte_flow_action_age *age = NULL;
11175         union flow_dv_attr flow_attr = { .attr = 0 };
11176         uint32_t tag_be;
11177         union mlx5_flow_tbl_key tbl_key;
11178         uint32_t modify_action_position = UINT32_MAX;
11179         void *match_mask = matcher.mask.buf;
11180         void *match_value = dev_flow->dv.value.buf;
11181         uint8_t next_protocol = 0xff;
11182         struct rte_vlan_hdr vlan = { 0 };
11183         struct mlx5_flow_dv_dest_array_resource mdest_res;
11184         struct mlx5_flow_dv_sample_resource sample_res;
11185         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11186         const struct rte_flow_action_sample *sample = NULL;
11187         struct mlx5_flow_sub_actions_list *sample_act;
11188         uint32_t sample_act_pos = UINT32_MAX;
11189         uint32_t num_of_dest = 0;
11190         int tmp_actions_n = 0;
11191         uint32_t table;
11192         int ret = 0;
11193         const struct mlx5_flow_tunnel *tunnel;
11194         struct flow_grp_info grp_info = {
11195                 .external = !!dev_flow->external,
11196                 .transfer = !!attr->transfer,
11197                 .fdb_def_rule = !!priv->fdb_def_rule,
11198                 .skip_scale = dev_flow->skip_scale &
11199                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
11200         };
11201
11202         if (!wks)
11203                 return rte_flow_error_set(error, ENOMEM,
11204                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11205                                           NULL,
11206                                           "failed to push flow workspace");
11207         rss_desc = &wks->rss_desc;
11208         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
11209         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
11210         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
11211                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11212         /* Update the normal path action resource in the last index of the array. */
11213         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
11214         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
11215                  flow_items_to_tunnel(items) :
11216                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
11217                  flow_actions_to_tunnel(actions) :
11218                  dev_flow->tunnel;
11221         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
11222                                 (dev, tunnel, attr, items, actions);
11223         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
11224                                        &grp_info, error);
11225         if (ret)
11226                 return ret;
11227         dev_flow->dv.group = table;
11228         if (attr->transfer)
11229                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11230         /* Number of actions must be set to 0 in case of a dirty stack. */
11231         mhdr_res->actions_num = 0;
11232         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
11233                 /*
11234                  * Do not add a decap action if the match rule drops the
11235                  * packet; HW rejects rules combining decap & drop.
11236                  *
11237                  * If a tunnel match rule was inserted before the matching
11238                  * tunnel set rule, the flow table used in the match rule
11239                  * must be registered. The current implementation handles
11240                  * that in flow_dv_match_register() at the function end.
11241                  */
11242                 bool add_decap = true;
11243                 const struct rte_flow_action *ptr = actions;
11244
11245                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
11246                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
11247                                 add_decap = false;
11248                                 break;
11249                         }
11250                 }
11251                 if (add_decap) {
11252                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11253                                                            attr->transfer,
11254                                                            error))
11255                                 return -rte_errno;
11256                         dev_flow->dv.actions[actions_n++] =
11257                                         dev_flow->dv.encap_decap->action;
11258                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11259                 }
11260         }
11261         for (; !actions_end ; actions++) {
11262                 const struct rte_flow_action_queue *queue;
11263                 const struct rte_flow_action_rss *rss;
11264                 const struct rte_flow_action *action = actions;
11265                 const uint8_t *rss_key;
11266                 struct mlx5_flow_tbl_resource *tbl;
11267                 struct mlx5_aso_age_action *age_act;
11268                 uint32_t port_id = 0;
11269                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
11270                 int action_type = actions->type;
11271                 const struct rte_flow_action *found_action = NULL;
11272                 uint32_t jump_group = 0;
11273
11274                 if (!mlx5_flow_os_action_supported(action_type))
11275                         return rte_flow_error_set(error, ENOTSUP,
11276                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11277                                                   actions,
11278                                                   "action not supported");
11279                 switch (action_type) {
11280                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
11281                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
11282                         break;
11283                 case RTE_FLOW_ACTION_TYPE_VOID:
11284                         break;
11285                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11286                         if (flow_dv_translate_action_port_id(dev, action,
11287                                                              &port_id, error))
11288                                 return -rte_errno;
11289                         port_id_resource.port_id = port_id;
11290                         MLX5_ASSERT(!handle->rix_port_id_action);
11291                         if (flow_dv_port_id_action_resource_register
11292                             (dev, &port_id_resource, dev_flow, error))
11293                                 return -rte_errno;
11294                         dev_flow->dv.actions[actions_n++] =
11295                                         dev_flow->dv.port_id_action->action;
11296                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11297                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
11298                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11299                         num_of_dest++;
11300                         break;
11301                 case RTE_FLOW_ACTION_TYPE_FLAG:
11302                         action_flags |= MLX5_FLOW_ACTION_FLAG;
11303                         dev_flow->handle->mark = 1;
11304                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11305                                 struct rte_flow_action_mark mark = {
11306                                         .id = MLX5_FLOW_MARK_DEFAULT,
11307                                 };
11308
11309                                 if (flow_dv_convert_action_mark(dev, &mark,
11310                                                                 mhdr_res,
11311                                                                 error))
11312                                         return -rte_errno;
11313                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11314                                 break;
11315                         }
11316                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
11317                         /*
11318                          * Only one FLAG or MARK is supported per device flow
11319                          * right now. So the pointer to the tag resource must be
11320                          * zero before the register process.
11321                          */
11322                         MLX5_ASSERT(!handle->dvh.rix_tag);
11323                         if (flow_dv_tag_resource_register(dev, tag_be,
11324                                                           dev_flow, error))
11325                                 return -rte_errno;
11326                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11327                         dev_flow->dv.actions[actions_n++] =
11328                                         dev_flow->dv.tag_resource->action;
11329                         break;
11330                 case RTE_FLOW_ACTION_TYPE_MARK:
11331                         action_flags |= MLX5_FLOW_ACTION_MARK;
11332                         dev_flow->handle->mark = 1;
11333                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
11334                                 const struct rte_flow_action_mark *mark =
11335                                         (const struct rte_flow_action_mark *)
11336                                                 actions->conf;
11337
11338                                 if (flow_dv_convert_action_mark(dev, mark,
11339                                                                 mhdr_res,
11340                                                                 error))
11341                                         return -rte_errno;
11342                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
11343                                 break;
11344                         }
11345                         /* Fall-through */
11346                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
11347                         /* Legacy (non-extensive) MARK action. */
11348                         tag_be = mlx5_flow_mark_set
11349                               (((const struct rte_flow_action_mark *)
11350                                (actions->conf))->id);
11351                         MLX5_ASSERT(!handle->dvh.rix_tag);
11352                         if (flow_dv_tag_resource_register(dev, tag_be,
11353                                                           dev_flow, error))
11354                                 return -rte_errno;
11355                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11356                         dev_flow->dv.actions[actions_n++] =
11357                                         dev_flow->dv.tag_resource->action;
11358                         break;
11359                 case RTE_FLOW_ACTION_TYPE_SET_META:
11360                         if (flow_dv_convert_action_set_meta
11361                                 (dev, mhdr_res, attr,
11362                                  (const struct rte_flow_action_set_meta *)
11363                                   actions->conf, error))
11364                                 return -rte_errno;
11365                         action_flags |= MLX5_FLOW_ACTION_SET_META;
11366                         break;
11367                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
11368                         if (flow_dv_convert_action_set_tag
11369                                 (dev, mhdr_res,
11370                                  (const struct rte_flow_action_set_tag *)
11371                                   actions->conf, error))
11372                                 return -rte_errno;
11373                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11374                         break;
11375                 case RTE_FLOW_ACTION_TYPE_DROP:
11376                         action_flags |= MLX5_FLOW_ACTION_DROP;
11377                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
11378                         break;
11379                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11380                         queue = actions->conf;
11381                         rss_desc->queue_num = 1;
11382                         rss_desc->queue[0] = queue->index;
11383                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11384                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11385                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
11386                         num_of_dest++;
11387                         break;
11388                 case RTE_FLOW_ACTION_TYPE_RSS:
11389                         rss = actions->conf;
11390                         memcpy(rss_desc->queue, rss->queue,
11391                                rss->queue_num * sizeof(uint16_t));
11392                         rss_desc->queue_num = rss->queue_num;
11393                         /* NULL RSS key indicates default RSS key. */
11394                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11395                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11396                         /*
11397                          * rss->level and rss->types should be set in
11398                          * advance when expanding items for RSS.
11399                          */
11400                         action_flags |= MLX5_FLOW_ACTION_RSS;
11401                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
11402                                 MLX5_FLOW_FATE_SHARED_RSS :
11403                                 MLX5_FLOW_FATE_QUEUE;
11404                         break;
11405                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
11406                         flow->age = (uint32_t)(uintptr_t)(action->conf);
11407                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
11408                         __atomic_fetch_add(&age_act->refcnt, 1,
11409                                            __ATOMIC_RELAXED);
11410                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
11411                         action_flags |= MLX5_FLOW_ACTION_AGE;
11412                         break;
11413                 case RTE_FLOW_ACTION_TYPE_AGE:
11414                         if (priv->sh->flow_hit_aso_en && attr->group) {
11415                                 /*
11416                                  * Create one shared age action, to be used
11417                                  * by all sub-flows.
11418                                  */
11419                                 if (!flow->age) {
11420                                         flow->age =
11421                                                 flow_dv_translate_create_aso_age
11422                                                         (dev, action->conf,
11423                                                          error);
11424                                         if (!flow->age)
11425                                                 return rte_flow_error_set
11426                                                 (error, rte_errno,
11427                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11428                                                  NULL,
11429                                                  "can't create ASO age action");
11430                                 }
11431                                 dev_flow->dv.actions[actions_n++] =
11432                                           (flow_aso_age_get_by_idx
11433                                                 (dev, flow->age))->dr_action;
11434                                 action_flags |= MLX5_FLOW_ACTION_AGE;
11435                                 break;
11436                         }
11437                         /* Fall-through */
11438                 case RTE_FLOW_ACTION_TYPE_COUNT:
11439                         if (!dev_conf->devx) {
11440                                 return rte_flow_error_set
11441                                               (error, ENOTSUP,
11442                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11443                                                NULL,
11444                                                "count action not supported");
11445                         }
11446                         /* Save information first, will apply later. */
11447                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
11448                                 count = action->conf;
11449                         else
11450                                 age = action->conf;
11451                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11452                         break;
11453                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
11454                         dev_flow->dv.actions[actions_n++] =
11455                                                 priv->sh->pop_vlan_action;
11456                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
11457                         break;
11458                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
11459                         if (!(action_flags &
11460                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
11461                                 flow_dev_get_vlan_info_from_items(items, &vlan);
11462                         vlan.eth_proto = rte_be_to_cpu_16
11463                              ((((const struct rte_flow_action_of_push_vlan *)
11464                                                    actions->conf)->ethertype));
11465                         found_action = mlx5_flow_find_action
11466                                         (actions + 1,
11467                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
11468                         if (found_action)
11469                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11470                         found_action = mlx5_flow_find_action
11471                                         (actions + 1,
11472                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
11473                         if (found_action)
11474                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
11475                         if (flow_dv_create_action_push_vlan
11476                                             (dev, attr, &vlan, dev_flow, error))
11477                                 return -rte_errno;
11478                         dev_flow->dv.actions[actions_n++] =
11479                                         dev_flow->dv.push_vlan_res->action;
11480                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
11481                         break;
11482                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
11483                         /* Already handled by the OF_PUSH_VLAN action. */
11484                         MLX5_ASSERT(action_flags &
11485                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
11486                         break;
11487                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
11488                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
11489                                 break;
11490                         flow_dev_get_vlan_info_from_items(items, &vlan);
11491                         mlx5_update_vlan_vid_pcp(actions, &vlan);
11492                         /* Without a VLAN push, this is a modify header action. */
11493                         if (flow_dv_convert_action_modify_vlan_vid
11494                                                 (mhdr_res, actions, error))
11495                                 return -rte_errno;
11496                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
11497                         break;
11498                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11499                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11500                         if (flow_dv_create_action_l2_encap(dev, actions,
11501                                                            dev_flow,
11502                                                            attr->transfer,
11503                                                            error))
11504                                 return -rte_errno;
11505                         dev_flow->dv.actions[actions_n++] =
11506                                         dev_flow->dv.encap_decap->action;
11507                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11508                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11509                                 sample_act->action_flags |=
11510                                                         MLX5_FLOW_ACTION_ENCAP;
11511                         break;
11512                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
11513                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
11514                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
11515                                                            attr->transfer,
11516                                                            error))
11517                                 return -rte_errno;
11518                         dev_flow->dv.actions[actions_n++] =
11519                                         dev_flow->dv.encap_decap->action;
11520                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11521                         break;
11522                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11523                         /* Handle encap with preceding decap. */
11524                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
11525                                 if (flow_dv_create_action_raw_encap
11526                                         (dev, actions, dev_flow, attr, error))
11527                                         return -rte_errno;
11528                                 dev_flow->dv.actions[actions_n++] =
11529                                         dev_flow->dv.encap_decap->action;
11530                         } else {
11531                                 /* Handle encap without preceding decap. */
11532                                 if (flow_dv_create_action_l2_encap
11533                                     (dev, actions, dev_flow, attr->transfer,
11534                                      error))
11535                                         return -rte_errno;
11536                                 dev_flow->dv.actions[actions_n++] =
11537                                         dev_flow->dv.encap_decap->action;
11538                         }
11539                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11540                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
11541                                 sample_act->action_flags |=
11542                                                         MLX5_FLOW_ACTION_ENCAP;
11543                         break;
11544                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11545                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
11546                                 ;
11547                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
11548                                 if (flow_dv_create_action_l2_decap
11549                                     (dev, dev_flow, attr->transfer, error))
11550                                         return -rte_errno;
11551                                 dev_flow->dv.actions[actions_n++] =
11552                                         dev_flow->dv.encap_decap->action;
11553                         }
11554                         /* If decap is followed by encap, handle it at encap. */
11555                         action_flags |= MLX5_FLOW_ACTION_DECAP;
11556                         break;
11557                 case RTE_FLOW_ACTION_TYPE_JUMP:
11558                         jump_group = ((const struct rte_flow_action_jump *)
11559                                                         action->conf)->group;
11560                         grp_info.std_tbl_fix = 0;
11561                         if (dev_flow->skip_scale &
11562                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
11563                                 grp_info.skip_scale = 1;
11564                         else
11565                                 grp_info.skip_scale = 0;
11566                         ret = mlx5_flow_group_to_table(dev, tunnel,
11567                                                        jump_group,
11568                                                        &table,
11569                                                        &grp_info, error);
11570                         if (ret)
11571                                 return ret;
11572                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
11573                                                        attr->transfer,
11574                                                        !!dev_flow->external,
11575                                                        tunnel, jump_group, 0,
11576                                                        0, error);
11577                         if (!tbl)
11578                                 return rte_flow_error_set
11579                                                 (error, errno,
11580                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11581                                                  NULL,
11582                                                  "cannot create jump action.");
11583                         if (flow_dv_jump_tbl_resource_register
11584                             (dev, tbl, dev_flow, error)) {
11585                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11586                                 return rte_flow_error_set
11587                                                 (error, errno,
11588                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11589                                                  NULL,
11590                                                  "cannot create jump action.");
11591                         }
11592                         dev_flow->dv.actions[actions_n++] =
11593                                         dev_flow->dv.jump->action;
11594                         action_flags |= MLX5_FLOW_ACTION_JUMP;
11595                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
11596                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
11597                         num_of_dest++;
11598                         break;
11599                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
11600                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
11601                         if (flow_dv_convert_action_modify_mac
11602                                         (mhdr_res, actions, error))
11603                                 return -rte_errno;
11604                         action_flags |= actions->type ==
11605                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
11606                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
11607                                         MLX5_FLOW_ACTION_SET_MAC_DST;
11608                         break;
11609                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
11610                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
11611                         if (flow_dv_convert_action_modify_ipv4
11612                                         (mhdr_res, actions, error))
11613                                 return -rte_errno;
11614                         action_flags |= actions->type ==
11615                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
11616                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
11617                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
11618                         break;
11619                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
11620                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
11621                         if (flow_dv_convert_action_modify_ipv6
11622                                         (mhdr_res, actions, error))
11623                                 return -rte_errno;
11624                         action_flags |= actions->type ==
11625                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
11626                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
11627                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
11628                         break;
11629                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
11630                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
11631                         if (flow_dv_convert_action_modify_tp
11632                                         (mhdr_res, actions, items,
11633                                          &flow_attr, dev_flow, !!(action_flags &
11634                                          MLX5_FLOW_ACTION_DECAP), error))
11635                                 return -rte_errno;
11636                         action_flags |= actions->type ==
11637                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
11638                                         MLX5_FLOW_ACTION_SET_TP_SRC :
11639                                         MLX5_FLOW_ACTION_SET_TP_DST;
11640                         break;
11641                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
11642                         if (flow_dv_convert_action_modify_dec_ttl
11643                                         (mhdr_res, items, &flow_attr, dev_flow,
11644                                          !!(action_flags &
11645                                          MLX5_FLOW_ACTION_DECAP), error))
11646                                 return -rte_errno;
11647                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
11648                         break;
11649                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
11650                         if (flow_dv_convert_action_modify_ttl
11651                                         (mhdr_res, actions, items, &flow_attr,
11652                                          dev_flow, !!(action_flags &
11653                                          MLX5_FLOW_ACTION_DECAP), error))
11654                                 return -rte_errno;
11655                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
11656                         break;
11657                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
11658                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
11659                         if (flow_dv_convert_action_modify_tcp_seq
11660                                         (mhdr_res, actions, error))
11661                                 return -rte_errno;
11662                         action_flags |= actions->type ==
11663                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
11664                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
11665                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
11666                         break;
11667
11668                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
11669                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
11670                         if (flow_dv_convert_action_modify_tcp_ack
11671                                         (mhdr_res, actions, error))
11672                                 return -rte_errno;
11673                         action_flags |= actions->type ==
11674                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
11675                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
11676                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
11677                         break;
11678                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
11679                         if (flow_dv_convert_action_set_reg
11680                                         (mhdr_res, actions, error))
11681                                 return -rte_errno;
11682                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11683                         break;
11684                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
11685                         if (flow_dv_convert_action_copy_mreg
11686                                         (dev, mhdr_res, actions, error))
11687                                 return -rte_errno;
11688                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
11689                         break;
11690                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
11691                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
11692                         dev_flow->handle->fate_action =
11693                                         MLX5_FLOW_FATE_DEFAULT_MISS;
11694                         break;
11695                 case RTE_FLOW_ACTION_TYPE_METER:
11696                         if (!wks->fm)
11697                                 return rte_flow_error_set(error, rte_errno,
11698                                         RTE_FLOW_ERROR_TYPE_ACTION,
11699                                         NULL, "Failed to get meter in flow.");
11700                         /* Set the meter action. */
11701                         dev_flow->dv.actions[actions_n++] =
11702                                 wks->fm->meter_action;
11703                         action_flags |= MLX5_FLOW_ACTION_METER;
11704                         break;
11705                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
11706                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
11707                                                               actions, error))
11708                                 return -rte_errno;
11709                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
11710                         break;
11711                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
11712                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
11713                                                               actions, error))
11714                                 return -rte_errno;
11715                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
11716                         break;
11717                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
11718                         sample_act_pos = actions_n;
11719                         sample = (const struct rte_flow_action_sample *)
11720                                  action->conf;
11721                         actions_n++;
11722                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
11723                         /* Put the encap action into the group when used with port_id. */
11724                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
11725                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
11726                                 sample_act->action_flags |=
11727                                                         MLX5_FLOW_ACTION_ENCAP;
11728                         break;
11729                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
11730                         if (flow_dv_convert_action_modify_field
11731                                         (dev, mhdr_res, actions, attr, error))
11732                                 return -rte_errno;
11733                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
11734                         break;
11735                 case RTE_FLOW_ACTION_TYPE_END:
11736                         actions_end = true;
11737                         if (mhdr_res->actions_num) {
11738                                 /* create modify action if needed. */
11739                                 if (flow_dv_modify_hdr_resource_register
11740                                         (dev, mhdr_res, dev_flow, error))
11741                                         return -rte_errno;
11742                                 dev_flow->dv.actions[modify_action_position] =
11743                                         handle->dvh.modify_hdr->action;
11744                         }
11745                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
11746                                 /*
11747                                  * Create one count action, to be used
11748                                  * by all sub-flows.
11749                                  */
11750                                 if (!flow->counter) {
11751                                         flow->counter =
11752                                                 flow_dv_translate_create_counter
11753                                                         (dev, dev_flow, count,
11754                                                          age);
11755                                         if (!flow->counter)
11756                                                 return rte_flow_error_set
11757                                                 (error, rte_errno,
11758                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11759                                                  NULL, "cannot create counter"
11760                                                  " object.");
11761                                 }
11762                                 dev_flow->dv.actions[actions_n] =
11763                                           (flow_dv_counter_get_by_idx(dev,
11764                                           flow->counter, NULL))->action;
11765                                 actions_n++;
11766                         }
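                        /* Falls through to the default case (added note). */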
11767                 default:
11768                         break;
11769                 }
11770                 if (mhdr_res->actions_num &&
11771                     modify_action_position == UINT32_MAX)
11772                         modify_action_position = actions_n++;
11773         }
11774         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
11775                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
11776                 int item_type = items->type;
11777
11778                 if (!mlx5_flow_os_item_supported(item_type))
11779                         return rte_flow_error_set(error, ENOTSUP,
11780                                                   RTE_FLOW_ERROR_TYPE_ITEM,
11781                                                   NULL, "item not supported");
11782                 switch (item_type) {
11783                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
11784                         flow_dv_translate_item_port_id
11785                                 (dev, match_mask, match_value, items, attr);
11786                         last_item = MLX5_FLOW_ITEM_PORT_ID;
11787                         break;
11788                 case RTE_FLOW_ITEM_TYPE_ETH:
11789                         flow_dv_translate_item_eth(match_mask, match_value,
11790                                                    items, tunnel,
11791                                                    dev_flow->dv.group);
11792                         matcher.priority = action_flags &
11793                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
11794                                         !dev_flow->external ?
11795                                         MLX5_PRIORITY_MAP_L3 :
11796                                         MLX5_PRIORITY_MAP_L2;
11797                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
11798                                              MLX5_FLOW_LAYER_OUTER_L2;
11799                         break;
11800                 case RTE_FLOW_ITEM_TYPE_VLAN:
11801                         flow_dv_translate_item_vlan(dev_flow,
11802                                                     match_mask, match_value,
11803                                                     items, tunnel,
11804                                                     dev_flow->dv.group);
11805                         matcher.priority = MLX5_PRIORITY_MAP_L2;
11806                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
11807                                               MLX5_FLOW_LAYER_INNER_VLAN) :
11808                                              (MLX5_FLOW_LAYER_OUTER_L2 |
11809                                               MLX5_FLOW_LAYER_OUTER_VLAN);
11810                         break;
11811                 case RTE_FLOW_ITEM_TYPE_IPV4:
11812                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11813                                                   &item_flags, &tunnel);
11814                         flow_dv_translate_item_ipv4(match_mask, match_value,
11815                                                     items, tunnel,
11816                                                     dev_flow->dv.group);
11817                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11818                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
11819                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
11820                         if (items->mask != NULL &&
11821                             ((const struct rte_flow_item_ipv4 *)
11822                              items->mask)->hdr.next_proto_id) {
11823                                 next_protocol =
11824                                         ((const struct rte_flow_item_ipv4 *)
11825                                          (items->spec))->hdr.next_proto_id;
11826                                 next_protocol &=
11827                                         ((const struct rte_flow_item_ipv4 *)
11828                                          (items->mask))->hdr.next_proto_id;
11829                         } else {
11830                                 /* Reset for inner layer. */
11831                                 next_protocol = 0xff;
11832                         }
11833                         break;
11834                 case RTE_FLOW_ITEM_TYPE_IPV6:
11835                         mlx5_flow_tunnel_ip_check(items, next_protocol,
11836                                                   &item_flags, &tunnel);
11837                         flow_dv_translate_item_ipv6(match_mask, match_value,
11838                                                     items, tunnel,
11839                                                     dev_flow->dv.group);
11840                         matcher.priority = MLX5_PRIORITY_MAP_L3;
11841                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
11842                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
11843                         if (items->mask != NULL &&
11844                             ((const struct rte_flow_item_ipv6 *)
11845                              items->mask)->hdr.proto) {
11846                                 next_protocol =
11847                                         ((const struct rte_flow_item_ipv6 *)
11848                                          items->spec)->hdr.proto;
11849                                 next_protocol &=
11850                                         ((const struct rte_flow_item_ipv6 *)
11851                                          items->mask)->hdr.proto;
11852                         } else {
11853                                 /* Reset for inner layer. */
11854                                 next_protocol = 0xff;
11855                         }
11856                         break;
11857                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
11858                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
11859                                                              match_value,
11860                                                              items, tunnel);
11861                         last_item = tunnel ?
11862                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
11863                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
11864                         if (items->mask != NULL &&
11865                             ((const struct rte_flow_item_ipv6_frag_ext *)
11866                              items->mask)->hdr.next_header) {
11867                                 next_protocol =
11868                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11869                                  items->spec)->hdr.next_header;
11870                                 next_protocol &=
11871                                 ((const struct rte_flow_item_ipv6_frag_ext *)
11872                                  items->mask)->hdr.next_header;
11873                         } else {
11874                                 /* Reset for inner layer. */
11875                                 next_protocol = 0xff;
11876                         }
11877                         break;
11878                 case RTE_FLOW_ITEM_TYPE_TCP:
11879                         flow_dv_translate_item_tcp(match_mask, match_value,
11880                                                    items, tunnel);
11881                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11882                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
11883                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
11884                         break;
11885                 case RTE_FLOW_ITEM_TYPE_UDP:
11886                         flow_dv_translate_item_udp(match_mask, match_value,
11887                                                    items, tunnel);
11888                         matcher.priority = MLX5_PRIORITY_MAP_L4;
11889                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
11890                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
11891                         break;
11892                 case RTE_FLOW_ITEM_TYPE_GRE:
11893                         flow_dv_translate_item_gre(match_mask, match_value,
11894                                                    items, tunnel);
11895                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11896                         last_item = MLX5_FLOW_LAYER_GRE;
11897                         break;
11898                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
11899                         flow_dv_translate_item_gre_key(match_mask,
11900                                                        match_value, items);
11901                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
11902                         break;
11903                 case RTE_FLOW_ITEM_TYPE_NVGRE:
11904                         flow_dv_translate_item_nvgre(match_mask, match_value,
11905                                                      items, tunnel);
11906                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11907                         last_item = MLX5_FLOW_LAYER_GRE;
11908                         break;
11909                 case RTE_FLOW_ITEM_TYPE_VXLAN:
11910                         flow_dv_translate_item_vxlan(match_mask, match_value,
11911                                                      items, tunnel);
11912                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11913                         last_item = MLX5_FLOW_LAYER_VXLAN;
11914                         break;
11915                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
11916                         flow_dv_translate_item_vxlan_gpe(match_mask,
11917                                                          match_value, items,
11918                                                          tunnel);
11919                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11920                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
11921                         break;
11922                 case RTE_FLOW_ITEM_TYPE_GENEVE:
11923                         flow_dv_translate_item_geneve(match_mask, match_value,
11924                                                       items, tunnel);
11925                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11926                         last_item = MLX5_FLOW_LAYER_GENEVE;
11927                         break;
11928                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
11929                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
11930                                                           match_value,
11931                                                           items, error);
11932                         if (ret)
11933                                 return rte_flow_error_set(error, -ret,
11934                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11935                                         "cannot create GENEVE TLV option");
11936                         flow->geneve_tlv_option = 1;
11937                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
11938                         break;
11939                 case RTE_FLOW_ITEM_TYPE_MPLS:
11940                         flow_dv_translate_item_mpls(match_mask, match_value,
11941                                                     items, last_item, tunnel);
11942                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11943                         last_item = MLX5_FLOW_LAYER_MPLS;
11944                         break;
11945                 case RTE_FLOW_ITEM_TYPE_MARK:
11946                         flow_dv_translate_item_mark(dev, match_mask,
11947                                                     match_value, items);
11948                         last_item = MLX5_FLOW_ITEM_MARK;
11949                         break;
11950                 case RTE_FLOW_ITEM_TYPE_META:
11951                         flow_dv_translate_item_meta(dev, match_mask,
11952                                                     match_value, attr, items);
11953                         last_item = MLX5_FLOW_ITEM_METADATA;
11954                         break;
11955                 case RTE_FLOW_ITEM_TYPE_ICMP:
11956                         flow_dv_translate_item_icmp(match_mask, match_value,
11957                                                     items, tunnel);
11958                         last_item = MLX5_FLOW_LAYER_ICMP;
11959                         break;
11960                 case RTE_FLOW_ITEM_TYPE_ICMP6:
11961                         flow_dv_translate_item_icmp6(match_mask, match_value,
11962                                                       items, tunnel);
11963                         last_item = MLX5_FLOW_LAYER_ICMP6;
11964                         break;
11965                 case RTE_FLOW_ITEM_TYPE_TAG:
11966                         flow_dv_translate_item_tag(dev, match_mask,
11967                                                    match_value, items);
11968                         last_item = MLX5_FLOW_ITEM_TAG;
11969                         break;
11970                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
11971                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
11972                                                         match_value, items);
11973                         last_item = MLX5_FLOW_ITEM_TAG;
11974                         break;
11975                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
11976                         flow_dv_translate_item_tx_queue(dev, match_mask,
11977                                                         match_value,
11978                                                         items);
11979                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
11980                         break;
11981                 case RTE_FLOW_ITEM_TYPE_GTP:
11982                         flow_dv_translate_item_gtp(match_mask, match_value,
11983                                                    items, tunnel);
11984                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
11985                         last_item = MLX5_FLOW_LAYER_GTP;
11986                         break;
11987                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
11988                         ret = flow_dv_translate_item_gtp_psc(match_mask,
11989                                                           match_value,
11990                                                           items);
11991                         if (ret)
11992                                 return rte_flow_error_set(error, -ret,
11993                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
11994                                         "cannot create GTP PSC item");
11995                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
11996                         break;
11997                 case RTE_FLOW_ITEM_TYPE_ECPRI:
11998                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
11999                                 /* Create it only the first time it is used. */
12000                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12001                                 if (ret)
12002                                         return rte_flow_error_set
12003                                                 (error, -ret,
12004                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12005                                                 NULL,
12006                                                 "cannot create eCPRI parser");
12007                         }
12008                         /* Adjust the matcher mask and device flow value sizes. */
12009                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12010                         dev_flow->dv.value.size =
12011                                         MLX5_ST_SZ_BYTES(fte_match_param);
12012                         flow_dv_translate_item_ecpri(dev, match_mask,
12013                                                      match_value, items);
12014                         /* No other protocol should follow eCPRI layer. */
12015                         last_item = MLX5_FLOW_LAYER_ECPRI;
12016                         break;
12017                 default:
12018                         break;
12019                 }
12020                 item_flags |= last_item;
12021         }
12022         /*
12023          * When E-Switch mode is enabled, there are two cases where the
12024          * source port must be set manually.
12025          * The first is a NIC steering rule; the second is an E-Switch
12026          * rule where no port_id item was found. In both cases the
12027          * source port is set according to the port currently in use.
12028          */
12029         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
12030             (priv->representor || priv->master)) {
12031                 if (flow_dv_translate_item_port_id(dev, match_mask,
12032                                                    match_value, NULL, attr))
12033                         return -rte_errno;
12034         }
12035 #ifdef RTE_LIBRTE_MLX5_DEBUG
12036         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
12037                                               dev_flow->dv.value.buf));
12038 #endif
12039         /*
12040          * Layers may already be initialized from the prefix flow if this
12041          * dev_flow is the suffix flow.
12042          */
12043         handle->layers |= item_flags;
12044         if (action_flags & MLX5_FLOW_ACTION_RSS)
12045                 flow_dv_hashfields_set(dev_flow, rss_desc);
12046         /* If the sample action contains an RSS action, the sample/mirror
12047          * resource must be registered after the hash fields are updated.
12048          */
12049         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
12050                 ret = flow_dv_translate_action_sample(dev,
12051                                                       sample,
12052                                                       dev_flow, attr,
12053                                                       &num_of_dest,
12054                                                       sample_actions,
12055                                                       &sample_res,
12056                                                       error);
12057                 if (ret < 0)
12058                         return ret;
12059                 ret = flow_dv_create_action_sample(dev,
12060                                                    dev_flow,
12061                                                    num_of_dest,
12062                                                    &sample_res,
12063                                                    &mdest_res,
12064                                                    sample_actions,
12065                                                    action_flags,
12066                                                    error);
12067                 if (ret < 0)
12068                         return rte_flow_error_set
12069                                                 (error, rte_errno,
12070                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12071                                                 NULL,
12072                                                 "cannot create sample action");
12073                 if (num_of_dest > 1) {
12074                         dev_flow->dv.actions[sample_act_pos] =
12075                         dev_flow->dv.dest_array_res->action;
12076                 } else {
12077                         dev_flow->dv.actions[sample_act_pos] =
12078                         dev_flow->dv.sample_res->verbs_action;
12079                 }
12080         }
12081         /*
12082          * For multiple destinations (sample action with ratio=1), the
12083          * encap action and port_id action are combined into the group
12084          * action, so the original actions must be removed from the flow
12085          * and only the sample action used instead.
12086          */
12087         if (num_of_dest > 1 &&
12088             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
12089                 int i;
12090                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12091
12092                 for (i = 0; i < actions_n; i++) {
12093                         if ((sample_act->dr_encap_action &&
12094                                 sample_act->dr_encap_action ==
12095                                 dev_flow->dv.actions[i]) ||
12096                                 (sample_act->dr_port_id_action &&
12097                                 sample_act->dr_port_id_action ==
12098                                 dev_flow->dv.actions[i]) ||
12099                                 (sample_act->dr_jump_action &&
12100                                 sample_act->dr_jump_action ==
12101                                 dev_flow->dv.actions[i]))
12102                                 continue;
12103                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
12104                 }
12105                 memcpy((void *)dev_flow->dv.actions,
12106                                 (void *)temp_actions,
12107                                 tmp_actions_n * sizeof(void *));
12108                 actions_n = tmp_actions_n;
12109         }
12110         dev_flow->dv.actions_n = actions_n;
12111         dev_flow->act_flags = action_flags;
12112         /* Register matcher. */
12113         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
12114                                     matcher.mask.size);
12115         matcher.priority = mlx5_get_matcher_priority(dev, attr,
12116                                         matcher.priority);
12117         /* The reserved field does not need to be set to 0 here. */
12118         tbl_key.is_fdb = attr->transfer;
12119         tbl_key.is_egress = attr->egress;
12120         tbl_key.level = dev_flow->dv.group;
12121         tbl_key.id = dev_flow->dv.table_id;
12122         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
12123                                      tunnel, attr->group, error))
12124                 return -rte_errno;
12125         return 0;
12126 }
12127
12128 /**
12129  * Set the hash RX queue by hash fields (see enum ibv_rx_hash_fields).
12131  *
12132  * @param[in, out] action
12133  *   Shared RSS action holding the hash RX queue objects.
12134  * @param[in] hash_fields
12135  *   Defines the combination of packet fields participating in RX hash.
12138  * @param[in] hrxq_idx
12139  *   Hash RX queue index to set.
12140  *
12141  * @return
12142  *   0 on success, -1 if the hash fields combination is not supported.
12143  */
12144 static int
12145 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
12146                               const uint64_t hash_fields,
12147                               uint32_t hrxq_idx)
12148 {
12149         uint32_t *hrxqs = action->hrxq;
12150
12151         switch (hash_fields & ~IBV_RX_HASH_INNER) {
12152         case MLX5_RSS_HASH_IPV4:
12153                 /* fall-through. */
12154         case MLX5_RSS_HASH_IPV4_DST_ONLY:
12155                 /* fall-through. */
12156         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
12157                 hrxqs[0] = hrxq_idx;
12158                 return 0;
12159         case MLX5_RSS_HASH_IPV4_TCP:
12160                 /* fall-through. */
12161         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
12162                 /* fall-through. */
12163         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
12164                 hrxqs[1] = hrxq_idx;
12165                 return 0;
12166         case MLX5_RSS_HASH_IPV4_UDP:
12167                 /* fall-through. */
12168         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
12169                 /* fall-through. */
12170         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
12171                 hrxqs[2] = hrxq_idx;
12172                 return 0;
12173         case MLX5_RSS_HASH_IPV6:
12174                 /* fall-through. */
12175         case MLX5_RSS_HASH_IPV6_DST_ONLY:
12176                 /* fall-through. */
12177         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
12178                 hrxqs[3] = hrxq_idx;
12179                 return 0;
12180         case MLX5_RSS_HASH_IPV6_TCP:
12181                 /* fall-through. */
12182         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
12183                 /* fall-through. */
12184         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
12185                 hrxqs[4] = hrxq_idx;
12186                 return 0;
12187         case MLX5_RSS_HASH_IPV6_UDP:
12188                 /* fall-through. */
12189         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
12190                 /* fall-through. */
12191         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
12192                 hrxqs[5] = hrxq_idx;
12193                 return 0;
12194         case MLX5_RSS_HASH_NONE:
12195                 hrxqs[6] = hrxq_idx;
12196                 return 0;
12197         default:
12198                 return -1;
12199         }
12200 }
12201
12202 /**
12203  * Look up the hash RX queue by hash fields (see enum ibv_rx_hash_fields).
12205  *
12206  * @param[in] dev
12207  *   Pointer to the Ethernet device structure.
12208  * @param[in] idx
12209  *   Shared RSS action ID holding hash RX queue objects.
12210  * @param[in] hash_fields
12211  *   Defines the combination of packet fields participating in RX hash.
12214  *
12215  * @return
12216  *   Valid hash RX queue index, otherwise 0.
12217  */
12218 static uint32_t
12219 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
12220                                  const uint64_t hash_fields)
12221 {
12222         struct mlx5_priv *priv = dev->data->dev_private;
12223         struct mlx5_shared_action_rss *shared_rss =
12224             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12225         const uint32_t *hrxqs = shared_rss->hrxq;
12226
12227         switch (hash_fields & ~IBV_RX_HASH_INNER) {
12228         case MLX5_RSS_HASH_IPV4:
12229                 /* fall-through. */
12230         case MLX5_RSS_HASH_IPV4_DST_ONLY:
12231                 /* fall-through. */
12232         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
12233                 return hrxqs[0];
12234         case MLX5_RSS_HASH_IPV4_TCP:
12235                 /* fall-through. */
12236         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
12237                 /* fall-through. */
12238         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
12239                 return hrxqs[1];
12240         case MLX5_RSS_HASH_IPV4_UDP:
12241                 /* fall-through. */
12242         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
12243                 /* fall-through. */
12244         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
12245                 return hrxqs[2];
12246         case MLX5_RSS_HASH_IPV6:
12247                 /* fall-through. */
12248         case MLX5_RSS_HASH_IPV6_DST_ONLY:
12249                 /* fall-through. */
12250         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
12251                 return hrxqs[3];
12252         case MLX5_RSS_HASH_IPV6_TCP:
12253                 /* fall-through. */
12254         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
12255                 /* fall-through. */
12256         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
12257                 return hrxqs[4];
12258         case MLX5_RSS_HASH_IPV6_UDP:
12259                 /* fall-through. */
12260         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
12261                 /* fall-through. */
12262         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
12263                 return hrxqs[5];
12264         case MLX5_RSS_HASH_NONE:
12265                 return hrxqs[6];
12266         default:
12267                 return 0;
12268         }
12270 }
12271
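/*
 * Added usage sketch, illustration only (not part of the upstream file):
 * the set and lookup helpers above share one slot mapping, so an hrxq
 * stored under one variant of a hash-fields combination is returned for
 * the other variants mapped to the same slot. With hypothetical
 * "shared_rss", "shared_rss_idx" and "idx" values:
 *
 *	__flow_dv_action_rss_hrxq_set(shared_rss, MLX5_RSS_HASH_IPV4, idx);
 *	// Later, the same "idx" is returned for a slot-equivalent variant:
 *	__flow_dv_action_rss_hrxq_lookup(dev, shared_rss_idx,
 *					 MLX5_RSS_HASH_IPV4_SRC_ONLY);
 */
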
12272 /**
12273  * Apply the flow to the NIC, lock free
12274  * (the mutex should be acquired by the caller).
12275  *
12276  * @param[in] dev
12277  *   Pointer to the Ethernet device structure.
12278  * @param[in, out] flow
12279  *   Pointer to flow structure.
12280  * @param[out] error
12281  *   Pointer to error structure.
12282  *
12283  * @return
12284  *   0 on success, a negative errno value otherwise and rte_errno is set.
12285  */
12286 static int
12287 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
12288               struct rte_flow_error *error)
12289 {
12290         struct mlx5_flow_dv_workspace *dv;
12291         struct mlx5_flow_handle *dh;
12292         struct mlx5_flow_handle_dv *dv_h;
12293         struct mlx5_flow *dev_flow;
12294         struct mlx5_priv *priv = dev->data->dev_private;
12295         uint32_t handle_idx;
12296         int n;
12297         int err;
12298         int idx;
12299         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12300         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
12301
12302         MLX5_ASSERT(wks);
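        /*
         * Added note: each sub-flow resolves its fate action into a DR
         * action appended at the tail of dv->actions before the flow is
         * created below:
         *   - DROP: the shared drop action (DR) or explicit drop queue (DV),
         *   - QUEUE: a hash RX queue prepared from the RSS descriptor,
         *   - SHARED_RSS: a pre-created hrxq looked up by hash fields,
         *   - DEFAULT_MISS: the shared default-miss action.
         */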
12303         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
12304                 dev_flow = &wks->flows[idx];
12305                 dv = &dev_flow->dv;
12306                 dh = dev_flow->handle;
12307                 dv_h = &dh->dvh;
12308                 n = dv->actions_n;
12309                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
12310                         if (dv->transfer) {
12311                                 MLX5_ASSERT(priv->sh->dr_drop_action);
12312                                 dv->actions[n++] = priv->sh->dr_drop_action;
12313                         } else {
12314 #ifdef HAVE_MLX5DV_DR
12315                                 /* DR supports drop action placeholder. */
12316                                 MLX5_ASSERT(priv->sh->dr_drop_action);
12317                                 dv->actions[n++] = priv->sh->dr_drop_action;
12318 #else
12319                                 /* For DV we use the explicit drop queue. */
12320                                 MLX5_ASSERT(priv->drop_queue.hrxq);
12321                                 dv->actions[n++] =
12322                                                 priv->drop_queue.hrxq->action;
12323 #endif
12324                         }
12325                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
12326                            !dv_h->rix_sample && !dv_h->rix_dest_array) {
12327                         struct mlx5_hrxq *hrxq;
12328                         uint32_t hrxq_idx;
12329
12330                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
12331                                                     &hrxq_idx);
12332                         if (!hrxq) {
12333                                 rte_flow_error_set
12334                                         (error, rte_errno,
12335                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12336                                          "cannot get hash queue");
12337                                 goto error;
12338                         }
12339                         dh->rix_hrxq = hrxq_idx;
12340                         dv->actions[n++] = hrxq->action;
12341                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12342                         struct mlx5_hrxq *hrxq = NULL;
12343                         uint32_t hrxq_idx;
12344
12345                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
12346                                                 rss_desc->shared_rss,
12347                                                 dev_flow->hash_fields);
12348                         if (hrxq_idx)
12349                                 hrxq = mlx5_ipool_get
12350                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
12351                                          hrxq_idx);
12352                         if (!hrxq) {
12353                                 rte_flow_error_set
12354                                         (error, rte_errno,
12355                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12356                                          "cannot get hash queue");
12357                                 goto error;
12358                         }
12359                         dh->rix_srss = rss_desc->shared_rss;
12360                         dv->actions[n++] = hrxq->action;
12361                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
12362                         if (!priv->sh->default_miss_action) {
12363                                 rte_flow_error_set
12364                                         (error, rte_errno,
12365                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12366                                          "default miss action was not created.");
12367                                 goto error;
12368                         }
12369                         dv->actions[n++] = priv->sh->default_miss_action;
12370                 }
12371                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
12372                                                (void *)&dv->value, n,
12373                                                dv->actions, &dh->drv_flow);
12374                 if (err) {
12375                         rte_flow_error_set(error, errno,
12376                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12377                                            NULL,
12378                                            "hardware refuses to create flow");
12379                         goto error;
12380                 }
12381                 if (priv->vmwa_context &&
12382                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
12383                         /*
12384                          * The rule contains the VLAN pattern.
12385                          * For a VF we are going to create a VLAN
12386                          * interface so that the hypervisor sets the
12387                          * correct E-Switch vport context.
12388                          */
12389                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
12390                 }
12391         }
12392         return 0;
12393 error:
12394         err = rte_errno; /* Save rte_errno before cleanup. */
12395         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
12396                        handle_idx, dh, next) {
12397                 /* hrxq is a union member; don't clear it if the flag is not set. */
12398                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
12399                         mlx5_hrxq_release(dev, dh->rix_hrxq);
12400                         dh->rix_hrxq = 0;
12401                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
12402                         dh->rix_srss = 0;
12403                 }
12404                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12405                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12406         }
12407         rte_errno = err; /* Restore rte_errno. */
12408         return -rte_errno;
12409 }
12410
12411 void
12412 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
12413                           struct mlx5_cache_entry *entry)
12414 {
12415         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
12416                                                           entry);
12417
12418         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
12419         mlx5_free(cache);
12420 }
12421
12422 /**
12423  * Release the flow matcher.
12424  *
12425  * @param dev
12426  *   Pointer to Ethernet device.
12427  * @param handle
12428  *   Pointer to mlx5_flow_handle.
12429  *
12430  * @return
12431  *   1 while a reference on it exists, 0 when freed.
12432  */
12433 static int
12434 flow_dv_matcher_release(struct rte_eth_dev *dev,
12435                         struct mlx5_flow_handle *handle)
12436 {
12437         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
12438         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
12439                                                             typeof(*tbl), tbl);
12440         int ret;
12441
12442         MLX5_ASSERT(matcher->matcher_object);
12443         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
12444         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
12445         return ret;
12446 }
12447
12448 /**
12449  * Release encap_decap resource.
12450  *
12451  * @param list
12452  *   Pointer to the hash list.
12453  * @param entry
12454  *   Pointer to the existing resource entry object.
12455  */
12456 void
12457 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
12458                               struct mlx5_hlist_entry *entry)
12459 {
12460         struct mlx5_dev_ctx_shared *sh = list->ctx;
12461         struct mlx5_flow_dv_encap_decap_resource *res =
12462                 container_of(entry, typeof(*res), entry);
12463
12464         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12465         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
12466 }
12467
12468 /**
12469  * Release an encap/decap resource.
12470  *
12471  * @param dev
12472  *   Pointer to Ethernet device.
12473  * @param encap_decap_idx
12474  *   Index of encap decap resource.
12475  *
12476  * @return
12477  *   1 while a reference on it exists, 0 when freed.
12478  */
12479 static int
12480 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
12481                                      uint32_t encap_decap_idx)
12482 {
12483         struct mlx5_priv *priv = dev->data->dev_private;
12484         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
12485
12486         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
12487                                         encap_decap_idx);
12488         if (!cache_resource)
12489                 return 0;
12490         MLX5_ASSERT(cache_resource->action);
12491         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
12492                                      &cache_resource->entry);
12493 }
12494
12495 /**
12496  * Release a jump-to-table action resource.
12497  *
12498  * @param dev
12499  *   Pointer to Ethernet device.
12500  * @param rix_jump
12501  *   Index to the jump action resource.
12502  *
12503  * @return
12504  *   1 while a reference on it exists, 0 when freed.
12505  */
12506 static int
12507 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
12508                                   uint32_t rix_jump)
12509 {
12510         struct mlx5_priv *priv = dev->data->dev_private;
12511         struct mlx5_flow_tbl_data_entry *tbl_data;
12512
12513         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
12514                                   rix_jump);
12515         if (!tbl_data)
12516                 return 0;
12517         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
12518 }
12519
12520 void
12521 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
12522                          struct mlx5_hlist_entry *entry)
12523 {
12524         struct mlx5_flow_dv_modify_hdr_resource *res =
12525                 container_of(entry, typeof(*res), entry);
12526
12527         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
12528         mlx5_free(entry);
12529 }
12530
12531 /**
12532  * Release a modify-header resource.
12533  *
12534  * @param dev
12535  *   Pointer to Ethernet device.
12536  * @param handle
12537  *   Pointer to mlx5_flow_handle.
12538  *
12539  * @return
12540  *   1 while a reference on it exists, 0 when freed.
12541  */
12542 static int
12543 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
12544                                     struct mlx5_flow_handle *handle)
12545 {
12546         struct mlx5_priv *priv = dev->data->dev_private;
12547         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
12548
12549         MLX5_ASSERT(entry->action);
12550         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
12551 }
12552
12553 void
12554 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
12555                           struct mlx5_cache_entry *entry)
12556 {
12557         struct mlx5_dev_ctx_shared *sh = list->ctx;
12558         struct mlx5_flow_dv_port_id_action_resource *cache =
12559                         container_of(entry, typeof(*cache), entry);
12560
12561         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12562         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
12563 }
12564
12565 /**
12566  * Release port ID action resource.
12567  *
12568  * @param dev
12569  *   Pointer to Ethernet device.
12570  * @param handle
12571  *   Pointer to mlx5_flow_handle.
12572  *
12573  * @return
12574  *   1 while a reference on it exists, 0 when freed.
12575  */
12576 static int
12577 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
12578                                         uint32_t port_id)
12579 {
12580         struct mlx5_priv *priv = dev->data->dev_private;
12581         struct mlx5_flow_dv_port_id_action_resource *cache;
12582
12583         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
12584         if (!cache)
12585                 return 0;
12586         MLX5_ASSERT(cache->action);
12587         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
12588                                      &cache->entry);
12589 }
12590
12591 /**
12592  * Release shared RSS action resource.
12593  *
12594  * @param dev
12595  *   Pointer to Ethernet device.
12596  * @param srss
12597  *   Shared RSS action index.
12598  */
12599 static void
12600 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
12601 {
12602         struct mlx5_priv *priv = dev->data->dev_private;
12603         struct mlx5_shared_action_rss *shared_rss;
12604
12605         shared_rss = mlx5_ipool_get
12606                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
12607         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
12608 }
12609
12610 void
12611 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
12612                             struct mlx5_cache_entry *entry)
12613 {
12614         struct mlx5_dev_ctx_shared *sh = list->ctx;
12615         struct mlx5_flow_dv_push_vlan_action_resource *cache =
12616                         container_of(entry, typeof(*cache), entry);
12617
12618         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
12619         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
12620 }
12621
12622 /**
12623  * Release push vlan action resource.
12624  *
12625  * @param dev
12626  *   Pointer to Ethernet device.
12627  * @param handle
12628  *   Pointer to mlx5_flow_handle.
12629  *
12630  * @return
12631  *   1 while a reference on it exists, 0 when freed.
12632  */
12633 static int
12634 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
12635                                           struct mlx5_flow_handle *handle)
12636 {
12637         struct mlx5_priv *priv = dev->data->dev_private;
12638         struct mlx5_flow_dv_push_vlan_action_resource *cache;
12639         uint32_t idx = handle->dvh.rix_push_vlan;
12640
12641         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
12642         if (!cache)
12643                 return 0;
12644         MLX5_ASSERT(cache->action);
12645         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
12646                                      &cache->entry);
12647 }
12648
12649 /**
12650  * Release the fate resource.
12651  *
12652  * @param dev
12653  *   Pointer to Ethernet device.
12654  * @param handle
12655  *   Pointer to mlx5_flow_handle.
12656  */
12657 static void
12658 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
12659                                struct mlx5_flow_handle *handle)
12660 {
12661         if (!handle->rix_fate)
12662                 return;
12663         switch (handle->fate_action) {
12664         case MLX5_FLOW_FATE_QUEUE:
12665                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
12666                         mlx5_hrxq_release(dev, handle->rix_hrxq);
12667                 break;
12668         case MLX5_FLOW_FATE_JUMP:
12669                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
12670                 break;
12671         case MLX5_FLOW_FATE_PORT_ID:
12672                 flow_dv_port_id_action_resource_release(dev,
12673                                 handle->rix_port_id_action);
12674                 break;
12675         default:
12676                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
12677                 break;
12678         }
12679         handle->rix_fate = 0;
12680 }
12681
12682 void
12683 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
12684                          struct mlx5_cache_entry *entry)
12685 {
12686         struct mlx5_flow_dv_sample_resource *cache_resource =
12687                         container_of(entry, typeof(*cache_resource), entry);
12688         struct rte_eth_dev *dev = cache_resource->dev;
12689         struct mlx5_priv *priv = dev->data->dev_private;
12690
12691         if (cache_resource->verbs_action)
12692                 claim_zero(mlx5_flow_os_destroy_flow_action
12693                                 (cache_resource->verbs_action));
12694         if (cache_resource->normal_path_tbl)
12695                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12696                         cache_resource->normal_path_tbl);
12697         flow_dv_sample_sub_actions_release(dev,
12698                                 &cache_resource->sample_idx);
12699         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12700                         cache_resource->idx);
12701         DRV_LOG(DEBUG, "sample resource %p: removed",
12702                 (void *)cache_resource);
12703 }
12704
12705 /**
12706  * Release a sample resource.
12707  *
12708  * @param dev
12709  *   Pointer to Ethernet device.
12710  * @param handle
12711  *   Pointer to mlx5_flow_handle.
12712  *
12713  * @return
12714  *   1 while a reference on it exists, 0 when freed.
12715  */
12716 static int
12717 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
12718                                      struct mlx5_flow_handle *handle)
12719 {
12720         struct mlx5_priv *priv = dev->data->dev_private;
12721         struct mlx5_flow_dv_sample_resource *cache_resource;
12722
12723         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
12724                          handle->dvh.rix_sample);
12725         if (!cache_resource)
12726                 return 0;
12727         MLX5_ASSERT(cache_resource->verbs_action);
12728         return mlx5_cache_unregister(&priv->sh->sample_action_list,
12729                                      &cache_resource->entry);
12730 }
12731
12732 void
12733 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
12734                              struct mlx5_cache_entry *entry)
12735 {
12736         struct mlx5_flow_dv_dest_array_resource *cache_resource =
12737                         container_of(entry, typeof(*cache_resource), entry);
12738         struct rte_eth_dev *dev = cache_resource->dev;
12739         struct mlx5_priv *priv = dev->data->dev_private;
12740         uint32_t i = 0;
12741
12742         MLX5_ASSERT(cache_resource->action);
12743         if (cache_resource->action)
12744                 claim_zero(mlx5_flow_os_destroy_flow_action
12745                                         (cache_resource->action));
12746         for (; i < cache_resource->num_of_dest; i++)
12747                 flow_dv_sample_sub_actions_release(dev,
12748                                 &cache_resource->sample_idx[i]);
12749         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12750                         cache_resource->idx);
12751         DRV_LOG(DEBUG, "destination array resource %p: removed",
12752                 (void *)cache_resource);
12753 }
12754
12755 /**
12756  * Release a destination array resource.
12757  *
12758  * @param dev
12759  *   Pointer to Ethernet device.
12760  * @param handle
12761  *   Pointer to mlx5_flow_handle.
12762  *
12763  * @return
12764  *   1 while a reference on it exists, 0 when freed.
12765  */
12766 static int
12767 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
12768                                     struct mlx5_flow_handle *handle)
12769 {
12770         struct mlx5_priv *priv = dev->data->dev_private;
12771         struct mlx5_flow_dv_dest_array_resource *cache;
12772
12773         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
12774                                handle->dvh.rix_dest_array);
12775         if (!cache)
12776                 return 0;
12777         MLX5_ASSERT(cache->action);
12778         return mlx5_cache_unregister(&priv->sh->dest_array_list,
12779                                      &cache->entry);
12780 }
12781
12782 static void
12783 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
12784 {
12785         struct mlx5_priv *priv = dev->data->dev_private;
12786         struct mlx5_dev_ctx_shared *sh = priv->sh;
12787         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
12788                                 sh->geneve_tlv_option_resource;
12789         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
12790         if (geneve_opt_resource) {
12791                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
12792                                          __ATOMIC_RELAXED))) {
12793                         claim_zero(mlx5_devx_cmd_destroy
12794                                         (geneve_opt_resource->obj));
12795                         mlx5_free(sh->geneve_tlv_option_resource);
12796                         sh->geneve_tlv_option_resource = NULL;
12797                 }
12798         }
12799         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
12800 }
12801
12802 /**
12803  * Remove the flow from the NIC but keep it in memory.
12804  * Lock free (the mutex should be acquired by the caller).
12805  *
12806  * @param[in] dev
12807  *   Pointer to Ethernet device.
12808  * @param[in, out] flow
12809  *   Pointer to flow structure.
12810  */
12811 static void
12812 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12813 {
12814         struct mlx5_flow_handle *dh;
12815         uint32_t handle_idx;
12816         struct mlx5_priv *priv = dev->data->dev_private;
12817
12818         if (!flow)
12819                 return;
12820         handle_idx = flow->dev_handles;
12821         while (handle_idx) {
12822                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12823                                     handle_idx);
12824                 if (!dh)
12825                         return;
12826                 if (dh->drv_flow) {
12827                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
12828                         dh->drv_flow = NULL;
12829                 }
12830                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
12831                         flow_dv_fate_resource_release(dev, dh);
12832                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
12833                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
12834                 handle_idx = dh->next.next;
12835         }
12836 }
12837
12838 /**
12839  * Remove the flow from the NIC and from memory.
12840  * Lock free (the mutex should be acquired by the caller).
12841  *
12842  * @param[in] dev
12843  *   Pointer to the Ethernet device structure.
12844  * @param[in, out] flow
12845  *   Pointer to flow structure.
12846  */
12847 static void
12848 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
12849 {
12850         struct mlx5_flow_handle *dev_handle;
12851         struct mlx5_priv *priv = dev->data->dev_private;
12852         struct mlx5_flow_meter_info *fm = NULL;
12853         uint32_t srss = 0;
12854
12855         if (!flow)
12856                 return;
12857         flow_dv_remove(dev, flow);
12858         if (flow->counter) {
12859                 flow_dv_counter_free(dev, flow->counter);
12860                 flow->counter = 0;
12861         }
12862         if (flow->meter) {
12863                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
12864                 if (fm)
12865                         mlx5_flow_meter_detach(priv, fm);
12866                 flow->meter = 0;
12867         }
12868         if (flow->age)
12869                 flow_dv_aso_age_release(dev, flow->age);
12870         if (flow->geneve_tlv_option) {
12871                 flow_dv_geneve_tlv_option_resource_release(dev);
12872                 flow->geneve_tlv_option = 0;
12873         }
12874         while (flow->dev_handles) {
12875                 uint32_t tmp_idx = flow->dev_handles;
12876
12877                 dev_handle = mlx5_ipool_get(priv->sh->ipool
12878                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
12879                 if (!dev_handle)
12880                         return;
12881                 flow->dev_handles = dev_handle->next.next;
12882                 if (dev_handle->dvh.matcher)
12883                         flow_dv_matcher_release(dev, dev_handle);
12884                 if (dev_handle->dvh.rix_sample)
12885                         flow_dv_sample_resource_release(dev, dev_handle);
12886                 if (dev_handle->dvh.rix_dest_array)
12887                         flow_dv_dest_array_resource_release(dev, dev_handle);
12888                 if (dev_handle->dvh.rix_encap_decap)
12889                         flow_dv_encap_decap_resource_release(dev,
12890                                 dev_handle->dvh.rix_encap_decap);
12891                 if (dev_handle->dvh.modify_hdr)
12892                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
12893                 if (dev_handle->dvh.rix_push_vlan)
12894                         flow_dv_push_vlan_action_resource_release(dev,
12895                                                                   dev_handle);
12896                 if (dev_handle->dvh.rix_tag)
12897                         flow_dv_tag_release(dev,
12898                                             dev_handle->dvh.rix_tag);
12899                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
12900                         flow_dv_fate_resource_release(dev, dev_handle);
12901                 else if (!srss)
12902                         srss = dev_handle->rix_srss;
12903                 if (fm && dev_handle->is_meter_flow_id &&
12904                     dev_handle->split_flow_id)
12905                         mlx5_ipool_free(fm->flow_ipool,
12906                                         dev_handle->split_flow_id);
12907                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
12908                                 tmp_idx);
12909         }
12910         if (srss)
12911                 flow_dv_shared_rss_action_release(dev, srss);
12912 }
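
/*
 * Note on the shared RSS release order above (a reading of the code,
 * not normative): the handles of a flow presumably reference the same
 * shared RSS action, so its index is latched once into srss while the
 * handles are freed, and the action itself is released a single time
 * afterwards. Releasing it per handle could drop the action while
 * later handles still point at it.
 */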
12913
12914 /**
12915  * Release array of hash RX queue objects.
12916  * Helper function.
12917  *
12918  * @param[in] dev
12919  *   Pointer to the Ethernet device structure.
12920  * @param[in, out] hrxqs
12921  *   Array of hash RX queue objects.
12922  *
12923  * @return
12924  *   Total number of references to hash RX queue objects in *hrxqs* array
12925  *   after this operation.
12926  */
12927 static int
12928 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
12929                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
12930 {
12931         size_t i;
12932         int remaining = 0;
12933
12934         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
12935                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
12936
12937                 if (!ret)
12938                         (*hrxqs)[i] = 0;
12939                 remaining += ret;
12940         }
12941         return remaining;
12942 }
12943
12944 /**
12945  * Release all hash RX queue objects representing shared RSS action.
12946  *
12947  * @param[in] dev
12948  *   Pointer to the Ethernet device structure.
12949  * @param[in, out] shared_rss
12950  *   Shared RSS action to remove hash RX queue objects from.
12951  *
12952  * @return
12953  *   Total number of references to hash RX queue objects stored in
12954  *   *shared_rss* after this operation.
12955  *   Expected to be 0 if no external references are held.
12956  */
12957 static int
12958 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
12959                                  struct mlx5_shared_action_rss *shared_rss)
12960 {
12961         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
12962 }
12963
12964 /**
12965  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
12966  * user input.
12967  *
12968  * Only one hash value is available for each L3+L4 combination.
12969  * For example:
12970  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY and
12971  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
12972  * the same slot in mlx5_rss_hash_fields.
12973  *
12974  * @param[in] rss
12975  *   Pointer to the shared action RSS conf.
12976  * @param[in, out] hash_field
12977  *   Hash field variable to be adjusted.
12978  *
12979  * @return
12980  *   void
12981  */
12982 static void
12983 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
12984                                      uint64_t *hash_field)
12985 {
12986         uint64_t rss_types = rss->origin.types;
12987
12988         switch (*hash_field & ~IBV_RX_HASH_INNER) {
12989         case MLX5_RSS_HASH_IPV4:
12990                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
12991                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
12992                         if (rss_types & ETH_RSS_L3_DST_ONLY)
12993                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
12994                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
12995                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
12996                         else
12997                                 *hash_field |= MLX5_RSS_HASH_IPV4;
12998                 }
12999                 return;
13000         case MLX5_RSS_HASH_IPV6:
13001                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13002                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13003                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13004                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13005                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13006                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13007                         else
13008                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13009                 }
13010                 return;
13011         case MLX5_RSS_HASH_IPV4_UDP:
13012                 /* fall-through. */
13013         case MLX5_RSS_HASH_IPV6_UDP:
13014                 if (rss_types & ETH_RSS_UDP) {
13015                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13016                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13017                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13018                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13019                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13020                         else
13021                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
13022                 }
13023                 return;
13024         case MLX5_RSS_HASH_IPV4_TCP:
13025                 /* fall-through. */
13026         case MLX5_RSS_HASH_IPV6_TCP:
13027                 if (rss_types & ETH_RSS_TCP) {
13028                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
13029                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13030                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
13031                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13032                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
13033                         else
13034                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
13035                 }
13036                 return;
13037         default:
13038                 return;
13039         }
13040 }
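
/*
 * Worked example for the adjustment above (illustrative): assume a
 * shared RSS action created with source-only L3 hashing on IPv4:
 *
 *     rss->origin.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY;
 *     uint64_t hash_field = MLX5_RSS_HASH_IPV4;
 *
 *     __flow_dv_action_rss_l34_hash_adjust(rss, &hash_field);
 *
 * On return hash_field equals IBV_RX_HASH_SRC_IPV4: the pre-created
 * MLX5_RSS_HASH_IPV4 slot is reused, but the hrxq hashes on the source
 * address only.
 */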
13041
13042 /**
13043  * Set up the shared RSS action.
13044  * Prepare a set of hash RX queue objects sufficient to handle all valid
13045  * hash_fields combinations (see enum ibv_rx_hash_fields).
13046  *
13047  * @param[in] dev
13048  *   Pointer to the Ethernet device structure.
13049  * @param[in] action_idx
13050  *   Shared RSS action ipool index.
13051  * @param[in, out] shared_rss
13052  *   Partially initialized shared RSS action.
13053  * @param[out] error
13054  *   Perform verbose error reporting if not NULL. Initialized in case of
13055  *   error only.
13056  *
13057  * @return
13058  *   0 on success, otherwise negative errno value.
13059  */
13060 static int
13061 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
13062                            uint32_t action_idx,
13063                            struct mlx5_shared_action_rss *shared_rss,
13064                            struct rte_flow_error *error)
13065 {
13066         struct mlx5_flow_rss_desc rss_desc = { 0 };
13067         size_t i;
13068         int err;
13069
13070         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
13071                 return rte_flow_error_set(error, rte_errno,
13072                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13073                                           "cannot setup indirection table");
13074         }
13075         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
13076         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
13077         rss_desc.const_q = shared_rss->origin.queue;
13078         rss_desc.queue_num = shared_rss->origin.queue_num;
13079         /* Set non-zero value to indicate a shared RSS. */
13080         rss_desc.shared_rss = action_idx;
13081         rss_desc.ind_tbl = shared_rss->ind_tbl;
13082         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
13083                 uint32_t hrxq_idx;
13084                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
13085                 int tunnel = 0;
13086
13087                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
13088                 if (shared_rss->origin.level > 1) {
13089                         hash_fields |= IBV_RX_HASH_INNER;
13090                         tunnel = 1;
13091                 }
13092                 rss_desc.tunnel = tunnel;
13093                 rss_desc.hash_fields = hash_fields;
13094                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
13095                 if (!hrxq_idx) {
13096                         rte_flow_error_set
13097                                 (error, rte_errno,
13098                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13099                                  "cannot get hash queue");
13100                         goto error_hrxq_new;
13101                 }
13102                 err = __flow_dv_action_rss_hrxq_set
13103                         (shared_rss, hash_fields, hrxq_idx);
13104                 MLX5_ASSERT(!err);
13105         }
13106         return 0;
13107 error_hrxq_new:
13108         err = rte_errno;
13109         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
13110         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
13111                 shared_rss->ind_tbl = NULL;
13112         rte_errno = err;
13113         return -rte_errno;
13114 }
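
/*
 * Usage sketch for the table built above (illustrative; the lookup
 * helper named here is assumed to be the counterpart of
 * __flow_dv_action_rss_hrxq_set()): at flow creation time a sub-flow
 * does not create its own hrxq but picks the pre-created one matching
 * its hash layout:
 *
 *     uint64_t hash_fields = MLX5_RSS_HASH_IPV4_TCP;
 *
 *     __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
 *     hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev, action_idx,
 *                                                 hash_fields);
 */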
13115
13116 /**
13117  * Create shared RSS action.
13118  *
13119  * @param[in] dev
13120  *   Pointer to the Ethernet device structure.
13121  * @param[in] conf
13122  *   Shared action configuration.
13123  * @param[in] rss
13124  *   RSS action specification used to create shared action.
13125  * @param[out] error
13126  *   Perform verbose error reporting if not NULL. Initialized in case of
13127  *   error only.
13128  *
13129  * @return
13130  *   A valid shared action ID in case of success, 0 otherwise and
13131  *   rte_errno is set.
13132  */
13133 static uint32_t
13134 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
13135                             const struct rte_flow_indir_action_conf *conf,
13136                             const struct rte_flow_action_rss *rss,
13137                             struct rte_flow_error *error)
13138 {
13139         struct mlx5_priv *priv = dev->data->dev_private;
13140         struct mlx5_shared_action_rss *shared_rss = NULL;
13141         void *queue = NULL;
13142         struct rte_flow_action_rss *origin;
13143         const uint8_t *rss_key;
13144         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
13145         uint32_t idx;
13146
13147         RTE_SET_USED(conf);
13148         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
13149                             0, SOCKET_ID_ANY);
13150         shared_rss = mlx5_ipool_zmalloc
13151                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
13152         if (!shared_rss || !queue) {
13153                 rte_flow_error_set(error, ENOMEM,
13154                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13155                                    "cannot allocate resource memory");
13156                 goto error_rss_init;
13157         }
13158         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
13159                 rte_flow_error_set(error, E2BIG,
13160                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13161                                    "rss action number out of range");
13162                 goto error_rss_init;
13163         }
13164         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
13165                                           sizeof(*shared_rss->ind_tbl),
13166                                           0, SOCKET_ID_ANY);
13167         if (!shared_rss->ind_tbl) {
13168                 rte_flow_error_set(error, ENOMEM,
13169                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13170                                    "cannot allocate resource memory");
13171                 goto error_rss_init;
13172         }
13173         memcpy(queue, rss->queue, queue_size);
13174         shared_rss->ind_tbl->queues = queue;
13175         shared_rss->ind_tbl->queues_n = rss->queue_num;
13176         origin = &shared_rss->origin;
13177         origin->func = rss->func;
13178         origin->level = rss->level;
13179         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
13180         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
13181         /* NULL RSS key indicates default RSS key. */
13182         rss_key = !rss->key ? rss_hash_default_key : rss->key;
13183         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
13184         origin->key = &shared_rss->key[0];
13185         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
13186         origin->queue = queue;
13187         origin->queue_num = rss->queue_num;
13188         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
13189                 goto error_rss_init;
13190         rte_spinlock_init(&shared_rss->action_rss_sl);
13191         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13192         rte_spinlock_lock(&priv->shared_act_sl);
13193         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13194                      &priv->rss_shared_actions, idx, shared_rss, next);
13195         rte_spinlock_unlock(&priv->shared_act_sl);
13196         return idx;
13197 error_rss_init:
13198         if (shared_rss) {
13199                 if (shared_rss->ind_tbl)
13200                         mlx5_free(shared_rss->ind_tbl);
13201                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13202                                 idx);
13203         }
13204         if (queue)
13205                 mlx5_free(queue);
13206         return 0;
13207 }
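
/*
 * Ownership note for the function above (a reading of the code): on
 * success the queue array is owned by shared_rss->ind_tbl and the
 * origin configuration, and is freed only by
 * __flow_dv_action_rss_release(); the caller must not free it. On
 * failure both the queue array and the action storage are released
 * here and 0 is returned with rte_errno set by the error path.
 */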
13208
13209 /**
13210  * Destroy the shared RSS action.
13211  * Release related hash RX queue objects.
13212  *
13213  * @param[in] dev
13214  *   Pointer to the Ethernet device structure.
13215  * @param[in] idx
13216  *   The shared RSS action object ID to be removed.
13217  * @param[out] error
13218  *   Perform verbose error reporting if not NULL. Initialized in case of
13219  *   error only.
13220  *
13221  * @return
13222  *   0 on success, otherwise negative errno value.
13223  */
13224 static int
13225 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
13226                              struct rte_flow_error *error)
13227 {
13228         struct mlx5_priv *priv = dev->data->dev_private;
13229         struct mlx5_shared_action_rss *shared_rss =
13230             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13231         uint32_t old_refcnt = 1;
13232         int remaining;
13233         uint16_t *queue = NULL;
13234
13235         if (!shared_rss)
13236                 return rte_flow_error_set(error, EINVAL,
13237                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13238                                           "invalid shared action");
13239         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
13240         if (remaining)
13241                 return rte_flow_error_set(error, EBUSY,
13242                                           RTE_FLOW_ERROR_TYPE_ACTION,
13243                                           NULL,
13244                                           "shared rss hrxq has references");
13245         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
13246                                          0, 0, __ATOMIC_ACQUIRE,
13247                                          __ATOMIC_RELAXED))
13248                 return rte_flow_error_set(error, EBUSY,
13249                                           RTE_FLOW_ERROR_TYPE_ACTION,
13250                                           NULL,
13251                                           "shared rss has references");
13252         queue = shared_rss->ind_tbl->queues;
13253         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
13254         if (remaining)
13255                 return rte_flow_error_set(error, EBUSY,
13256                                           RTE_FLOW_ERROR_TYPE_ACTION,
13257                                           NULL,
13258                                           "shared rss indirection table has"
13259                                           " references");
13260         mlx5_free(queue);
13261         rte_spinlock_lock(&priv->shared_act_sl);
13262         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13263                      &priv->rss_shared_actions, idx, shared_rss, next);
13264         rte_spinlock_unlock(&priv->shared_act_sl);
13265         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
13266                         idx);
13267         return 0;
13268 }
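
/*
 * Note on the checks above (a reading of the code): the action is
 * created with refcnt == 1 and each flow using it takes an extra
 * reference, so the compare-and-exchange from 1 to 0 fails with EBUSY
 * whenever any flow still references the action. The hrxq and
 * indirection table checks likewise refuse to destroy the action while
 * hardware objects derived from it are still in use.
 */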
13269
13270 /**
13271  * Create an indirect action, lock free
13272  * (the mutex should be acquired by the caller).
13273  * Dispatcher for action type specific call.
13274  *
13275  * @param[in] dev
13276  *   Pointer to the Ethernet device structure.
13277  * @param[in] conf
13278  *   Shared action configuration.
13279  * @param[in] action
13280  *   Action specification used to create indirect action.
13281  * @param[out] error
13282  *   Perform verbose error reporting if not NULL. Initialized in case of
13283  *   error only.
13284  *
13285  * @return
13286  *   A valid shared action handle in case of success, NULL otherwise and
13287  *   rte_errno is set.
13288  */
13289 static struct rte_flow_action_handle *
13290 flow_dv_action_create(struct rte_eth_dev *dev,
13291                       const struct rte_flow_indir_action_conf *conf,
13292                       const struct rte_flow_action *action,
13293                       struct rte_flow_error *err)
13294 {
13295         uint32_t idx = 0;
13296         uint32_t ret = 0;
13297
13298         switch (action->type) {
13299         case RTE_FLOW_ACTION_TYPE_RSS:
13300                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
13301                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
13302                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13303                 break;
13304         case RTE_FLOW_ACTION_TYPE_AGE:
13305                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
13306                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
13307                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
13308                 if (ret) {
13309                         struct mlx5_aso_age_action *aso_age =
13310                                               flow_aso_age_get_by_idx(dev, ret);
13311
13312                         if (!aso_age->age_params.context)
13313                                 aso_age->age_params.context =
13314                                                          (void *)(uintptr_t)idx;
13315                 }
13316                 break;
13317         default:
13318                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
13319                                    NULL, "action type not supported");
13320                 break;
13321         }
13322         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
13323 }
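
/*
 * Illustrative sketch of the handle encoding used above: the action
 * type is stored in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and
 * the ipool index in the bits below it, so a handle is decoded with:
 *
 *     uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *     uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *     uint32_t idx = act_idx &
 *                    ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 *
 * flow_dv_action_destroy() below applies exactly this decoding.
 */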
13324
13325 /**
13326  * Destroy the indirect action.
13327  * Release action related resources on the NIC and the memory.
13328  * Lock free (the mutex should be acquired by the caller).
13329  * Dispatcher for action type specific call.
13330  *
13331  * @param[in] dev
13332  *   Pointer to the Ethernet device structure.
13333  * @param[in] handle
13334  *   The indirect action object handle to be removed.
13335  * @param[out] error
13336  *   Perform verbose error reporting if not NULL. Initialized in case of
13337  *   error only.
13338  *
13339  * @return
13340  *   0 on success, otherwise negative errno value.
13341  */
13342 static int
13343 flow_dv_action_destroy(struct rte_eth_dev *dev,
13344                        struct rte_flow_action_handle *handle,
13345                        struct rte_flow_error *error)
13346 {
13347         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13348         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13349         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13350         int ret;
13351
13352         switch (type) {
13353         case MLX5_INDIRECT_ACTION_TYPE_RSS:
13354                 return __flow_dv_action_rss_release(dev, idx, error);
13355         case MLX5_INDIRECT_ACTION_TYPE_AGE:
13356                 ret = flow_dv_aso_age_release(dev, idx);
13357                 if (ret)
13358                         /*
13359                          * In this case, the last flow holding the
13360                          * reference will actually release the age action.
13361                          */
13362                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
13363                                 " released with references %d.", idx, ret);
13364                 return 0;
13365         default:
13366                 return rte_flow_error_set(error, ENOTSUP,
13367                                           RTE_FLOW_ERROR_TYPE_ACTION,
13368                                           NULL,
13369                                           "action type not supported");
13370         }
13371 }
13372
13373 /**
13374  * Update the shared RSS action configuration in place.
13375  *
13376  * @param[in] dev
13377  *   Pointer to the Ethernet device structure.
13378  * @param[in] idx
13379  *   The shared RSS action object ID to be updated.
13380  * @param[in] action_conf
13381  *   RSS action specification used to modify *shared_rss*.
13382  * @param[out] error
13383  *   Perform verbose error reporting if not NULL. Initialized in case of
13384  *   error only.
13385  *
13386  * @return
13387  *   0 on success, otherwise negative errno value.
13388  * @note Currently only the update of RSS queues is supported.
13389  */
13390 static int
13391 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
13392                             const struct rte_flow_action_rss *action_conf,
13393                             struct rte_flow_error *error)
13394 {
13395         struct mlx5_priv *priv = dev->data->dev_private;
13396         struct mlx5_shared_action_rss *shared_rss =
13397             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13398         int ret = 0;
13399         void *queue = NULL;
13400         uint16_t *queue_old = NULL;
13401         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
13402
13403         if (!shared_rss)
13404                 return rte_flow_error_set(error, EINVAL,
13405                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13406                                           "invalid shared action to update");
13407         if (priv->obj_ops.ind_table_modify == NULL)
13408                 return rte_flow_error_set(error, ENOTSUP,
13409                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13410                                           "cannot modify indirection table");
13411         queue = mlx5_malloc(MLX5_MEM_ZERO,
13412                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
13413                             0, SOCKET_ID_ANY);
13414         if (!queue)
13415                 return rte_flow_error_set(error, ENOMEM,
13416                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13417                                           NULL,
13418                                           "cannot allocate resource memory");
13419         memcpy(queue, action_conf->queue, queue_size);
13420         MLX5_ASSERT(shared_rss->ind_tbl);
13421         rte_spinlock_lock(&shared_rss->action_rss_sl);
13422         queue_old = shared_rss->ind_tbl->queues;
13423         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
13424                                         queue, action_conf->queue_num, true);
13425         if (ret) {
13426                 mlx5_free(queue);
13427                 ret = rte_flow_error_set(error, rte_errno,
13428                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13429                                           "cannot update indirection table");
13430         } else {
13431                 mlx5_free(queue_old);
13432                 shared_rss->origin.queue = queue;
13433                 shared_rss->origin.queue_num = action_conf->queue_num;
13434         }
13435         rte_spinlock_unlock(&shared_rss->action_rss_sl);
13436         return ret;
13437 }
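
/*
 * Application-level usage sketch (illustrative, using the generic
 * rte_flow API; port_id, handle and error are assumed to exist):
 *
 *     uint16_t queues[] = { 0, 1, 2, 3 };
 *     struct rte_flow_action_rss conf = {
 *             .queue = queues,
 *             .queue_num = RTE_DIM(queues),
 *     };
 *     struct rte_flow_action update = {
 *             .type = RTE_FLOW_ACTION_TYPE_RSS,
 *             .conf = &conf,
 *     };
 *
 *     ret = rte_flow_action_handle_update(port_id, handle, &update,
 *                                         &error);
 *
 * As noted above, only the queue set is updated; the other fields of
 * the RSS configuration are left as created.
 */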
13438
13439 /**
13440  * Update the indirect action configuration in place, lock free
13441  * (the mutex should be acquired by the caller).
13442  *
13443  * @param[in] dev
13444  *   Pointer to the Ethernet device structure.
13445  * @param[in] handle
13446  *   The indirect action object handle to be updated.
13447  * @param[in] update
13448  *   Action specification used to modify the action pointed to by *handle*.
13449  *   *update* could be of the same type as the action pointed to by the
13450  *   *handle* argument, or some other structure like a wrapper, depending on
13451  *   the indirect action type.
13452  * @param[out] error
13453  *   Perform verbose error reporting if not NULL. Initialized in case of
13454  *   error only.
13455  *
13456  * @return
13457  *   0 on success, otherwise negative errno value.
13458  */
13459 static int
13460 flow_dv_action_update(struct rte_eth_dev *dev,
13461                         struct rte_flow_action_handle *handle,
13462                         const void *update,
13463                         struct rte_flow_error *err)
13464 {
13465         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13466         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13467         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13468         const void *action_conf;
13469
13470         switch (type) {
13471         case MLX5_INDIRECT_ACTION_TYPE_RSS:
13472                 action_conf = ((const struct rte_flow_action *)update)->conf;
13473                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
13474         default:
13475                 return rte_flow_error_set(err, ENOTSUP,
13476                                           RTE_FLOW_ERROR_TYPE_ACTION,
13477                                           NULL,
13478                                           "action type update not supported");
13479         }
13480 }
13481
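/**
 * Query the indirect action, lock free
 * (the mutex should be acquired by the caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] handle
 *   The indirect action object handle to be queried.
 * @param[out] data
 *   Pointer to storage for the query result, e.g. struct
 *   rte_flow_query_age for an AGE action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */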
13482 static int
13483 flow_dv_action_query(struct rte_eth_dev *dev,
13484                      const struct rte_flow_action_handle *handle, void *data,
13485                      struct rte_flow_error *error)
13486 {
13487         struct mlx5_age_param *age_param;
13488         struct rte_flow_query_age *resp;
13489         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13490         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13491         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
13492
13493         switch (type) {
13494         case MLX5_INDIRECT_ACTION_TYPE_AGE:
13495                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
13496                 resp = data;
13497                 resp->aged = __atomic_load_n(&age_param->state,
13498                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
13499                                                                           1 : 0;
13500                 resp->sec_since_last_hit_valid = !resp->aged;
13501                 if (resp->sec_since_last_hit_valid)
13502                         resp->sec_since_last_hit = __atomic_load_n
13503                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
13504                 return 0;
13505         default:
13506                 return rte_flow_error_set(error, ENOTSUP,
13507                                           RTE_FLOW_ERROR_TYPE_ACTION,
13508                                           NULL,
13509                                           "action type query not supported");
13510         }
13511 }
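
/*
 * Application-level usage sketch (illustrative; port_id, handle and
 * error are assumed to exist): querying an indirect AGE action through
 * the generic API lands in the handler above:
 *
 *     struct rte_flow_query_age resp;
 *
 *     ret = rte_flow_action_handle_query(port_id, handle, &resp,
 *                                        &error);
 *     if (!ret && resp.aged)
 *             ...; the action has aged out
 */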
13512
13513 /**
13514  * Destroy the meter sub policy table rules.
13515  * Lock free (the mutex should be acquired by the caller).
13516  *
13517  * @param[in] dev
13518  *   Pointer to Ethernet device.
13519  * @param[in] sub_policy
13520  *   Pointer to meter sub policy table.
13521  */
13522 static void
13523 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
13524                              struct mlx5_flow_meter_sub_policy *sub_policy)
13525 {
13526         struct mlx5_flow_tbl_data_entry *tbl;
13527         int i;
13528
13529         for (i = 0; i < RTE_COLORS; i++) {
13530                 if (sub_policy->color_rule[i]) {
13531                         claim_zero(mlx5_flow_os_destroy_flow
13532                                 (sub_policy->color_rule[i]));
13533                         sub_policy->color_rule[i] = NULL;
13534                 }
13535                 if (sub_policy->color_matcher[i]) {
13536                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
13537                                 typeof(*tbl), tbl);
13538                         mlx5_cache_unregister(&tbl->matchers,
13539                                       &sub_policy->color_matcher[i]->entry);
13540                         sub_policy->color_matcher[i] = NULL;
13541                 }
13542         }
13543         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
13544                 if (sub_policy->rix_hrxq[i]) {
13545                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
13546                         sub_policy->rix_hrxq[i] = 0;
13547                 }
13548                 if (sub_policy->jump_tbl[i]) {
13549                         flow_dv_tbl_resource_release(MLX5_SH(dev),
13550                         sub_policy->jump_tbl[i]);
13551                         sub_policy->jump_tbl[i] = NULL;
13552                 }
13553         }
13554         if (sub_policy->tbl_rsc) {
13555                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13556                         sub_policy->tbl_rsc);
13557                 sub_policy->tbl_rsc = NULL;
13558         }
13559 }
13560
13561 /**
13562  * Destroy the policy rules, lock free
13563  * (the mutex should be acquired by the caller).
13564  * Walks every sub-policy of every meter domain.
13565  *
13566  * @param[in] dev
13567  *   Pointer to the Ethernet device structure.
13568  * @param[in] mtr_policy
13569  *   Meter policy struct.
13570  */
13571 static void
13572 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
13573                       struct mlx5_flow_meter_policy *mtr_policy)
13574 {
13575         uint32_t i, j;
13576         struct mlx5_flow_meter_sub_policy *sub_policy;
13577         uint16_t sub_policy_num;
13578
13579         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
13580                 sub_policy_num = (mtr_policy->sub_policy_num >>
13581                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
13582                         MLX5_MTR_SUB_POLICY_NUM_MASK;
13583                 for (j = 0; j < sub_policy_num; j++) {
13584                         sub_policy = mtr_policy->sub_policys[i][j];
13585                         if (sub_policy)
13586                                 __flow_dv_destroy_sub_policy_rules
13587                                                 (dev, sub_policy);
13588                 }
13589         }
13590 }
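
/*
 * Illustrative note on the sub_policy_num packing used above: each
 * meter domain owns MLX5_MTR_SUB_POLICY_NUM_SHIFT bits of the field,
 * so the per-domain sub-policy count is recovered with:
 *
 *     count = (mtr_policy->sub_policy_num >>
 *              (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 *             MLX5_MTR_SUB_POLICY_NUM_MASK;
 *
 * flow_dv_create_mtr_policy_acts() below decodes the field the same
 * way.
 */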
13591
13592 /**
13593  * Destroy the policy actions, lock free
13594  * (the mutex should be acquired by the caller).
13595  * Releases the per-color resources according to their fate action type.
13596  *
13597  * @param[in] dev
13598  *   Pointer to the Ethernet device structure.
13599  * @param[in] mtr_policy
13600  *   Meter policy struct.
13601  */
13602 static void
13603 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
13604                       struct mlx5_flow_meter_policy *mtr_policy)
13605 {
13606         struct rte_flow_action *rss_action;
13607         struct mlx5_flow_handle dev_handle;
13608         uint32_t i, j;
13609
13610         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
13611                 if (mtr_policy->act_cnt[i].rix_mark) {
13612                         flow_dv_tag_release(dev,
13613                                 mtr_policy->act_cnt[i].rix_mark);
13614                         mtr_policy->act_cnt[i].rix_mark = 0;
13615                 }
13616                 if (mtr_policy->act_cnt[i].modify_hdr) {
13617                         dev_handle.dvh.modify_hdr =
13618                                 mtr_policy->act_cnt[i].modify_hdr;
13619                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
13620                 }
13621                 switch (mtr_policy->act_cnt[i].fate_action) {
13622                 case MLX5_FLOW_FATE_SHARED_RSS:
13623                         rss_action = mtr_policy->act_cnt[i].rss;
13624                         mlx5_free(rss_action);
13625                         break;
13626                 case MLX5_FLOW_FATE_PORT_ID:
13627                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
13628                                 flow_dv_port_id_action_resource_release(dev,
13629                                 mtr_policy->act_cnt[i].rix_port_id_action);
13630                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
13631                         }
13632                         break;
13633                 case MLX5_FLOW_FATE_DROP:
13634                 case MLX5_FLOW_FATE_JUMP:
13635                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
13636                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
13637                                                 NULL;
13638                         break;
13639                 default:
13640                         /* Queue action: nothing to do. */
13641                         break;
13642                 }
13643         }
13644         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
13645                 mtr_policy->dr_drop_action[j] = NULL;
13646 }
13647
13648 /**
13649  * Create the policy actions per domain, lock free
13650  * (the mutex should be acquired by the caller).
13651  * Dispatcher for action type specific calls.
13652  *
13653  * @param[in] dev
13654  *   Pointer to the Ethernet device structure.
13655  * @param[in] mtr_policy
13656  *   Meter policy struct.
13657  * @param[in] actions
13658  *   Per-color action specifications used to create the meter actions.
13659  * @param[out] error
13660  *   Perform verbose error reporting if not NULL. Initialized in case of
13661  *   error only.
13662  *
13663  * @return
13664  *   0 on success, otherwise negative errno value.
13665  */
13666 static int
13667 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
13668                         struct mlx5_flow_meter_policy *mtr_policy,
13669                         const struct rte_flow_action *actions[RTE_COLORS],
13670                         enum mlx5_meter_domain domain,
13671                         struct rte_mtr_error *error)
13672 {
13673         struct mlx5_priv *priv = dev->data->dev_private;
13674         struct rte_flow_error flow_err;
13675         const struct rte_flow_action *act;
13676         uint64_t action_flags = 0;
13677         struct mlx5_flow_handle dh;
13678         struct mlx5_flow dev_flow;
13679         struct mlx5_flow_dv_port_id_action_resource port_id_action;
13680         int i, ret;
13681         uint8_t egress, transfer;
13682         struct mlx5_meter_policy_action_container *act_cnt = NULL;
13683         union {
13684                 struct mlx5_flow_dv_modify_hdr_resource res;
13685                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13686                             sizeof(struct mlx5_modification_cmd) *
13687                             (MLX5_MAX_MODIFY_NUM + 1)];
13688         } mhdr_dummy;
13689
13690         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
13691         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
13692         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
13693         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
13694         memset(&port_id_action, 0,
13695                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
13696         dev_flow.handle = &dh;
13697         dev_flow.dv.port_id_action = &port_id_action;
13698         dev_flow.external = true;
13699         for (i = 0; i < RTE_COLORS; i++) {
13700                 if (i < MLX5_MTR_RTE_COLORS)
13701                         act_cnt = &mtr_policy->act_cnt[i];
13702                 for (act = actions[i];
13703                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
13704                         act++) {
13705                         switch (act->type) {
13706                         case RTE_FLOW_ACTION_TYPE_MARK:
13707                         {
13708                                 uint32_t tag_be = mlx5_flow_mark_set
13709                                         (((const struct rte_flow_action_mark *)
13710                                         (act->conf))->id);
13711
13712                                 if (i >= MLX5_MTR_RTE_COLORS)
13713                                         return -rte_mtr_error_set(error,
13714                                           ENOTSUP,
13715                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13716                                           NULL,
13717                                           "cannot create policy "
13718                                           "mark action for this color");
13719                                 dev_flow.handle->mark = 1;
13720                                 if (flow_dv_tag_resource_register(dev, tag_be,
13721                                                   &dev_flow, &flow_err))
13722                                         return -rte_mtr_error_set(error,
13723                                         ENOTSUP,
13724                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13725                                         NULL,
13726                                         "cannot setup policy mark action");
13727                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
13728                                 act_cnt->rix_mark =
13729                                         dev_flow.handle->dvh.rix_tag;
13730                                 if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
13731                                         dev_flow.handle->rix_hrxq =
13732                         mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
13733                                         flow_drv_rxq_flags_set(dev,
13734                                                 dev_flow.handle);
13735                                 }
13736                                 action_flags |= MLX5_FLOW_ACTION_MARK;
13737                                 break;
13738                         }
13739                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
13740                         {
13741                                 struct mlx5_flow_dv_modify_hdr_resource
13742                                         *mhdr_res = &mhdr_dummy.res;
13743
13744                                 if (i >= MLX5_MTR_RTE_COLORS)
13745                                         return -rte_mtr_error_set(error,
13746                                           ENOTSUP,
13747                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13748                                           NULL,
13749                                           "cannot create policy "
13750                                           "set tag action for this color");
13751                                 memset(mhdr_res, 0, sizeof(*mhdr_res));
13752                                 mhdr_res->ft_type = transfer ?
13753                                         MLX5DV_FLOW_TABLE_TYPE_FDB :
13754                                         egress ?
13755                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
13756                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
13757                                 if (flow_dv_convert_action_set_tag
13758                                 (dev, mhdr_res,
13759                                 (const struct rte_flow_action_set_tag *)
13760                                 act->conf,  &flow_err))
13761                                         return -rte_mtr_error_set(error,
13762                                         ENOTSUP,
13763                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13764                                         NULL, "cannot convert policy "
13765                                         "set tag action");
13766                                 if (!mhdr_res->actions_num)
13767                                         return -rte_mtr_error_set(error,
13768                                         ENOTSUP,
13769                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13770                                         NULL, "cannot find policy "
13771                                         "set tag action");
13772                                 /* create modify action if needed. */
13773                                 dev_flow.dv.group = 1;
13774                                 if (flow_dv_modify_hdr_resource_register
13775                                         (dev, mhdr_res, &dev_flow, &flow_err))
13776                                         return -rte_mtr_error_set(error,
13777                                         ENOTSUP,
13778                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13779                                         NULL, "cannot register policy "
13780                                         "set tag action");
13781                                 act_cnt->modify_hdr =
13782                                 dev_flow.handle->dvh.modify_hdr;
13783                                 if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
13784                                         dev_flow.handle->rix_hrxq =
13785                                 mtr_policy->sub_policys[domain][0]->rix_hrxq[i];
13786                                         flow_drv_rxq_flags_set(dev,
13787                                                 dev_flow.handle);
13788                                 }
13789                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13790                                 break;
13791                         }
13792                         case RTE_FLOW_ACTION_TYPE_DROP:
13793                         {
13794                                 struct mlx5_flow_mtr_mng *mtrmng =
13795                                                 priv->sh->mtrmng;
13796                                 struct mlx5_flow_tbl_data_entry *tbl_data;
13797
13798                                 /*
13799                                  * Create the drop table with
13800                                  * METER DROP level.
13801                                  */
13802                                 if (!mtrmng->drop_tbl[domain]) {
13803                                         mtrmng->drop_tbl[domain] =
13804                                         flow_dv_tbl_resource_get(dev,
13805                                         MLX5_FLOW_TABLE_LEVEL_METER,
13806                                         egress, transfer, false, NULL, 0,
13807                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
13808                                         if (!mtrmng->drop_tbl[domain])
13809                                                 return -rte_mtr_error_set
13810                                         (error, ENOTSUP,
13811                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13812                                         NULL,
13813                                         "Failed to create meter drop table");
13814                                 }
13815                                 tbl_data = container_of
13816                                 (mtrmng->drop_tbl[domain],
13817                                 struct mlx5_flow_tbl_data_entry, tbl);
13818                                 if (i < MLX5_MTR_RTE_COLORS) {
13819                                         act_cnt->dr_jump_action[domain] =
13820                                                 tbl_data->jump.action;
13821                                         act_cnt->fate_action =
13822                                                 MLX5_FLOW_FATE_DROP;
13823                                 }
13824                                 if (i == RTE_COLOR_RED)
13825                                         mtr_policy->dr_drop_action[domain] =
13826                                                 tbl_data->jump.action;
13827                                 action_flags |= MLX5_FLOW_ACTION_DROP;
13828                                 break;
13829                         }
13830                         case RTE_FLOW_ACTION_TYPE_QUEUE:
13831                         {
13832                                 struct mlx5_hrxq *hrxq;
13833                                 uint32_t hrxq_idx;
13834                                 struct mlx5_flow_rss_desc rss_desc;
13835                                 struct mlx5_flow_meter_sub_policy *sub_policy =
13836                                 mtr_policy->sub_policys[domain][0];
13837
13838                                 if (i >= MLX5_MTR_RTE_COLORS)
13839                                         return -rte_mtr_error_set(error,
13840                                         ENOTSUP,
13841                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13842                                         NULL, "cannot create policy "
13843                                         "fate queue for this color");
13844                                 memset(&rss_desc, 0,
13845                                         sizeof(struct mlx5_flow_rss_desc));
13846                                 rss_desc.queue_num = 1;
13847                                 rss_desc.const_q = act->conf;
13848                                 hrxq = flow_dv_hrxq_prepare(dev, &dev_flow,
13849                                                     &rss_desc, &hrxq_idx);
13850                                 if (!hrxq)
13851                                         return -rte_mtr_error_set(error,
13852                                         ENOTSUP,
13853                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13854                                         NULL,
13855                                         "cannot create policy fate queue");
13856                                 sub_policy->rix_hrxq[i] = hrxq_idx;
13857                                 act_cnt->fate_action =
13858                                         MLX5_FLOW_FATE_QUEUE;
13859                                 dev_flow.handle->fate_action =
13860                                         MLX5_FLOW_FATE_QUEUE;
13861                                 if (action_flags & MLX5_FLOW_ACTION_MARK ||
13862                                     action_flags & MLX5_FLOW_ACTION_SET_TAG) {
13863                                         dev_flow.handle->rix_hrxq = hrxq_idx;
13864                                         flow_drv_rxq_flags_set(dev,
13865                                                 dev_flow.handle);
13866                                 }
13867                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
13868                                 break;
13869                         }
13870                         case RTE_FLOW_ACTION_TYPE_RSS:
13871                         {
13872                                 int rss_size;
13873
13874                                 if (i >= MLX5_MTR_RTE_COLORS)
13875                                         return -rte_mtr_error_set(error,
13876                                           ENOTSUP,
13877                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13878                                           NULL,
13879                                           "cannot create policy "
13880                                           "rss action for this color");
13881                                 /*
13882                                  * Save the RSS conf into the policy struct
13883                                  * for the translate stage.
13884                                  */
13885                                 rss_size = (int)rte_flow_conv
13886                                         (RTE_FLOW_CONV_OP_ACTION,
13887                                         NULL, 0, act, &flow_err);
13888                                 if (rss_size <= 0)
13889                                         return -rte_mtr_error_set(error,
13890                                           ENOTSUP,
13891                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13892                                           NULL, "Get the wrong "
13893                                           "rss action struct size");
13894                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
13895                                                 rss_size, 0, SOCKET_ID_ANY);
13896                                 if (!act_cnt->rss)
13897                                         return -rte_mtr_error_set(error,
13898                                           ENOTSUP,
13899                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13900                                           NULL,
13901                                           "Fail to malloc rss action memory");
13902                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
13903                                         act_cnt->rss, rss_size,
13904                                         act, &flow_err);
13905                                 if (ret < 0)
13906                                         return -rte_mtr_error_set(error,
13907                                           ENOTSUP,
13908                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13909                                           NULL, "Fail to save "
13910                                           "rss action into policy struct");
13911                                 act_cnt->fate_action =
13912                                         MLX5_FLOW_FATE_SHARED_RSS;
13913                                 action_flags |= MLX5_FLOW_ACTION_RSS;
13914                                 break;
13915                         }
13916                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
13917                         {
13918                                 struct mlx5_flow_dv_port_id_action_resource
13919                                         port_id_resource;
13920                                 uint32_t port_id = 0;
13921
13922                                 if (i >= MLX5_MTR_RTE_COLORS)
13923                                         return -rte_mtr_error_set(error,
13924                                         ENOTSUP,
13925                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13926                                         NULL, "cannot create policy "
13927                                         "port action for this color");
13928                                 memset(&port_id_resource, 0,
13929                                         sizeof(port_id_resource));
13930                                 if (flow_dv_translate_action_port_id(dev, act,
13931                                                 &port_id, &flow_err))
13932                                         return -rte_mtr_error_set(error,
13933                                         ENOTSUP,
13934                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13935                                         NULL, "cannot translate "
13936                                         "policy port action");
13937                                 port_id_resource.port_id = port_id;
13938                                 if (flow_dv_port_id_action_resource_register
13939                                         (dev, &port_id_resource,
13940                                         &dev_flow, &flow_err))
13941                                         return -rte_mtr_error_set(error,
13942                                         ENOTSUP,
13943                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13944                                         NULL, "cannot setup "
13945                                         "policy port action");
13946                                 act_cnt->rix_port_id_action =
13947                                         dev_flow.handle->rix_port_id_action;
13948                                 act_cnt->fate_action =
13949                                         MLX5_FLOW_FATE_PORT_ID;
13950                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
13951                                 break;
13952                         }
13953                         case RTE_FLOW_ACTION_TYPE_JUMP:
13954                         {
13955                                 uint32_t jump_group = 0;
13956                                 uint32_t table = 0;
13957                                 struct mlx5_flow_tbl_data_entry *tbl_data;
13958                                 struct flow_grp_info grp_info = {
13959                                         .external = !!dev_flow.external,
13960                                         .transfer = !!transfer,
13961                                         .fdb_def_rule = !!priv->fdb_def_rule,
13962                                         .std_tbl_fix = 0,
13963                                         .skip_scale = dev_flow.skip_scale &
13964                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
13965                                 };
13966                                 struct mlx5_flow_meter_sub_policy *sub_policy =
13967                                 mtr_policy->sub_policys[domain][0];
13968
13969                                 if (i >= MLX5_MTR_RTE_COLORS)
13970                                         return -rte_mtr_error_set(error,
13971                                           ENOTSUP,
13972                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
13973                                           NULL,
13974                                           "cannot create policy "
13975                                           "jump action for this color");
13976                                 jump_group =
13977                                 ((const struct rte_flow_action_jump *)
13978                                                         act->conf)->group;
13979                                 if (mlx5_flow_group_to_table(dev, NULL,
13980                                                        jump_group,
13981                                                        &table,
13982                                                        &grp_info, &flow_err))
13983                                         return -rte_mtr_error_set(error,
13984                                         ENOTSUP,
13985                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13986                                         NULL, "cannot setup "
13987                                         "policy jump action");
13988                                 sub_policy->jump_tbl[i] =
13989                                 flow_dv_tbl_resource_get(dev,
13990                                         table, egress,
13991                                         transfer,
13992                                         !!dev_flow.external,
13993                                         NULL, jump_group, 0,
13994                                         0, &flow_err);
13995                                 if (!sub_policy->jump_tbl[i])
13996                                         return -rte_mtr_error_set(error,
13997                                         ENOTSUP,
13998                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
13999                                         NULL,
14000                                         "cannot create jump action.");
14001                                 tbl_data = container_of
14002                                 (sub_policy->jump_tbl[i],
14003                                 struct mlx5_flow_tbl_data_entry, tbl);
14004                                 act_cnt->dr_jump_action[domain] =
14005                                         tbl_data->jump.action;
14006                                 act_cnt->fate_action =
14007                                         MLX5_FLOW_FATE_JUMP;
14008                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
14009                                 break;
14010                         }
14011                         default:
14012                                 return -rte_mtr_error_set(error, ENOTSUP,
14013                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14014                                           NULL, "action type not supported");
14015                         }
14016                 }
14017         }
14018         return 0;
14019 }
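
/*
 * Editorial note: an illustrative application-side sketch (not part of this
 * file) of how the per-color action arrays consumed above are supplied
 * through the public rte_mtr API. The group number and policy layout are
 * placeholder assumptions; error handling is trimmed for brevity.
 */
#if 0 /* example only, not built */
#include <rte_mtr.h>

static int
example_add_green_jump_red_drop_policy(uint16_t port_id, uint32_t policy_id)
{
        const struct rte_flow_action_jump jump = { .group = 1 };
        const struct rte_flow_action green_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action red_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params params = {
                .actions = {
                        [RTE_COLOR_GREEN] = green_acts,
                        [RTE_COLOR_YELLOW] = NULL,
                        [RTE_COLOR_RED] = red_acts,
                },
        };
        struct rte_mtr_error error;

        return rte_mtr_meter_policy_add(port_id, policy_id, &params, &error);
}
#endif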
14020
14021 /**
14022  * Create the policy actions per domain, lock free
14023  * (mutex should be acquired by the caller).
14024  * Dispatcher for the action type specific call.
14025  *
14026  * @param[in] dev
14027  *   Pointer to the Ethernet device structure.
14028  * @param[in] mtr_policy
14029  *   Meter policy struct.
14030  * @param[in] actions
14031  *   Per-color action specifications used to create the meter actions.
14032  * @param[out] error
14033  *   Perform verbose error reporting if not NULL. Initialized in case of
14034  *   error only.
14035  *
14036  * @return
14037  *   0 on success, otherwise negative errno value.
14038  */
14039 static int
14040 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
14041                       struct mlx5_flow_meter_policy *mtr_policy,
14042                       const struct rte_flow_action *actions[RTE_COLORS],
14043                       struct rte_mtr_error *error)
14044 {
14045         int ret, i;
14046         uint16_t sub_policy_num;
14047
14048         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14049                 sub_policy_num = (mtr_policy->sub_policy_num >>
14050                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14051                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14052                 if (sub_policy_num) {
14053                         ret = __flow_dv_create_domain_policy_acts(dev,
14054                                 mtr_policy, actions,
14055                                 (enum mlx5_meter_domain)i, error);
14056                         if (ret)
14057                                 return ret;
14058                 }
14059         }
14060         return 0;
14061 }
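
/*
 * Editorial note: the sub_policy_num field decoded above packs a per-domain
 * sub-policy count into one word, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per
 * domain. A minimal illustrative helper (not part of the driver) making the
 * extraction explicit:
 */
#if 0 /* example only, not built */
static inline uint16_t
example_sub_policy_num(uint32_t sub_policy_num, enum mlx5_meter_domain domain)
{
        /* Shift the per-domain slot down, then mask off the count bits. */
        return (sub_policy_num >>
                (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
                MLX5_MTR_SUB_POLICY_NUM_MASK;
}
#endif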
14062
14063 /**
14064  * Query a DV flow rule for its statistics via DevX.
14065  *
14066  * @param[in] dev
14067  *   Pointer to Ethernet device.
14068  * @param[in] flow
14069  *   Pointer to the sub flow.
14070  * @param[out] data
14071  *   Data retrieved by the query.
14072  * @param[out] error
14073  *   Perform verbose error reporting if not NULL.
14074  *
14075  * @return
14076  *   0 on success, a negative errno value otherwise and rte_errno is set.
14077  */
14078 static int
14079 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
14080                     void *data, struct rte_flow_error *error)
14081 {
14082         struct mlx5_priv *priv = dev->data->dev_private;
14083         struct rte_flow_query_count *qc = data;
14084
14085         if (!priv->config.devx)
14086                 return rte_flow_error_set(error, ENOTSUP,
14087                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14088                                           NULL,
14089                                           "counters are not supported");
14090         if (flow->counter) {
14091                 uint64_t pkts, bytes;
14092                 struct mlx5_flow_counter *cnt;
14093                 int err;
14094
14095                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
14096                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
14097                                            &bytes);
14098
14099                 if (err)
14100                         return rte_flow_error_set(error, -err,
14101                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14102                                         NULL, "cannot read counters");
14103                 qc->hits_set = 1;
14104                 qc->bytes_set = 1;
14105                 qc->hits = pkts - cnt->hits;
14106                 qc->bytes = bytes - cnt->bytes;
14107                 if (qc->reset) {
14108                         cnt->hits = pkts;
14109                         cnt->bytes = bytes;
14110                 }
14111                 return 0;
14112         }
14113         return rte_flow_error_set(error, EINVAL,
14114                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14115                                   NULL,
14116                                   "counters are not available");
14117 }
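
/*
 * Editorial note: an illustrative application-side sketch (not part of this
 * file) of how flow_dv_query_count() above is reached through the public
 * rte_flow API. The COUNT query returns deltas since the last reset.
 */
#if 0 /* example only, not built */
#include <stdio.h>
#include <inttypes.h>

static void
example_query_count(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_count qc = { .reset = 1 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_error error;

        if (rte_flow_query(port_id, flow, &action, &qc, &error) == 0 &&
            qc.hits_set && qc.bytes_set)
                printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
                       qc.hits, qc.bytes);
}
#endif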
14118
14119 /**
14120  * Query a flow rule AGE action for aging information.
14121  *
14122  * @param[in] dev
14123  *   Pointer to Ethernet device.
14124  * @param[in] flow
14125  *   Pointer to the sub flow.
14126  * @param[out] data
14127  *   Data retrieved by the query.
14128  * @param[out] error
14129  *   Perform verbose error reporting if not NULL.
14130  *
14131  * @return
14132  *   0 on success, a negative errno value otherwise and rte_errno is set.
14133  */
14134 static int
14135 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
14136                   void *data, struct rte_flow_error *error)
14137 {
14138         struct rte_flow_query_age *resp = data;
14139         struct mlx5_age_param *age_param;
14140
14141         if (flow->age) {
14142                 struct mlx5_aso_age_action *act =
14143                                      flow_aso_age_get_by_idx(dev, flow->age);
14144
14145                 age_param = &act->age_params;
14146         } else if (flow->counter) {
14147                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
14148
14149                 if (!age_param || !age_param->timeout)
14150                         return rte_flow_error_set
14151                                         (error, EINVAL,
14152                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14153                                          NULL, "cannot read age data");
14154         } else {
14155                 return rte_flow_error_set(error, EINVAL,
14156                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14157                                           NULL, "age data not available");
14158         }
14159         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
14160                                      AGE_TMOUT ? 1 : 0;
14161         resp->sec_since_last_hit_valid = !resp->aged;
14162         if (resp->sec_since_last_hit_valid)
14163                 resp->sec_since_last_hit = __atomic_load_n
14164                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
14165         return 0;
14166 }
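
/*
 * Editorial note: the matching application-side sketch (not part of this
 * file) for the AGE query handled above; resp.aged is set once the timeout
 * has expired, otherwise sec_since_last_hit is valid.
 */
#if 0 /* example only, not built */
#include <stdio.h>

static void
example_query_age(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_age resp;
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_AGE,
        };
        struct rte_flow_error error;

        if (rte_flow_query(port_id, flow, &action, &resp, &error) == 0) {
                if (resp.aged)
                        printf("flow aged out\n");
                else if (resp.sec_since_last_hit_valid)
                        printf("idle for %u s\n", resp.sec_since_last_hit);
        }
}
#endif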
14167
14168 /**
14169  * Query a flow.
14170  *
14171  * @see rte_flow_query()
14172  * @see rte_flow_ops
14173  */
14174 static int
14175 flow_dv_query(struct rte_eth_dev *dev,
14176               struct rte_flow *flow,
14177               const struct rte_flow_action *actions,
14178               void *data,
14179               struct rte_flow_error *error)
14180 {
14181         int ret = -EINVAL;
14182
14183         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
14184                 switch (actions->type) {
14185                 case RTE_FLOW_ACTION_TYPE_VOID:
14186                         break;
14187                 case RTE_FLOW_ACTION_TYPE_COUNT:
14188                         ret = flow_dv_query_count(dev, flow, data, error);
14189                         break;
14190                 case RTE_FLOW_ACTION_TYPE_AGE:
14191                         ret = flow_dv_query_age(dev, flow, data, error);
14192                         break;
14193                 default:
14194                         return rte_flow_error_set(error, ENOTSUP,
14195                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14196                                                   actions,
14197                                                   "action not supported");
14198                 }
14199         }
14200         return ret;
14201 }
14202
14203 /**
14204  * Destroy the meter table set.
14205  * Lock free (mutex should be acquired by the caller).
14206  *
14207  * @param[in] dev
14208  *   Pointer to Ethernet device.
14209  * @param[in] fm
14210  *   Meter information table.
14211  */
14212 static void
14213 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
14214                         struct mlx5_flow_meter_info *fm)
14215 {
14216         struct mlx5_priv *priv = dev->data->dev_private;
14217         int i;
14218
14219         if (!fm || !priv->config.dv_flow_en)
14220                 return;
14221         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14222                 if (fm->drop_rule[i]) {
14223                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
14224                         fm->drop_rule[i] = NULL;
14225                 }
14226         }
14227 }
14228
14229 static void
14230 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
14231 {
14232         struct mlx5_priv *priv = dev->data->dev_private;
14233         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14234         struct mlx5_flow_tbl_data_entry *tbl;
14235         int i, j;
14236
14237         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14238                 if (mtrmng->def_rule[i]) {
14239                         claim_zero(mlx5_flow_os_destroy_flow
14240                                         (mtrmng->def_rule[i]));
14241                         mtrmng->def_rule[i] = NULL;
14242                 }
14243                 if (mtrmng->def_matcher[i]) {
14244                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
14245                                 struct mlx5_flow_tbl_data_entry, tbl);
14246                         mlx5_cache_unregister(&tbl->matchers,
14247                                       &mtrmng->def_matcher[i]->entry);
14248                         mtrmng->def_matcher[i] = NULL;
14249                 }
14250                 for (j = 0; j < MLX5_REG_BITS; j++) {
14251                         if (mtrmng->drop_matcher[i][j]) {
14252                                 tbl =
14253                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
14254                                              struct mlx5_flow_tbl_data_entry,
14255                                              tbl);
14256                                 mlx5_cache_unregister(&tbl->matchers,
14257                                         &mtrmng->drop_matcher[i][j]->entry);
14258                                 mtrmng->drop_matcher[i][j] = NULL;
14259                         }
14260                 }
14261                 if (mtrmng->drop_tbl[i]) {
14262                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14263                                 mtrmng->drop_tbl[i]);
14264                         mtrmng->drop_tbl[i] = NULL;
14265                 }
14266         }
14267 }
14268
14269 /* Number of meter flow actions, count and jump or count and drop. */
14270 #define METER_ACTIONS 2
14271
14272 static void
14273 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
14274                               enum mlx5_meter_domain domain)
14275 {
14276         struct mlx5_priv *priv = dev->data->dev_private;
14277         struct mlx5_flow_meter_def_policy *def_policy =
14278                         priv->sh->mtrmng->def_policy[domain];
14279
14280         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
14281         mlx5_free(def_policy);
14282         priv->sh->mtrmng->def_policy[domain] = NULL;
14283 }
14284
14285 /**
14286  * Destroy the default policy table set.
14287  *
14288  * @param[in] dev
14289  *   Pointer to Ethernet device.
14290  */
14291 static void
14292 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
14293 {
14294         struct mlx5_priv *priv = dev->data->dev_private;
14295         int i;
14296
14297         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
14298                 if (priv->sh->mtrmng->def_policy[i])
14299                         __flow_dv_destroy_domain_def_policy(dev,
14300                                         (enum mlx5_meter_domain)i);
14301         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
14302 }
14303
14304 static int
14305 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
14306                         uint32_t color_reg_c_idx,
14307                         enum rte_color color, void *matcher_object,
14308                         int actions_n, void *actions,
14309                         bool is_default_policy, void **rule,
14310                         const struct rte_flow_attr *attr)
14311 {
14312         int ret;
14313         struct mlx5_flow_dv_match_params value = {
14314                 .size = sizeof(value.buf) -
14315                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14316         };
14317         struct mlx5_flow_dv_match_params matcher = {
14318                 .size = sizeof(matcher.buf) -
14319                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14320         };
14321         struct mlx5_priv *priv = dev->data->dev_private;
14322
14323         if (!is_default_policy && (priv->representor || priv->master)) {
14324                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
14325                                                    value.buf, NULL, attr)) {
14326                         DRV_LOG(ERR,
14327                         "Failed to create meter policy flow with port.");
14328                         return -1;
14329                 }
14330         }
14331         flow_dv_match_meta_reg(matcher.buf, value.buf,
14332                                 (enum modify_reg)color_reg_c_idx,
14333                                 rte_col_2_mlx5_col(color),
14334                                 UINT32_MAX);
14335         ret = mlx5_flow_os_create_flow(matcher_object,
14336                         (void *)&value, actions_n, actions, rule);
14337         if (ret) {
14338                 DRV_LOG(ERR, "Failed to create meter policy flow.");
14339                 return -1;
14340         }
14341         return 0;
14342 }
14343
14344 static int
14345 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
14346                         uint32_t color_reg_c_idx,
14347                         uint16_t priority,
14348                         struct mlx5_flow_meter_sub_policy *sub_policy,
14349                         const struct rte_flow_attr *attr,
14350                         bool is_default_policy,
14351                         struct rte_flow_error *error)
14352 {
14353         struct mlx5_cache_entry *entry;
14354         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
14355         struct mlx5_flow_dv_matcher matcher = {
14356                 .mask = {
14357                         .size = sizeof(matcher.mask.buf) -
14358                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14359                 },
14360                 .tbl = tbl_rsc,
14361         };
14362         struct mlx5_flow_dv_match_params value = {
14363                 .size = sizeof(value.buf) -
14364                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14365         };
14366         struct mlx5_flow_cb_ctx ctx = {
14367                 .error = error,
14368                 .data = &matcher,
14369         };
14370         struct mlx5_flow_tbl_data_entry *tbl_data;
14371         struct mlx5_priv *priv = dev->data->dev_private;
14372         uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
14373
14374         if (!is_default_policy && (priv->representor || priv->master)) {
14375                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
14376                                                    value.buf, NULL, attr)) {
14377                         DRV_LOG(ERR,
14378                         "Failed to register meter drop matcher with port.");
14379                         return -1;
14380                 }
14381         }
14382         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
14383         if (priority < RTE_COLOR_RED)
14384                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14385                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
14386         matcher.priority = priority;
14387         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
14388                                         matcher.mask.size);
14389         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14390         if (!entry) {
14391                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
14392                 return -1;
14393         }
14394         sub_policy->color_matcher[priority] =
14395                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
14396         return 0;
14397 }
14398
14399 /**
14400  * Create the policy rules per domain.
14401  *
14402  * @param[in] dev
14403  *   Pointer to Ethernet device.
14404  * @param[in] sub_policy
14405  *   Pointer to the sub policy table.
14406  * @param[in] egress
14407  *   Direction of the table.
14408  * @param[in] transfer
14409  *   E-Switch or NIC flow.
14410  * @param[in] acts
14411  *   Pointer to policy action list per color.
14412  *
14413  * @return
14414  *   0 on success, -1 otherwise.
14415  */
14416 static int
14417 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
14418                 struct mlx5_flow_meter_sub_policy *sub_policy,
14419                 uint8_t egress, uint8_t transfer, bool is_default_policy,
14420                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
14421 {
14422         struct rte_flow_error flow_err;
14423         uint32_t color_reg_c_idx;
14424         struct rte_flow_attr attr = {
14425                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
14426                 .priority = 0,
14427                 .ingress = 0,
14428                 .egress = !!egress,
14429                 .transfer = !!transfer,
14430                 .reserved = 0,
14431         };
14432         int i;
14433         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
14434
14435         if (ret < 0)
14436                 return -1;
14437         /* Create policy table with POLICY level. */
14438         if (!sub_policy->tbl_rsc)
14439                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
14440                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
14441                                 egress, transfer, false, NULL, 0, 0,
14442                                 sub_policy->idx, &flow_err);
14443         if (!sub_policy->tbl_rsc) {
14444                 DRV_LOG(ERR,
14445                         "Failed to create meter sub policy table.");
14446                 return -1;
14447         }
14448         /* Prepare matchers. */
14449         color_reg_c_idx = ret;
14450         for (i = 0; i < RTE_COLORS; i++) {
14451                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
14452                         continue;
14453                 attr.priority = i;
14454                 if (!sub_policy->color_matcher[i]) {
14455                         /* Create matchers for Color. */
14456                         if (__flow_dv_create_policy_matcher(dev,
14457                                 color_reg_c_idx, i, sub_policy,
14458                                 &attr, is_default_policy, &flow_err))
14459                                 return -1;
14460                 }
14461                 /* Create flow, matching color. */
14462                 if (acts[i].actions_n)
14463                         if (__flow_dv_create_policy_flow(dev,
14464                                 color_reg_c_idx, (enum rte_color)i,
14465                                 sub_policy->color_matcher[i]->matcher_object,
14466                                 acts[i].actions_n,
14467                                 acts[i].dv_actions,
14468                                 is_default_policy,
14469                                 &sub_policy->color_rule[i],
14470                                 &attr))
14471                                 return -1;
14472         }
14473         return 0;
14474 }
14475
14476 static int
14477 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
14478                         struct mlx5_flow_meter_policy *mtr_policy,
14479                         struct mlx5_flow_meter_sub_policy *sub_policy,
14480                         uint32_t domain)
14481 {
14482         struct mlx5_priv *priv = dev->data->dev_private;
14483         struct mlx5_meter_policy_acts acts[RTE_COLORS];
14484         struct mlx5_flow_dv_tag_resource *tag;
14485         struct mlx5_flow_dv_port_id_action_resource *port_action;
14486         struct mlx5_hrxq *hrxq;
14487         uint8_t egress, transfer;
14488         int i;
14489
14490         for (i = 0; i < RTE_COLORS; i++) {
14491                 acts[i].actions_n = 0;
14492                 if (i == RTE_COLOR_YELLOW)
14493                         continue;
14494                 if (i == RTE_COLOR_RED) {
14495                         /* Only support drop on red. */
14496                         acts[i].dv_actions[0] =
14497                         mtr_policy->dr_drop_action[domain];
14498                         acts[i].actions_n = 1;
14499                         continue;
14500                 }
14501                 if (mtr_policy->act_cnt[i].rix_mark) {
14502                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
14503                                         mtr_policy->act_cnt[i].rix_mark);
14504                         if (!tag) {
14505                                 DRV_LOG(ERR, "Failed to find "
14506                                 "mark action for policy.");
14507                                 return -1;
14508                         }
14509                         acts[i].dv_actions[acts[i].actions_n] =
14510                                                 tag->action;
14511                         acts[i].actions_n++;
14512                 }
14513                 if (mtr_policy->act_cnt[i].modify_hdr) {
14514                         acts[i].dv_actions[acts[i].actions_n] =
14515                         mtr_policy->act_cnt[i].modify_hdr->action;
14516                         acts[i].actions_n++;
14517                 }
14518                 if (mtr_policy->act_cnt[i].fate_action) {
14519                         switch (mtr_policy->act_cnt[i].fate_action) {
14520                         case MLX5_FLOW_FATE_PORT_ID:
14521                                 port_action = mlx5_ipool_get
14522                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
14523                                 mtr_policy->act_cnt[i].rix_port_id_action);
14524                                 if (!port_action) {
14525                                         DRV_LOG(ERR, "Failed to find "
14526                                                 "port action for policy.");
14527                                         return -1;
14528                                 }
14529                                 acts[i].dv_actions[acts[i].actions_n] =
14530                                 port_action->action;
14531                                 acts[i].actions_n++;
14532                                 break;
14533                         case MLX5_FLOW_FATE_DROP:
14534                         case MLX5_FLOW_FATE_JUMP:
14535                                 acts[i].dv_actions[acts[i].actions_n] =
14536                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
14537                                 acts[i].actions_n++;
14538                                 break;
14539                         case MLX5_FLOW_FATE_SHARED_RSS:
14540                         case MLX5_FLOW_FATE_QUEUE:
14541                                 hrxq = mlx5_ipool_get
14542                                 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
14543                                 sub_policy->rix_hrxq[i]);
14544                                 if (!hrxq) {
14545                                         DRV_LOG(ERR, "Failed to find "
14546                                                 "queue action for policy.");
14547                                         return -1;
14548                                 }
14549                                 acts[i].dv_actions[acts[i].actions_n] =
14550                                 hrxq->action;
14551                                 acts[i].actions_n++;
14552                                 break;
14553                         default:
14554                                 /* Queue action does nothing. */
14555                                 break;
14556                         }
14557                 }
14558         }
14559         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14560         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14561         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
14562                                 egress, transfer, false, acts)) {
14563                 DRV_LOG(ERR,
14564                 "Failed to create policy rules per domain.");
14565                 return -1;
14566         }
14567         return 0;
14568 }
14569
14570 /**
14571  * Create the policy rules.
14572  *
14573  * @param[in] dev
14574  *   Pointer to Ethernet device.
14575  * @param[in,out] mtr_policy
14576  *   Pointer to meter policy table.
14577  *
14578  * @return
14579  *   0 on success, -1 otherwise.
14580  */
14581 static int
14582 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
14583                              struct mlx5_flow_meter_policy *mtr_policy)
14584 {
14585         int i;
14586         uint16_t sub_policy_num;
14587
14588         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14589                 sub_policy_num = (mtr_policy->sub_policy_num >>
14590                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14591                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14592                 if (!sub_policy_num)
14593                         continue;
14594                 /* Prepare actions list and create policy rules. */
14595                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
14596                         mtr_policy->sub_policys[i][0], i)) {
14597                         DRV_LOG(ERR,
14598                         "Failed to create policy action list per domain.");
14599                         return -1;
14600                 }
14601         }
14602         return 0;
14603 }
14604
14605 static int
14606 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
14607 {
14608         struct mlx5_priv *priv = dev->data->dev_private;
14609         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14610         struct mlx5_flow_meter_def_policy *def_policy;
14611         struct mlx5_flow_tbl_resource *jump_tbl;
14612         struct mlx5_flow_tbl_data_entry *tbl_data;
14613         uint8_t egress, transfer;
14614         struct rte_flow_error error;
14615         struct mlx5_meter_policy_acts acts[RTE_COLORS];
14616         int ret;
14617
14618         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14619         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14620         def_policy = mtrmng->def_policy[domain];
14621         if (!def_policy) {
14622                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
14623                         sizeof(struct mlx5_flow_meter_def_policy),
14624                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
14625                 if (!def_policy) {
14626                         DRV_LOG(ERR, "Failed to alloc "
14627                                         "default policy table.");
14628                         goto def_policy_error;
14629                 }
14630                 mtrmng->def_policy[domain] = def_policy;
14631                 /* Create the meter suffix table with SUFFIX level. */
14632                 jump_tbl = flow_dv_tbl_resource_get(dev,
14633                                 MLX5_FLOW_TABLE_LEVEL_METER,
14634                                 egress, transfer, false, NULL, 0,
14635                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
14636                 if (!jump_tbl) {
14637                         DRV_LOG(ERR,
14638                                 "Failed to create meter suffix table.");
14639                         goto def_policy_error;
14640                 }
14641                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
14642                 tbl_data = container_of(jump_tbl,
14643                                 struct mlx5_flow_tbl_data_entry, tbl);
14644                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
14645                                                 tbl_data->jump.action;
14646                 acts[RTE_COLOR_GREEN].dv_actions[0] =
14647                                                 tbl_data->jump.action;
14648                 acts[RTE_COLOR_GREEN].actions_n = 1;
14649                 /* Create jump action to the drop table. */
14650                 if (!mtrmng->drop_tbl[domain]) {
14651                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
14652                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
14653                                 egress, transfer, false, NULL, 0,
14654                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
14655                         if (!mtrmng->drop_tbl[domain]) {
14656                                 DRV_LOG(ERR, "Failed to create "
14657                                 "meter drop table for default policy.");
14658                                 goto def_policy_error;
14659                         }
14660                 }
14661                 tbl_data = container_of(mtrmng->drop_tbl[domain],
14662                                 struct mlx5_flow_tbl_data_entry, tbl);
14663                 def_policy->dr_jump_action[RTE_COLOR_RED] =
14664                                                 tbl_data->jump.action;
14665                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
14666                 acts[RTE_COLOR_RED].actions_n = 1;
14667                 /* Create default policy rules. */
14668                 ret = __flow_dv_create_domain_policy_rules(dev,
14669                                         &def_policy->sub_policy,
14670                                         egress, transfer, true, acts);
14671                 if (ret) {
14672                         DRV_LOG(ERR, "Failed to create "
14673                                 "default policy rules.");
14674                         goto def_policy_error;
14675                 }
14676         }
14677         return 0;
14678 def_policy_error:
14679         __flow_dv_destroy_domain_def_policy(dev,
14680                         (enum mlx5_meter_domain)domain);
14681         return -1;
14682 }
14683
14684 /**
14685  * Create the default policy table set.
14686  *
14687  * @param[in] dev
14688  *   Pointer to Ethernet device.
14689  * @return
14690  *   0 on success, -1 otherwise.
14691  */
14692 static int
14693 flow_dv_create_def_policy(struct rte_eth_dev *dev)
14694 {
14695         struct mlx5_priv *priv = dev->data->dev_private;
14696         int i;
14697
14698         /* Non-termination policy table. */
14699         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14700                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
14701                         continue;
14702                 if (__flow_dv_create_domain_def_policy(dev, i)) {
14703                         DRV_LOG(ERR,
14704                         "Failed to create default policy");
14705                         return -1;
14706                 }
14707         }
14708         return 0;
14709 }
14710
14711 /**
14712  * Create the needed meter tables.
14713  * Lock free (mutex should be acquired by the caller).
14714  *
14715  * @param[in] dev
14716  *   Pointer to Ethernet device.
14717  * @param[in] fm
14718  *   Meter information table.
14719  * @param[in] mtr_idx
14720  *   Meter index.
14721  * @param[in] domain_bitmap
14722  *   Domain bitmap.
14723  * @return
14724  *   0 on success, -1 otherwise.
14725  */
14726 static int
14727 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
14728                         struct mlx5_flow_meter_info *fm,
14729                         uint32_t mtr_idx,
14730                         uint8_t domain_bitmap)
14731 {
14732         struct mlx5_priv *priv = dev->data->dev_private;
14733         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
14734         struct rte_flow_error error;
14735         struct mlx5_flow_tbl_data_entry *tbl_data;
14736         uint8_t egress, transfer;
14737         void *actions[METER_ACTIONS];
14738         int domain, ret, i;
14739         struct mlx5_flow_counter *cnt;
14740         struct mlx5_flow_dv_match_params value = {
14741                 .size = sizeof(value.buf) -
14742                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14743         };
14744         struct mlx5_flow_dv_match_params matcher_para = {
14745                 .size = sizeof(matcher_para.buf) -
14746                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14747         };
14748         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
14749                                                      0, &error);
14750         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
14751         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
14752         struct mlx5_cache_entry *entry;
14753         struct mlx5_flow_dv_matcher matcher = {
14754                 .mask = {
14755                         .size = sizeof(matcher.mask.buf) -
14756                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
14757                 },
14758         };
14759         struct mlx5_flow_dv_matcher *drop_matcher;
14760         struct mlx5_flow_cb_ctx ctx = {
14761                 .error = &error,
14762                 .data = &matcher,
14763         };
14764
14765         if (!priv->mtr_en || mtr_id_reg_c < 0) {
14766                 rte_errno = ENOTSUP;
14767                 return -1;
14768         }
14769         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
14770                 if (!(domain_bitmap & (1 << domain)) ||
14771                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
14772                         continue;
14773                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14774                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14775                 /* Create the drop table with METER DROP level. */
14776                 if (!mtrmng->drop_tbl[domain]) {
14777                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
14778                                         MLX5_FLOW_TABLE_LEVEL_METER,
14779                                         egress, transfer, false, NULL, 0,
14780                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
14781                         if (!mtrmng->drop_tbl[domain]) {
14782                                 DRV_LOG(ERR, "Failed to create meter drop table.");
14783                                 goto policy_error;
14784                         }
14785                 }
14786                 /* Create default matcher in drop table. */
14787                 matcher.tbl = mtrmng->drop_tbl[domain];
14788                 tbl_data = container_of(mtrmng->drop_tbl[domain],
14789                                 struct mlx5_flow_tbl_data_entry, tbl);
14790                 if (!mtrmng->def_matcher[domain]) {
14791                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14792                                        (enum modify_reg)mtr_id_reg_c,
14793                                        0, 0);
14794                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
14795                         matcher.crc = rte_raw_cksum
14796                                         ((const void *)matcher.mask.buf,
14797                                         matcher.mask.size);
14798                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14799                         if (!entry) {
14800                                 DRV_LOG(ERR, "Failed to register meter "
14801                                 "drop default matcher.");
14802                                 goto policy_error;
14803                         }
14804                         mtrmng->def_matcher[domain] = container_of(entry,
14805                         struct mlx5_flow_dv_matcher, entry);
14806                 }
14807                 /* Create default rule in drop table. */
14808                 if (!mtrmng->def_rule[domain]) {
14809                         i = 0;
14810                         actions[i++] = priv->sh->dr_drop_action;
14811                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
14812                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
14813                         ret = mlx5_flow_os_create_flow
14814                                 (mtrmng->def_matcher[domain]->matcher_object,
14815                                 (void *)&value, i, actions,
14816                                 &mtrmng->def_rule[domain]);
14817                         if (ret) {
14818                                 DRV_LOG(ERR, "Failed to create meter "
14819                                 "default drop rule for drop table.");
14820                                 goto policy_error;
14821                         }
14822                 }
14823                 if (!fm->drop_cnt)
14824                         continue;
14825                 MLX5_ASSERT(mtrmng->max_mtr_bits);
14826                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
14827                         /* Create matchers for Drop. */
14828                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
14829                                         (enum modify_reg)mtr_id_reg_c, 0,
14830                                         (mtr_id_mask << mtr_id_offset));
14831                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
14832                         matcher.crc = rte_raw_cksum
14833                                         ((const void *)matcher.mask.buf,
14834                                         matcher.mask.size);
14835                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
14836                         if (!entry) {
14837                                 DRV_LOG(ERR,
14838                                 "Failed to register meter drop matcher.");
14839                                 goto policy_error;
14840                         }
14841                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
14842                                 container_of(entry, struct mlx5_flow_dv_matcher,
14843                                              entry);
14844                 }
14845                 drop_matcher =
14846                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
14847                 /* Create drop rule, matching meter_id only. */
14848                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
14849                                 (enum modify_reg)mtr_id_reg_c,
14850                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
14851                 i = 0;
14852                 cnt = flow_dv_counter_get_by_idx(dev,
14853                                         fm->drop_cnt, NULL);
14854                 actions[i++] = cnt->action;
14855                 actions[i++] = priv->sh->dr_drop_action;
14856                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
14857                                                (void *)&value, i, actions,
14858                                                &fm->drop_rule[domain]);
14859                 if (ret) {
14860                         DRV_LOG(ERR, "Failed to create meter "
14861                                 "drop rule for drop table.");
14862                         goto policy_error;
14863                 }
14864         }
14865         return 0;
14866 policy_error:
14867         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14868                 if (fm->drop_rule[i]) {
14869                         claim_zero(mlx5_flow_os_destroy_flow
14870                                 (fm->drop_rule[i]));
14871                         fm->drop_rule[i] = NULL;
14872                 }
14873         }
14874         return -1;
14875 }
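
/*
 * Editorial note: a worked example of the register layout used by the drop
 * rules above, assuming mtr_reg_share is set, MLX5_MTR_COLOR_BITS == 2 and
 * max_mtr_bits == 8 (the widths are illustrative):
 *
 *   reg_c value: | unused | meter idx (bits 9..2) | color (bits 1..0) |
 *
 *   mtr_id_offset = MLX5_MTR_COLOR_BITS;            -> 2
 *   mtr_id_mask   = (1u << max_mtr_bits) - 1;       -> 0x00ff
 *   match mask    = mtr_id_mask << mtr_id_offset;   -> 0x03fc
 *   match value   = mtr_idx << mtr_id_offset;       e.g. 5 -> 0x0014
 *
 * So a drop rule matches the meter index while ignoring the color bits.
 */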
14876
14877 /**
14878  * Find the policy table for prefix table with RSS.
14879  *
14880  * @param[in] dev
14881  *   Pointer to Ethernet device.
14882  * @param[in] mtr_policy
14883  *   Pointer to meter policy table.
14884  * @param[in] rss_desc
14885  *   Pointer to the RSS descriptors, one per meter color.
14886  * @return
14887  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
14888  */
14889 static struct mlx5_flow_meter_sub_policy *
14890 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
14891                 struct mlx5_flow_meter_policy *mtr_policy,
14892                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
14893 {
14894         struct mlx5_priv *priv = dev->data->dev_private;
14895         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
14896         uint32_t sub_policy_idx = 0;
14897         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
14898         uint32_t i, j;
14899         struct mlx5_hrxq *hrxq;
14900         struct mlx5_flow_handle dh;
14901         struct mlx5_meter_policy_action_container *act_cnt;
14902         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
14903         uint16_t sub_policy_num;
14904
14905         rte_spinlock_lock(&mtr_policy->sl);
14906         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14907                 if (!rss_desc[i])
14908                         continue;
14909                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
14910                 if (!hrxq_idx[i]) {
14911                         rte_spinlock_unlock(&mtr_policy->sl);
14912                         return NULL;
14913                 }
14914         }
14915         sub_policy_num = (mtr_policy->sub_policy_num >>
14916                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
14917                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14918         for (i = 0; i < sub_policy_num;
14919              i++) {
14920                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
14921                         if (rss_desc[j] &&
14922                             hrxq_idx[j] !=
14923                             mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
14924                                 break;
14925                 }
14926                 if (j >= MLX5_MTR_RTE_COLORS) {
14927                         /*
14928                          * Found the sub policy table with
14929                          * the same queue per color
14930                          */
14931                         rte_spinlock_unlock(&mtr_policy->sl);
14932                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
14933                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
14934                         return mtr_policy->sub_policys[domain][i];
14935                 }
14936         }
14937         /* Create sub policy. */
14938         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
14939                 /* Reuse the first dummy sub_policy. */
14940                 sub_policy = mtr_policy->sub_policys[domain][0];
14941                 sub_policy_idx = sub_policy->idx;
14942         } else {
14943                 sub_policy = mlx5_ipool_zmalloc
14944                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
14945                                 &sub_policy_idx);
14946                 if (!sub_policy ||
14947                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
14948                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
14949                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
14950                         goto rss_sub_policy_error;
14951                 }
14952                 sub_policy->idx = sub_policy_idx;
14953                 sub_policy->main_policy = mtr_policy;
14954         }
14955         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14956                 if (!rss_desc[i])
14957                         continue;
14958                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
14959                 /*
14960                  * Overwrite the last action from
14961                  * RSS action to Queue action.
14962                  */
14963                 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
14964                               hrxq_idx[i]);
14965                 if (!hrxq) {
14966                         DRV_LOG(ERR, "Failed to create policy hrxq");
14967                         goto rss_sub_policy_error;
14968                 }
14969                 act_cnt = &mtr_policy->act_cnt[i];
14970                 if (act_cnt->rix_mark || act_cnt->modify_hdr) {
14971                         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
14972                         if (act_cnt->rix_mark)
14973                                 dh.mark = 1;
14974                         dh.fate_action = MLX5_FLOW_FATE_QUEUE;
14975                         dh.rix_hrxq = hrxq_idx[i];
14976                         flow_drv_rxq_flags_set(dev, &dh);
14977                 }
14978         }
14979         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
14980                 sub_policy, domain)) {
14981                 DRV_LOG(ERR, "Failed to create policy "
14982                         "rules per domain.");
14983                 goto rss_sub_policy_error;
14984         }
14985         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
14986                 i = (mtr_policy->sub_policy_num >>
14987                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
14988                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14989                 mtr_policy->sub_policys[domain][i] = sub_policy;
14990                 i++;
14991                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
14992                         goto rss_sub_policy_error;
14993                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
14994                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
14995                 mtr_policy->sub_policy_num |=
14996                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
14997                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
14998         }
14999         rte_spinlock_unlock(&mtr_policy->sl);
15000         return sub_policy;
15001 rss_sub_policy_error:
15002         if (sub_policy) {
15003                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
15004                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
15005                         i = (mtr_policy->sub_policy_num >>
15006                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15007                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15008                         mtr_policy->sub_policys[domain][i] = NULL;
15009                         mlx5_ipool_free
15010                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15011                                         sub_policy->idx);
15012                 }
15013         }
15014         if (sub_policy_idx)
15015                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15016                         sub_policy_idx);
15017         rte_spinlock_unlock(&mtr_policy->sl);
15018         return NULL;
15019 }
15020
15021 /**
15022  * Validate the batch counter support in root table.
15023  *
15024  * Create a simple flow with invalid counter and drop action on root table to
15025  * validate if batch counter with offset on root table is supported or not.
15026  *
15027  * @param[in] dev
15028  *   Pointer to rte_eth_dev structure.
15029  *
15030  * @return
15031  *   0 on success, a negative errno value otherwise and rte_errno is set.
15032  */
15033 int
15034 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
15035 {
15036         struct mlx5_priv *priv = dev->data->dev_private;
15037         struct mlx5_dev_ctx_shared *sh = priv->sh;
15038         struct mlx5_flow_dv_match_params mask = {
15039                 .size = sizeof(mask.buf),
15040         };
15041         struct mlx5_flow_dv_match_params value = {
15042                 .size = sizeof(value.buf),
15043         };
15044         struct mlx5dv_flow_matcher_attr dv_attr = {
15045                 .type = IBV_FLOW_ATTR_NORMAL,
15046                 .priority = 0,
15047                 .match_criteria_enable = 0,
15048                 .match_mask = (void *)&mask,
15049         };
15050         void *actions[2] = { 0 };
15051         struct mlx5_flow_tbl_resource *tbl = NULL;
15052         struct mlx5_devx_obj *dcs = NULL;
15053         void *matcher = NULL;
15054         void *flow = NULL;
15055         int ret = -1;
15056
15057         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
15058                                         0, 0, 0, NULL);
15059         if (!tbl)
15060                 goto err;
15061         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
15062         if (!dcs)
15063                 goto err;
15064         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
15065                                                     &actions[0]);
15066         if (ret)
15067                 goto err;
15068         actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
15069                                           priv->drop_queue.hrxq->action;
15070         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
15071         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
15072                                                &matcher);
15073         if (ret)
15074                 goto err;
15075         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
15076                                        actions, &flow);
15077 err:
15078         /*
15079          * If a batch counter with offset is not supported, the driver does
15080          * not validate the invalid offset value and flow creation should
15081          * succeed, meaning batch counters are not supported in the root
15082          * table.
15083          * Otherwise, if flow creation fails, the counter offset is supported.
15084          */
15085         if (flow) {
15086                 DRV_LOG(INFO, "Batch counter is not supported in root "
15087                               "table. Switch to fallback mode.");
15088                 rte_errno = ENOTSUP;
15089                 ret = -rte_errno;
15090                 claim_zero(mlx5_flow_os_destroy_flow(flow));
15091         } else {
15092                 /* Check matcher to ensure validation failed at flow create. */
15093                 if (!matcher || errno != EINVAL)
15094                         DRV_LOG(ERR, "Unexpected error in counter offset "
15095                                      "support detection");
15096                 ret = 0;
15097         }
15098         if (actions[0])
15099                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
15100         if (matcher)
15101                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
15102         if (tbl)
15103                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
15104         if (dcs)
15105                 claim_zero(mlx5_devx_cmd_destroy(dcs));
15106         return ret;
15107 }
15108
15109 /**
15110  * Query a DevX counter.
15111  *
15112  * @param[in] dev
15113  *   Pointer to the Ethernet device structure.
15114  * @param[in] counter
15115  *   Index to the flow counter.
15116  * @param[in] clear
15117  *   Set to clear the counter statistics.
15118  * @param[out] pkts
15119  *   The statistics value of packets.
15120  * @param[out] bytes
15121  *   The statistics value of bytes.
15122  *
15123  * @return
15124  *   0 on success, otherwise return -1.
15125  */
15126 static int
15127 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
15128                       uint64_t *pkts, uint64_t *bytes)
15129 {
15130         struct mlx5_priv *priv = dev->data->dev_private;
15131         struct mlx5_flow_counter *cnt;
15132         uint64_t inn_pkts, inn_bytes;
15133         int ret;
15134
15135         if (!priv->config.devx)
15136                 return -1;
15137
15138         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
15139         if (ret)
15140                 return -1;
15141         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
15142         *pkts = inn_pkts - cnt->hits;
15143         *bytes = inn_bytes - cnt->bytes;
15144         if (clear) {
15145                 cnt->hits = inn_pkts;
15146                 cnt->bytes = inn_bytes;
15147         }
15148         return 0;
15149 }
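
/*
 * Editorial note: hardware counters increase monotonically, so the function
 * above reports deltas against a saved baseline. A minimal sketch of the
 * scheme (illustrative, not part of the driver):
 *
 *   reported_pkts = hw_pkts - baseline_pkts;
 *   if (clear)
 *           baseline_pkts = hw_pkts;   (restart the delta window)
 */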
15150
15151 /**
15152  * Get aged-out flows.
15153  *
15154  * @param[in] dev
15155  *   Pointer to the Ethernet device structure.
15156  * @param[in] context
15157  *   The address of an array of pointers to the aged-out flows contexts.
15158  * @param[in] nb_contexts
15159  *   The length of context array pointers.
15160  * @param[out] error
15161  *   Perform verbose error reporting if not NULL. Initialized in case of
15162  *   error only.
15163  *
15164  * @return
15165  *   The number of aged-out contexts on success, otherwise a negative
15166  *   errno value. If nb_contexts is 0, return the number of all aged-out
15167  *   contexts. If nb_contexts is not 0, return the number of aged-out
15168  *   flows reported in the context array.
15169  * @note: only stub for now
15170  */
15171 static int
15172 flow_get_aged_flows(struct rte_eth_dev *dev,
15173                     void **context,
15174                     uint32_t nb_contexts,
15175                     struct rte_flow_error *error)
15176 {
15177         struct mlx5_priv *priv = dev->data->dev_private;
15178         struct mlx5_age_info *age_info;
15179         struct mlx5_age_param *age_param;
15180         struct mlx5_flow_counter *counter;
15181         struct mlx5_aso_age_action *act;
15182         int nb_flows = 0;
15183
15184         if (nb_contexts && !context)
15185                 return rte_flow_error_set(error, EINVAL,
15186                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15187                                           NULL, "empty context");
15188         age_info = GET_PORT_AGE_INFO(priv);
15189         rte_spinlock_lock(&age_info->aged_sl);
15190         LIST_FOREACH(act, &age_info->aged_aso, next) {
15191                 nb_flows++;
15192                 if (nb_contexts) {
15193                         context[nb_flows - 1] =
15194                                                 act->age_params.context;
15195                         if (!(--nb_contexts))
15196                                 break;
15197                 }
15198         }
15199         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
15200                 nb_flows++;
15201                 if (nb_contexts) {
15202                         age_param = MLX5_CNT_TO_AGE(counter);
15203                         context[nb_flows - 1] = age_param->context;
15204                         if (!(--nb_contexts))
15205                                 break;
15206                 }
15207         }
15208         rte_spinlock_unlock(&age_info->aged_sl);
15209         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
15210         return nb_flows;
15211 }
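
/*
 * Editorial note: an illustrative application-side sketch (not part of this
 * file) of draining aged-out flows through the public API implemented above.
 * The array size is an arbitrary placeholder.
 */
#if 0 /* example only, not built */
static void
example_drain_aged_flows(uint16_t port_id)
{
        void *contexts[64];
        struct rte_flow_error error;
        int n, i;

        n = rte_flow_get_aged_flows(port_id, contexts, RTE_DIM(contexts),
                                    &error);
        for (i = 0; i < n; i++) {
                /* Each context is the one set in the AGE action conf. */
        }
}
#endif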
15212
15213 /*
15214  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
15215  */
15216 static uint32_t
15217 flow_dv_counter_allocate(struct rte_eth_dev *dev)
15218 {
15219         return flow_dv_counter_alloc(dev, 0);
15220 }
15221
15222 /**
15223  * Validate indirect action.
15224  * Dispatcher for action type specific validation.
15225  *
15226  * @param[in] dev
15227  *   Pointer to the Ethernet device structure.
15228  * @param[in] conf
 *   Indirect action configuration.
 * @param[in] action
 *   The indirect action object to validate.
 * @param[out] err
15233  *   Perform verbose error reporting if not NULL. Initialized in case of
15234  *   error only.
15235  *
15236  * @return
15237  *   0 on success, otherwise negative errno value.
15238  */
15239 static int
15240 flow_dv_action_validate(struct rte_eth_dev *dev,
15241                         const struct rte_flow_indir_action_conf *conf,
15242                         const struct rte_flow_action *action,
15243                         struct rte_flow_error *err)
15244 {
15245         struct mlx5_priv *priv = dev->data->dev_private;
15246
15247         RTE_SET_USED(conf);
15248         switch (action->type) {
15249         case RTE_FLOW_ACTION_TYPE_RSS:
		/*
		 * priv->obj_ops is set according to driver capabilities.
		 * When DevX capabilities are sufficient, it is set to
		 * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
		 * ibv_obj_ops doesn't support the ind_table_modify
		 * operation, so the shared RSS action can't be used
		 * in that case.
		 */
15258                 if (priv->obj_ops.ind_table_modify == NULL)
15259                         return rte_flow_error_set
15260                                         (err, ENOTSUP,
15261                                          RTE_FLOW_ERROR_TYPE_ACTION,
15262                                          NULL,
15263                                          "shared RSS action not supported");
15264                 return mlx5_validate_action_rss(dev, action, err);
15265         case RTE_FLOW_ACTION_TYPE_AGE:
15266                 if (!priv->sh->aso_age_mng)
15267                         return rte_flow_error_set(err, ENOTSUP,
15268                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15269                                                 NULL,
15270                                              "shared age action not supported");
15271                 return flow_dv_validate_action_age(0, action, dev, err);
15272         default:
15273                 return rte_flow_error_set(err, ENOTSUP,
15274                                           RTE_FLOW_ERROR_TYPE_ACTION,
15275                                           NULL,
15276                                           "action type not supported");
15277         }
15278 }
15279
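/*
 * Editor's note -- illustrative sketch, not compiled into the driver.
 * flow_dv_action_validate() is invoked on the rte_flow_action_handle_create()
 * path before the indirect action is instantiated. A minimal shared RSS
 * example ("port_id" and the queue list are assumptions):
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss,
 *	};
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_error error;
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &conf, &action, &error);
 */
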
/**
 * Validate meter policy actions.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   Per-color arrays of meter policy actions to validate.
 * @param[in] attr
 *   Attributes of flow to determine the steering domain.
 * @param[out] is_rss
 *   Set to true when the policy uses an RSS action.
 * @param[out] domain_bitmap
 *   Bitmap of the steering domains the policy is valid for.
 * @param[out] is_def_policy
 *   Set to true when the actions match the default policy.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
15297 static int
15298 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
15299                         const struct rte_flow_action *actions[RTE_COLORS],
15300                         struct rte_flow_attr *attr,
15301                         bool *is_rss,
15302                         uint8_t *domain_bitmap,
15303                         bool *is_def_policy,
15304                         struct rte_mtr_error *error)
15305 {
15306         struct mlx5_priv *priv = dev->data->dev_private;
15307         struct mlx5_dev_config *dev_conf = &priv->config;
15308         const struct rte_flow_action *act;
15309         uint64_t action_flags = 0;
15310         int actions_n;
15311         int i, ret;
15312         struct rte_flow_error flow_err;
15313         uint8_t domain_color[RTE_COLORS] = {0};
15314         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
15315
15316         if (!priv->config.dv_esw_en)
15317                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
15318         *domain_bitmap = def_domain;
15319         if (actions[RTE_COLOR_YELLOW] &&
15320                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
15321                 return -rte_mtr_error_set(error, ENOTSUP,
15322                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15323                                 NULL,
15324                                 "Yellow color does not support any action.");
	if (!actions[RTE_COLOR_RED] ||
		actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
		return -rte_mtr_error_set(error, ENOTSUP,
				RTE_MTR_ERROR_TYPE_METER_POLICY,
				NULL, "Red color only supports drop action.");
15330         /*
15331          * Check default policy actions:
15332          * Green/Yellow: no action, Red: drop action
15333          */
15334         if ((!actions[RTE_COLOR_GREEN] ||
15335                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
15336                 *is_def_policy = true;
15337                 return 0;
15338         }
15339         flow_err.message = NULL;
15340         for (i = 0; i < RTE_COLORS; i++) {
15341                 act = actions[i];
15342                 for (action_flags = 0, actions_n = 0;
15343                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
15344                         act++) {
15345                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
15346                                 return -rte_mtr_error_set(error, ENOTSUP,
15347                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15348                                           NULL, "too many actions");
15349                         switch (act->type) {
15350                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15351                                 if (!priv->config.dv_esw_en)
15352                                         return -rte_mtr_error_set(error,
15353                                         ENOTSUP,
15354                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "PORT action is not supported"
					" when E-Switch is disabled");
15357                                 ret = flow_dv_validate_action_port_id(dev,
15358                                                 action_flags,
15359                                                 act, attr, &flow_err);
15360                                 if (ret)
15361                                         return -rte_mtr_error_set(error,
15362                                         ENOTSUP,
15363                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15364                                         NULL, flow_err.message ?
15365                                         flow_err.message :
15366                                         "PORT action validate check fail");
15367                                 ++actions_n;
15368                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15369                                 break;
15370                         case RTE_FLOW_ACTION_TYPE_MARK:
15371                                 ret = flow_dv_validate_action_mark(dev, act,
15372                                                            action_flags,
15373                                                            attr, &flow_err);
15374                                 if (ret < 0)
15375                                         return -rte_mtr_error_set(error,
15376                                         ENOTSUP,
15377                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15378                                         NULL, flow_err.message ?
15379                                         flow_err.message :
15380                                         "Mark action validate check fail");
15381                                 if (dev_conf->dv_xmeta_en !=
15382                                         MLX5_XMETA_MODE_LEGACY)
15383                                         return -rte_mtr_error_set(error,
15384                                         ENOTSUP,
15385                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "Extended MARK action is "
					"not supported. Please try to use "
					"the default policy for meter.");
15389                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15390                                 ++actions_n;
15391                                 break;
15392                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15393                                 ret = flow_dv_validate_action_set_tag(dev,
15394                                                         act, action_flags,
15395                                                         attr, &flow_err);
15396                                 if (ret)
15397                                         return -rte_mtr_error_set(error,
15398                                         ENOTSUP,
15399                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15400                                         NULL, flow_err.message ?
15401                                         flow_err.message :
15402                                         "Set tag action validate check fail");
15403                                 /*
15404                                  * Count all modify-header actions
15405                                  * as one action.
15406                                  */
15407                                 if (!(action_flags &
15408                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
15409                                         ++actions_n;
15410                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15411                                 break;
15412                         case RTE_FLOW_ACTION_TYPE_DROP:
15413                                 ret = mlx5_flow_validate_action_drop
15414                                         (action_flags,
15415                                         attr, &flow_err);
15416                                 if (ret < 0)
15417                                         return -rte_mtr_error_set(error,
15418                                         ENOTSUP,
15419                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15420                                         NULL, flow_err.message ?
15421                                         flow_err.message :
15422                                         "Drop action validate check fail");
15423                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15424                                 ++actions_n;
15425                                 break;
15426                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15427                                 /*
15428                                  * Check whether extensive
15429                                  * metadata feature is engaged.
15430                                  */
15431                                 if (dev_conf->dv_flow_en &&
15432                                         (dev_conf->dv_xmeta_en !=
15433                                         MLX5_XMETA_MODE_LEGACY) &&
15434                                         mlx5_flow_ext_mreg_supported(dev))
15435                                         return -rte_mtr_error_set(error,
15436                                           ENOTSUP,
15437                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Queue action with meta "
					  "is not supported. Please try to use "
					  "the default policy for meter.");
15441                                 ret = mlx5_flow_validate_action_queue(act,
15442                                                         action_flags, dev,
15443                                                         attr, &flow_err);
15444                                 if (ret < 0)
15445                                         return -rte_mtr_error_set(error,
15446                                           ENOTSUP,
15447                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15448                                           NULL, flow_err.message ?
15449                                           flow_err.message :
15450                                           "Queue action validate check fail");
15451                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15452                                 ++actions_n;
15453                                 break;
15454                         case RTE_FLOW_ACTION_TYPE_RSS:
15455                                 if (dev_conf->dv_flow_en &&
15456                                         (dev_conf->dv_xmeta_en !=
15457                                         MLX5_XMETA_MODE_LEGACY) &&
15458                                         mlx5_flow_ext_mreg_supported(dev))
15459                                         return -rte_mtr_error_set(error,
15460                                           ENOTSUP,
15461                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "RSS action with meta "
					  "is not supported. Please try to use "
					  "the default policy for meter.");
15465                                 ret = mlx5_validate_action_rss(dev, act,
15466                                                 &flow_err);
15467                                 if (ret < 0)
15468                                         return -rte_mtr_error_set(error,
15469                                           ENOTSUP,
15470                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15471                                           NULL, flow_err.message ?
15472                                           flow_err.message :
15473                                           "RSS action validate check fail");
15474                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15475                                 ++actions_n;
15476                                 *is_rss = true;
15477                                 break;
15478                         case RTE_FLOW_ACTION_TYPE_JUMP:
15479                                 ret = flow_dv_validate_action_jump(dev,
15480                                         NULL, act, action_flags,
15481                                         attr, true, &flow_err);
15482                                 if (ret)
15483                                         return -rte_mtr_error_set(error,
15484                                           ENOTSUP,
15485                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15486                                           NULL, flow_err.message ?
15487                                           flow_err.message :
15488                                           "Jump action validate check fail");
15489                                 ++actions_n;
15490                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15491                                 break;
15492                         default:
15493                                 return -rte_mtr_error_set(error, ENOTSUP,
15494                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"Unsupported action in meter policy");
15497                         }
15498                 }
15499                 /* Yellow is not supported, just skip. */
15500                 if (i == RTE_COLOR_YELLOW)
15501                         continue;
15502                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
15503                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
15504                 else if ((action_flags &
15505                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
15506                         (action_flags & MLX5_FLOW_ACTION_MARK))
			/*
			 * Only MLX5_XMETA_MODE_LEGACY is supported, so the
			 * MARK action is valid only in the ingress domain.
			 */
15511                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
15512                 else
15513                         domain_color[i] = def_domain;
15514                 /*
15515                  * Validate the drop action mutual exclusion
15516                  * with other actions. Drop action is mutually-exclusive
15517                  * with any other action, except for Count action.
15518                  */
15519                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
15520                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
15521                         return -rte_mtr_error_set(error, ENOTSUP,
15522                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15523                                 NULL, "Drop action is mutually-exclusive "
15524                                 "with any other action");
15525                 }
		/* E-Switch has a few restrictions on using items and actions. */
15527                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
15528                         if (!mlx5_flow_ext_mreg_supported(dev) &&
15529                                 action_flags & MLX5_FLOW_ACTION_MARK)
15530                                 return -rte_mtr_error_set(error, ENOTSUP,
15531                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15532                                         NULL, "unsupported action MARK");
15533                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
15534                                 return -rte_mtr_error_set(error, ENOTSUP,
15535                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15536                                         NULL, "unsupported action QUEUE");
15537                         if (action_flags & MLX5_FLOW_ACTION_RSS)
15538                                 return -rte_mtr_error_set(error, ENOTSUP,
15539                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15540                                         NULL, "unsupported action RSS");
15541                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
15542                                 return -rte_mtr_error_set(error, ENOTSUP,
15543                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15544                                         NULL, "no fate action is found");
15545                 } else {
15546                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
15547                                 (domain_color[i] &
15548                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
15549                                 if ((domain_color[i] &
15550                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
15551                                         domain_color[i] =
15552                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
15553                                 else
15554                                         return -rte_mtr_error_set(error,
15555                                         ENOTSUP,
15556                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15557                                         NULL, "no fate action is found");
15558                         }
15559                 }
15560                 if (domain_color[i] != def_domain)
15561                         *domain_bitmap = domain_color[i];
15562         }
15563         return 0;
15564 }
15565
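/*
 * Editor's note -- illustrative sketch, not compiled into the driver.
 * The "default policy" shortcut above triggers when green carries no
 * action (NULL or END); red is expected to drop. A policy exercising it
 * through the rte_mtr API ("port_id" and "policy_id" are assumptions):
 *
 *	struct rte_flow_action red_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_mtr_meter_policy_params policy = {
 *		.actions = {
 *			[RTE_COLOR_GREEN] = NULL,
 *			[RTE_COLOR_YELLOW] = NULL,
 *			[RTE_COLOR_RED] = red_acts,
 *		},
 *	};
 *	struct rte_mtr_error mtr_error;
 *
 *	rte_mtr_meter_policy_add(port_id, policy_id, &policy, &mtr_error);
 */
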
15566 static int
15567 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
15568 {
15569         struct mlx5_priv *priv = dev->data->dev_private;
15570         int ret = 0;
15571
15572         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
15573                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
15574                                                 flags);
15575                 if (ret != 0)
15576                         return ret;
15577         }
15578         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
15579                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
15580                 if (ret != 0)
15581                         return ret;
15582         }
15583         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
15584                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
15585                 if (ret != 0)
15586                         return ret;
15587         }
15588         return 0;
15589 }
15590
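/*
 * Editor's note -- illustrative sketch, not compiled into the driver.
 * flow_dv_sync_domain() serves the mlx5-specific rte_pmd_mlx5_sync_flow()
 * call (declared in rte_pmd_mlx5.h, included above); the bitmap selects
 * which steering domains to synchronize ("port_id" is an assumption):
 *
 *	uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_NIC_TX |
 *			   MLX5_DOMAIN_BIT_FDB;
 *	int ret = rte_pmd_mlx5_sync_flow(port_id, domains);
 */
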
15591 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
15592         .validate = flow_dv_validate,
15593         .prepare = flow_dv_prepare,
15594         .translate = flow_dv_translate,
15595         .apply = flow_dv_apply,
15596         .remove = flow_dv_remove,
15597         .destroy = flow_dv_destroy,
15598         .query = flow_dv_query,
15599         .create_mtr_tbls = flow_dv_create_mtr_tbls,
15600         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
15601         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
15602         .create_meter = flow_dv_mtr_alloc,
15603         .free_meter = flow_dv_aso_mtr_release_to_pool,
15604         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
15605         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
15606         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
15607         .create_policy_rules = flow_dv_create_policy_rules,
15608         .destroy_policy_rules = flow_dv_destroy_policy_rules,
15609         .create_def_policy = flow_dv_create_def_policy,
15610         .destroy_def_policy = flow_dv_destroy_def_policy,
15611         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
15612         .counter_alloc = flow_dv_counter_allocate,
15613         .counter_free = flow_dv_counter_free,
15614         .counter_query = flow_dv_counter_query,
15615         .get_aged_flows = flow_get_aged_flows,
15616         .action_validate = flow_dv_action_validate,
15617         .action_create = flow_dv_action_create,
15618         .action_destroy = flow_dv_action_destroy,
15619         .action_update = flow_dv_action_update,
15620         .action_query = flow_dv_action_query,
15621         .sync_domain = flow_dv_sync_domain,
15622 };
15623
15624 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
15625