net/mlx5: fix aging counter deallocation
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. The layer
         * flags from the prefix flow must be used, as the suffix flow may
         * not contain the user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

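/*
 * Worked example for flow_dv_attr_init() above (a sketch, the layer values
 * are assumptions): for a suffix flow whose prefix flow matched IPv4/UDP,
 * handle->layers carries MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 * MLX5_FLOW_LAYER_OUTER_L4_UDP, so the routine sets attr->ipv4, attr->udp
 * and attr->valid to 1 and returns without scanning the item list.
 */
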
/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static const struct rte_flow_item *
mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                default:
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                        return item;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                            item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
                                return item;
                        break;
                }
        }
        return NULL;
}

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

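/*
 * Minimal sketch, not part of the driver logic: composing a VLAN TCI from
 * PCP and VID with the masks defined above, mirroring what
 * mlx5_update_vlan_vid_pcp() does on an existing header. The sample values
 * (PCP 5, VID 100) are assumptions for illustration only.
 */
static __rte_unused uint16_t
flow_dv_example_vlan_tci(void)
{
        uint16_t tci = 0;

        /* PCP occupies the top 3 bits of the TCI (shift 13). */
        tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
        tci |= (uint16_t)(5 << MLX5DV_FLOW_VLAN_PCP_SHIFT);
        /* VID occupies the low 12 bits. */
        tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
        tci |= 100;
        return tci; /* 0xa064 */
}
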
/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   The converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

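/*
 * Minimal usage sketch for flow_dv_fetch_field() above (not driver logic,
 * the byte values are assumptions): fields are read from the big-endian
 * byte array and returned host-endian.
 */
static __rte_unused void
flow_dv_example_fetch_field(void)
{
        static const uint8_t data[] = { 0x12, 0x34, 0x56, 0x78 };

        MLX5_ASSERT(flow_dv_fetch_field(data, 1) == 0x12);
        MLX5_ASSERT(flow_dv_fetch_field(data, 2) == 0x1234);
        MLX5_ASSERT(flow_dv_fetch_field(data, 3) == 0x123456);
        MLX5_ASSERT(flow_dv_fetch_field(data, 4) == 0x12345678);
}
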
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should also be presented in big-endian format.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

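/*
 * Worked example for the width deduction in flow_dv_convert_modify_action()
 * above (a sketch, the mask value is an assumption): a mask of 0x00ffff00
 * yields off_b = 8 and size_b = 32 - 8 - 8 = 16, i.e. a command modifying
 * 16 bits starting at bit offset 8. A full 32-bit mask yields size_b = 32,
 * which the command encodes as 0.
 */
static __rte_unused void
flow_dv_example_mask_width(void)
{
        uint32_t mask = 0x00ffff00;
        unsigned int off_b = rte_bsf32(mask);
        unsigned int size_b = sizeof(uint32_t) * CHAR_BIT -
                              off_b - __builtin_clz(mask);

        MLX5_ASSERT(off_b == 8);
        MLX5_ASSERT(size_b == 16);
}
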
/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y, X times; each addition of
                 * UINT32_MAX decrements Y by 1 (mod 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

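/*
 * Sketch of the decrement-by-increment trick used above (the values are
 * assumptions): in 32-bit modular arithmetic, adding UINT32_MAX is the
 * same as subtracting 1, so adding X * UINT32_MAX subtracts X.
 */
static __rte_unused void
flow_dv_example_dec_by_add(void)
{
        uint32_t seq = 1000;

        /* Adding UINT32_MAX three times decrements by 3 (mod 2^32). */
        seq += (uint32_t)(3 * (uint64_t)UINT32_MAX);
        MLX5_ASSERT(seq == 997);
}
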
/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y, X times; each addition of
                 * UINT32_MAX decrements Y by 1 (mod 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness, because
                         * there is no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        /*
         * In the datapath code there are no endianness
         * conversions, for performance reasons; all
         * pattern conversions are done in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

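/*
 * Worked example for the REG_C_0 adjustment above (a sketch, the mask
 * value is an assumption): with dv_regc0_mask = 0x0000ffff on a
 * little-endian host,
 *   shl_c0 = 32 - rte_fls_u32(0x0000ffff) = 32 - 16 = 16
 * so both data and mask are shifted into the upper 16 bits before the
 * big-endian conversion done by flow_dv_convert_modify_action(), keeping
 * the reserved half of reg_c[0] untouched.
 */
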
/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bit offset in IPv6 is not byte-aligned,
         * rdma-core only accepts the DSCP bits byte-aligned, in bits 0
         * to 5, to be compatible with IPv4. Hence there is no need to
         * shift the bits in the IPv6 case; rdma-core requires the
         * byte-aligned value.
         */
1350         ipv6.hdr.vtc_flow = conf->dscp;
1351         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1352         item.spec = &ipv6;
1353         item.mask = &ipv6_mask;
1354         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1355                                              MLX5_MODIFICATION_TYPE_SET, error);
1356 }
1357
1358 static int
1359 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1360                            enum rte_flow_field_id field)
1361 {
1362         switch (field) {
1363         case RTE_FLOW_FIELD_START:
1364                 return 32;
1365         case RTE_FLOW_FIELD_MAC_DST:
1366         case RTE_FLOW_FIELD_MAC_SRC:
1367                 return 48;
1368         case RTE_FLOW_FIELD_VLAN_TYPE:
1369                 return 16;
1370         case RTE_FLOW_FIELD_VLAN_ID:
1371                 return 12;
1372         case RTE_FLOW_FIELD_MAC_TYPE:
1373                 return 16;
1374         case RTE_FLOW_FIELD_IPV4_DSCP:
1375                 return 6;
1376         case RTE_FLOW_FIELD_IPV4_TTL:
1377                 return 8;
1378         case RTE_FLOW_FIELD_IPV4_SRC:
1379         case RTE_FLOW_FIELD_IPV4_DST:
1380                 return 32;
1381         case RTE_FLOW_FIELD_IPV6_DSCP:
1382                 return 6;
1383         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1384                 return 8;
1385         case RTE_FLOW_FIELD_IPV6_SRC:
1386         case RTE_FLOW_FIELD_IPV6_DST:
1387                 return 128;
1388         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1389         case RTE_FLOW_FIELD_TCP_PORT_DST:
1390                 return 16;
1391         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1392         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1393                 return 32;
1394         case RTE_FLOW_FIELD_TCP_FLAGS:
1395                 return 9;
1396         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1397         case RTE_FLOW_FIELD_UDP_PORT_DST:
1398                 return 16;
1399         case RTE_FLOW_FIELD_VXLAN_VNI:
1400         case RTE_FLOW_FIELD_GENEVE_VNI:
1401                 return 24;
1402         case RTE_FLOW_FIELD_GTP_TEID:
1403         case RTE_FLOW_FIELD_TAG:
1404                 return 32;
1405         case RTE_FLOW_FIELD_MARK:
1406                 return 24;
1407         case RTE_FLOW_FIELD_META:
1408                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1409                         return 16;
1410                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1411                         return 32;
1412                 else
1413                         return 0;
1414         case RTE_FLOW_FIELD_POINTER:
1415         case RTE_FLOW_FIELD_VALUE:
1416                 return 64;
1417         default:
1418                 MLX5_ASSERT(false);
1419         }
1420         return 0;
1421 }
1422
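/**
 * Fill the modification info array for the given field ID.
 *
 * Wide fields (MAC addresses, IPv6 addresses) are split into 32-bit
 * hardware words; the per-word big-endian masks are derived from the
 * requested width, and immediate values are packed according to the
 * destination field width.
 *
 * @param[in] data
 *   Pointer to the field description (field ID, level, offset or value).
 * @param[out] info
 *   Array of modification descriptors to fill.
 * @param[out] mask
 *   Array of per-word masks to fill, NULL when no mask is required.
 * @param[out] value
 *   Array receiving the immediate value for VALUE/POINTER sources.
 * @param[in] width
 *   Number of bits to modify.
 * @param[in] dst_width
 *   Bit width of the destination field.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to the error structure.
 */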
1423 static void
1424 mlx5_flow_field_id_to_modify_info
1425                 (const struct rte_flow_action_modify_data *data,
1426                  struct field_modify_info *info,
1427                  uint32_t *mask, uint32_t *value,
1428                  uint32_t width, uint32_t dst_width,
1429                  struct rte_eth_dev *dev,
1430                  const struct rte_flow_attr *attr,
1431                  struct rte_flow_error *error)
1432 {
1433         struct mlx5_priv *priv = dev->data->dev_private;
1434         struct mlx5_dev_config *config = &priv->config;
1435         uint32_t idx = 0;
1436         uint64_t val = 0;
1437         switch (data->field) {
1438         case RTE_FLOW_FIELD_START:
1439                 /* not supported yet */
1440                 MLX5_ASSERT(false);
1441                 break;
1442         case RTE_FLOW_FIELD_MAC_DST:
1443                 if (mask) {
1444                         if (data->offset < 32) {
1445                                 info[idx] = (struct field_modify_info){4, 0,
1446                                                 MLX5_MODI_OUT_DMAC_47_16};
1447                                 if (width < 32) {
1448                                         mask[idx] =
1449                                                 rte_cpu_to_be_32(0xffffffff >>
1450                                                                  (32 - width));
1451                                         width = 0;
1452                                 } else {
1453                                         mask[idx] = RTE_BE32(0xffffffff);
1454                                         width -= 32;
1455                                 }
1456                                 if (!width)
1457                                         break;
1458                                 ++idx;
1459                         }
1460                         info[idx] = (struct field_modify_info){2, 4 * idx,
1461                                                 MLX5_MODI_OUT_DMAC_15_0};
1462                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1463                 } else {
1464                         if (data->offset < 32)
1465                                 info[idx++] = (struct field_modify_info){4, 0,
1466                                                 MLX5_MODI_OUT_DMAC_47_16};
1467                         info[idx] = (struct field_modify_info){2, 0,
1468                                                 MLX5_MODI_OUT_DMAC_15_0};
1469                 }
1470                 break;
1471         case RTE_FLOW_FIELD_MAC_SRC:
1472                 if (mask) {
1473                         if (data->offset < 32) {
1474                                 info[idx] = (struct field_modify_info){4, 0,
1475                                                 MLX5_MODI_OUT_SMAC_47_16};
1476                                 if (width < 32) {
1477                                         mask[idx] =
1478                                                 rte_cpu_to_be_32(0xffffffff >>
1479                                                                 (32 - width));
1480                                         width = 0;
1481                                 } else {
1482                                         mask[idx] = RTE_BE32(0xffffffff);
1483                                         width -= 32;
1484                                 }
1485                                 if (!width)
1486                                         break;
1487                                 ++idx;
1488                         }
1489                         info[idx] = (struct field_modify_info){2, 4 * idx,
1490                                                 MLX5_MODI_OUT_SMAC_15_0};
1491                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1492                 } else {
1493                         if (data->offset < 32)
1494                                 info[idx++] = (struct field_modify_info){4, 0,
1495                                                 MLX5_MODI_OUT_SMAC_47_16};
1496                         info[idx] = (struct field_modify_info){2, 0,
1497                                                 MLX5_MODI_OUT_SMAC_15_0};
1498                 }
1499                 break;
1500         case RTE_FLOW_FIELD_VLAN_TYPE:
1501                 /* not supported yet */
1502                 break;
1503         case RTE_FLOW_FIELD_VLAN_ID:
1504                 info[idx] = (struct field_modify_info){2, 0,
1505                                         MLX5_MODI_OUT_FIRST_VID};
1506                 if (mask)
1507                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1508                 break;
1509         case RTE_FLOW_FIELD_MAC_TYPE:
1510                 info[idx] = (struct field_modify_info){2, 0,
1511                                         MLX5_MODI_OUT_ETHERTYPE};
1512                 if (mask)
1513                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1514                 break;
1515         case RTE_FLOW_FIELD_IPV4_DSCP:
1516                 info[idx] = (struct field_modify_info){1, 0,
1517                                         MLX5_MODI_OUT_IP_DSCP};
1518                 if (mask)
1519                         mask[idx] = 0x3f >> (6 - width);
1520                 break;
1521         case RTE_FLOW_FIELD_IPV4_TTL:
1522                 info[idx] = (struct field_modify_info){1, 0,
1523                                         MLX5_MODI_OUT_IPV4_TTL};
1524                 if (mask)
1525                         mask[idx] = 0xff >> (8 - width);
1526                 break;
1527         case RTE_FLOW_FIELD_IPV4_SRC:
1528                 info[idx] = (struct field_modify_info){4, 0,
1529                                         MLX5_MODI_OUT_SIPV4};
1530                 if (mask)
1531                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1532                                                      (32 - width));
1533                 break;
1534         case RTE_FLOW_FIELD_IPV4_DST:
1535                 info[idx] = (struct field_modify_info){4, 0,
1536                                         MLX5_MODI_OUT_DIPV4};
1537                 if (mask)
1538                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1539                                                      (32 - width));
1540                 break;
1541         case RTE_FLOW_FIELD_IPV6_DSCP:
1542                 info[idx] = (struct field_modify_info){1, 0,
1543                                         MLX5_MODI_OUT_IP_DSCP};
1544                 if (mask)
1545                         mask[idx] = 0x3f >> (6 - width);
1546                 break;
1547         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1548                 info[idx] = (struct field_modify_info){1, 0,
1549                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1550                 if (mask)
1551                         mask[idx] = 0xff >> (8 - width);
1552                 break;
1553         case RTE_FLOW_FIELD_IPV6_SRC:
1554                 if (mask) {
1555                         if (data->offset < 32) {
1556                                 info[idx] = (struct field_modify_info){4,
1557                                                 4 * idx,
1558                                                 MLX5_MODI_OUT_SIPV6_31_0};
1559                                 if (width < 32) {
1560                                         mask[idx] =
1561                                                 rte_cpu_to_be_32(0xffffffff >>
1562                                                                  (32 - width));
1563                                         width = 0;
1564                                 } else {
1565                                         mask[idx] = RTE_BE32(0xffffffff);
1566                                         width -= 32;
1567                                 }
1568                                 if (!width)
1569                                         break;
1570                                 ++idx;
1571                         }
1572                         if (data->offset < 64) {
1573                                 info[idx] = (struct field_modify_info){4,
1574                                                 4 * idx,
1575                                                 MLX5_MODI_OUT_SIPV6_63_32};
1576                                 if (width < 32) {
1577                                         mask[idx] =
1578                                                 rte_cpu_to_be_32(0xffffffff >>
1579                                                                  (32 - width));
1580                                         width = 0;
1581                                 } else {
1582                                         mask[idx] = RTE_BE32(0xffffffff);
1583                                         width -= 32;
1584                                 }
1585                                 if (!width)
1586                                         break;
1587                                 ++idx;
1588                         }
1589                         if (data->offset < 96) {
1590                                 info[idx] = (struct field_modify_info){4,
1591                                                 4 * idx,
1592                                                 MLX5_MODI_OUT_SIPV6_95_64};
1593                                 if (width < 32) {
1594                                         mask[idx] =
1595                                                 rte_cpu_to_be_32(0xffffffff >>
1596                                                                  (32 - width));
1597                                         width = 0;
1598                                 } else {
1599                                         mask[idx] = RTE_BE32(0xffffffff);
1600                                         width -= 32;
1601                                 }
1602                                 if (!width)
1603                                         break;
1604                                 ++idx;
1605                         }
1606                         info[idx] = (struct field_modify_info){4, 4 * idx,
1607                                                 MLX5_MODI_OUT_SIPV6_127_96};
1608                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1609                                                      (32 - width));
1610                 } else {
1611                         if (data->offset < 32)
1612                                 info[idx++] = (struct field_modify_info){4, 0,
1613                                                 MLX5_MODI_OUT_SIPV6_31_0};
1614                         if (data->offset < 64)
1615                                 info[idx++] = (struct field_modify_info){4, 0,
1616                                                 MLX5_MODI_OUT_SIPV6_63_32};
1617                         if (data->offset < 96)
1618                                 info[idx++] = (struct field_modify_info){4, 0,
1619                                                 MLX5_MODI_OUT_SIPV6_95_64};
1620                         if (data->offset < 128)
1621                                 info[idx++] = (struct field_modify_info){4, 0,
1622                                                 MLX5_MODI_OUT_SIPV6_127_96};
1623                 }
1624                 break;
1625         case RTE_FLOW_FIELD_IPV6_DST:
1626                 if (mask) {
1627                         if (data->offset < 32) {
1628                                 info[idx] = (struct field_modify_info){4,
1629                                                 4 * idx,
1630                                                 MLX5_MODI_OUT_DIPV6_31_0};
1631                                 if (width < 32) {
1632                                         mask[idx] =
1633                                                 rte_cpu_to_be_32(0xffffffff >>
1634                                                                  (32 - width));
1635                                         width = 0;
1636                                 } else {
1637                                         mask[idx] = RTE_BE32(0xffffffff);
1638                                         width -= 32;
1639                                 }
1640                                 if (!width)
1641                                         break;
1642                                 ++idx;
1643                         }
1644                         if (data->offset < 64) {
1645                                 info[idx] = (struct field_modify_info){4,
1646                                                 4 * idx,
1647                                                 MLX5_MODI_OUT_DIPV6_63_32};
1648                                 if (width < 32) {
1649                                         mask[idx] =
1650                                                 rte_cpu_to_be_32(0xffffffff >>
1651                                                                  (32 - width));
1652                                         width = 0;
1653                                 } else {
1654                                         mask[idx] = RTE_BE32(0xffffffff);
1655                                         width -= 32;
1656                                 }
1657                                 if (!width)
1658                                         break;
1659                                 ++idx;
1660                         }
1661                         if (data->offset < 96) {
1662                                 info[idx] = (struct field_modify_info){4,
1663                                                 4 * idx,
1664                                                 MLX5_MODI_OUT_DIPV6_95_64};
1665                                 if (width < 32) {
1666                                         mask[idx] =
1667                                                 rte_cpu_to_be_32(0xffffffff >>
1668                                                                  (32 - width));
1669                                         width = 0;
1670                                 } else {
1671                                         mask[idx] = RTE_BE32(0xffffffff);
1672                                         width -= 32;
1673                                 }
1674                                 if (!width)
1675                                         break;
1676                                 ++idx;
1677                         }
1678                         info[idx] = (struct field_modify_info){4, 4 * idx,
1679                                                 MLX5_MODI_OUT_DIPV6_127_96};
1680                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1681                                                      (32 - width));
1682                 } else {
1683                         if (data->offset < 32)
1684                                 info[idx++] = (struct field_modify_info){4, 0,
1685                                                 MLX5_MODI_OUT_DIPV6_31_0};
1686                         if (data->offset < 64)
1687                                 info[idx++] = (struct field_modify_info){4, 0,
1688                                                 MLX5_MODI_OUT_DIPV6_63_32};
1689                         if (data->offset < 96)
1690                                 info[idx++] = (struct field_modify_info){4, 0,
1691                                                 MLX5_MODI_OUT_DIPV6_95_64};
1692                         if (data->offset < 128)
1693                                 info[idx++] = (struct field_modify_info){4, 0,
1694                                                 MLX5_MODI_OUT_DIPV6_127_96};
1695                 }
1696                 break;
1697         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1698                 info[idx] = (struct field_modify_info){2, 0,
1699                                         MLX5_MODI_OUT_TCP_SPORT};
1700                 if (mask)
1701                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1702                 break;
1703         case RTE_FLOW_FIELD_TCP_PORT_DST:
1704                 info[idx] = (struct field_modify_info){2, 0,
1705                                         MLX5_MODI_OUT_TCP_DPORT};
1706                 if (mask)
1707                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1708                 break;
1709         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1710                 info[idx] = (struct field_modify_info){4, 0,
1711                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1712                 if (mask)
1713                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1714                                                      (32 - width));
1715                 break;
1716         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1717                 info[idx] = (struct field_modify_info){4, 0,
1718                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1719                 if (mask)
1720                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1721                                                      (32 - width));
1722                 break;
1723         case RTE_FLOW_FIELD_TCP_FLAGS:
1724                 info[idx] = (struct field_modify_info){2, 0,
1725                                         MLX5_MODI_OUT_TCP_FLAGS};
1726                 if (mask)
1727                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1728                 break;
1729         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1730                 info[idx] = (struct field_modify_info){2, 0,
1731                                         MLX5_MODI_OUT_UDP_SPORT};
1732                 if (mask)
1733                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1734                 break;
1735         case RTE_FLOW_FIELD_UDP_PORT_DST:
1736                 info[idx] = (struct field_modify_info){2, 0,
1737                                         MLX5_MODI_OUT_UDP_DPORT};
1738                 if (mask)
1739                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1740                 break;
1741         case RTE_FLOW_FIELD_VXLAN_VNI:
1742                 /* not supported yet */
1743                 break;
1744         case RTE_FLOW_FIELD_GENEVE_VNI:
1745                 /* not supported yet */
1746                 break;
1747         case RTE_FLOW_FIELD_GTP_TEID:
1748                 info[idx] = (struct field_modify_info){4, 0,
1749                                         MLX5_MODI_GTP_TEID};
1750                 if (mask)
1751                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1752                                                      (32 - width));
1753                 break;
1754         case RTE_FLOW_FIELD_TAG:
1755                 {
1756                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1757                                                    data->level, error);
1758                         if (reg < 0)
1759                                 return;
1760                         MLX5_ASSERT(reg != REG_NON);
1761                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1762                         info[idx] = (struct field_modify_info){4, 0,
1763                                                 reg_to_field[reg]};
1764                         if (mask)
1765                                 mask[idx] =
1766                                         rte_cpu_to_be_32(0xffffffff >>
1767                                                          (32 - width));
1768                 }
1769                 break;
1770         case RTE_FLOW_FIELD_MARK:
1771                 {
1772                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1773                                                        0, error);
1774                         if (reg < 0)
1775                                 return;
1776                         MLX5_ASSERT(reg != REG_NON);
1777                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1778                         info[idx] = (struct field_modify_info){4, 0,
1779                                                 reg_to_field[reg]};
1780                         if (mask)
1781                                 mask[idx] =
1782                                         rte_cpu_to_be_32(0xffffffff >>
1783                                                          (32 - width));
1784                 }
1785                 break;
1786         case RTE_FLOW_FIELD_META:
1787                 {
1788                         unsigned int xmeta = config->dv_xmeta_en;
1789                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1790                         if (reg < 0)
1791                                 return;
1792                         MLX5_ASSERT(reg != REG_NON);
1793                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1794                         if (xmeta == MLX5_XMETA_MODE_META16) {
1795                                 info[idx] = (struct field_modify_info){2, 0,
1796                                                         reg_to_field[reg]};
1797                                 if (mask)
1798                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1799                                                                 (16 - width));
1800                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1801                                 info[idx] = (struct field_modify_info){4, 0,
1802                                                         reg_to_field[reg]};
1803                                 if (mask)
1804                                         mask[idx] =
1805                                                 rte_cpu_to_be_32(0xffffffff >>
1806                                                                 (32 - width));
1807                         } else {
1808                                 MLX5_ASSERT(false);
1809                         }
1810                 }
1811                 break;
1812         case RTE_FLOW_FIELD_POINTER:
1813         case RTE_FLOW_FIELD_VALUE:
1814                 if (data->field == RTE_FLOW_FIELD_POINTER)
1815                         memcpy(&val, (void *)(uintptr_t)data->value,
1816                                sizeof(uint64_t));
1817                 else
1818                         val = data->value;
1819                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1820                         if (mask[idx]) {
1821                                 if (dst_width > 16) {
1822                                         value[idx] = rte_cpu_to_be_32(val);
1823                                         val >>= 32;
1824                                 } else if (dst_width > 8) {
1825                                         value[idx] = rte_cpu_to_be_16(val);
1826                                         val >>= 16;
1827                                 } else {
1828                                         value[idx] = (uint8_t)val;
1829                                         val >>= 8;
1830                                 }
1831                                 if (!val)
1832                                         break;
1833                         }
1834                 }
1835                 break;
1836         default:
1837                 MLX5_ASSERT(false);
1838                 break;
1839         }
1840 }
1841
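/*
 * Worked example (illustrative): modifying the full 48-bit destination
 * MAC (width == 48, offset == 0) yields two descriptors from the
 * function above:
 *   info[0] = {4, 0, MLX5_MODI_OUT_DMAC_47_16}, mask[0] = 0xffffffff
 *   info[1] = {2, 4, MLX5_MODI_OUT_DMAC_15_0},  mask[1] = 0xffff
 * i.e. one 32-bit word and one 16-bit word covering bits 47..0.
 */
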
1842 /**
1843  * Convert modify_field action to DV specification.
1844  *
1845  * @param[in] dev
1846  *   Pointer to the rte_eth_dev structure.
1847  * @param[in,out] resource
1848  *   Pointer to the modify-header resource.
1849  * @param[in] action
1850  *   Pointer to action specification.
1851  * @param[in] attr
1852  *   Attributes of flow that includes this item.
1853  * @param[out] error
1854  *   Pointer to the error structure.
1855  *
1856  * @return
1857  *   0 on success, a negative errno value otherwise and rte_errno is set.
1858  */
1859 static int
1860 flow_dv_convert_action_modify_field
1861                         (struct rte_eth_dev *dev,
1862                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1863                          const struct rte_flow_action *action,
1864                          const struct rte_flow_attr *attr,
1865                          struct rte_flow_error *error)
1866 {
1867         struct mlx5_priv *priv = dev->data->dev_private;
1868         struct mlx5_dev_config *config = &priv->config;
1869         const struct rte_flow_action_modify_field *conf =
1870                 (const struct rte_flow_action_modify_field *)(action->conf);
1871         struct rte_flow_item item;
1872         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1873                                                                 {0, 0, 0} };
1874         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1875                                                                 {0, 0, 0} };
1876         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1877         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1878         uint32_t type;
1879         uint32_t dst_width = mlx5_flow_item_field_width(config,
1880                                                         conf->dst.field);
1881
1882         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1883                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1884                 type = MLX5_MODIFICATION_TYPE_SET;
1885                 /* For SET fill the destination field (field) first. */
1886                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1887                         value, conf->width, dst_width, dev, attr, error);
1888                 /* Then copy immediate value from source as per mask. */
1889                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1890                         value, conf->width, dst_width, dev, attr, error);
1891                 item.spec = &value;
1892         } else {
1893                 type = MLX5_MODIFICATION_TYPE_COPY;
1894                 /* For COPY fill the destination field (dcopy) without mask. */
1895                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1896                         value, conf->width, dst_width, dev, attr, error);
1897                 /* Then construct the source field (field) with mask. */
1898                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1899                         value, conf->width, dst_width, dev, attr, error);
1900         }
1901         item.mask = &mask;
1902         return flow_dv_convert_modify_action(&item,
1903                         field, dcopy, resource, type, error);
1904 }
1905
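/*
 * A minimal application-side sketch (illustrative only): a modify_field
 * action setting the IPv4 TTL from an immediate value, as handled by the
 * converter above. The name and the TTL value 64 are examples.
 */
static const struct rte_flow_action_modify_field flow_dv_example_set_ttl
	__rte_unused = {
	.operation = RTE_FLOW_MODIFY_SET,
	.dst = { .field = RTE_FLOW_FIELD_IPV4_TTL },
	.src = { .field = RTE_FLOW_FIELD_VALUE, .value = 64 },
	.width = 8,
};
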
1906 /**
1907  * Validate MARK item.
1908  *
1909  * @param[in] dev
1910  *   Pointer to the rte_eth_dev structure.
1911  * @param[in] item
1912  *   Item specification.
1913  * @param[in] attr
1914  *   Attributes of flow that includes this item.
1915  * @param[out] error
1916  *   Pointer to error structure.
1917  *
1918  * @return
1919  *   0 on success, a negative errno value otherwise and rte_errno is set.
1920  */
1921 static int
1922 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1923                            const struct rte_flow_item *item,
1924                            const struct rte_flow_attr *attr __rte_unused,
1925                            struct rte_flow_error *error)
1926 {
1927         struct mlx5_priv *priv = dev->data->dev_private;
1928         struct mlx5_dev_config *config = &priv->config;
1929         const struct rte_flow_item_mark *spec = item->spec;
1930         const struct rte_flow_item_mark *mask = item->mask;
1931         const struct rte_flow_item_mark nic_mask = {
1932                 .id = priv->sh->dv_mark_mask,
1933         };
1934         int ret;
1935
1936         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1937                 return rte_flow_error_set(error, ENOTSUP,
1938                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1939                                           "extended metadata feature"
1940                                           " isn't enabled");
1941         if (!mlx5_flow_ext_mreg_supported(dev))
1942                 return rte_flow_error_set(error, ENOTSUP,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1944                                           "extended metadata register"
1945                                           " isn't supported");
1946         if (!nic_mask.id)
1947                 return rte_flow_error_set(error, ENOTSUP,
1948                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1949                                           "extended metadata register"
1950                                           " isn't available");
1951         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1952         if (ret < 0)
1953                 return ret;
1954         if (!spec)
1955                 return rte_flow_error_set(error, EINVAL,
1956                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1957                                           item->spec,
1958                                           "data cannot be empty");
1959         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1960                 return rte_flow_error_set(error, EINVAL,
1961                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1962                                           &spec->id,
1963                                           "mark id exceeds the limit");
1964         if (!mask)
1965                 mask = &nic_mask;
1966         if (!mask->id)
1967                 return rte_flow_error_set(error, EINVAL,
1968                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1969                                         "mask cannot be zero");
1970
1971         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1972                                         (const uint8_t *)&nic_mask,
1973                                         sizeof(struct rte_flow_item_mark),
1974                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1975         if (ret < 0)
1976                 return ret;
1977         return 0;
1978 }
1979
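/*
 * A minimal application-side sketch (illustrative only): a MARK match
 * that the validator above accepts only with extended metadata mode
 * enabled (dv_xmeta_en != legacy). The name and id are examples.
 */
static const struct rte_flow_item_mark flow_dv_example_mark
	__rte_unused = {
	.id = 42,
};
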
1980 /**
1981  * Validate META item.
1982  *
1983  * @param[in] dev
1984  *   Pointer to the rte_eth_dev structure.
1985  * @param[in] item
1986  *   Item specification.
1987  * @param[in] attr
1988  *   Attributes of flow that includes this item.
1989  * @param[out] error
1990  *   Pointer to error structure.
1991  *
1992  * @return
1993  *   0 on success, a negative errno value otherwise and rte_errno is set.
1994  */
1995 static int
1996 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1997                            const struct rte_flow_item *item,
1998                            const struct rte_flow_attr *attr,
1999                            struct rte_flow_error *error)
2000 {
2001         struct mlx5_priv *priv = dev->data->dev_private;
2002         struct mlx5_dev_config *config = &priv->config;
2003         const struct rte_flow_item_meta *spec = item->spec;
2004         const struct rte_flow_item_meta *mask = item->mask;
2005         struct rte_flow_item_meta nic_mask = {
2006                 .data = UINT32_MAX
2007         };
2008         int reg;
2009         int ret;
2010
2011         if (!spec)
2012                 return rte_flow_error_set(error, EINVAL,
2013                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2014                                           item->spec,
2015                                           "data cannot be empty");
2016         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2017                 if (!mlx5_flow_ext_mreg_supported(dev))
2018                         return rte_flow_error_set(error, ENOTSUP,
2019                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2020                                           "extended metadata register"
2021                                           " isn't supported");
2022                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2023                 if (reg < 0)
2024                         return reg;
2025                 if (reg == REG_NON)
2026                         return rte_flow_error_set(error, ENOTSUP,
2027                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2028                                         "unavailable extended metadata register");
2029                 if (reg == REG_B)
2030                         return rte_flow_error_set(error, ENOTSUP,
2031                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2032                                           "match on reg_b "
2033                                           "isn't supported");
2034                 if (reg != REG_A)
2035                         nic_mask.data = priv->sh->dv_meta_mask;
2036         } else {
2037                 if (attr->transfer)
2038                         return rte_flow_error_set(error, ENOTSUP,
2039                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2040                                         "extended metadata feature "
2041                                         "should be enabled when "
2042                                         "meta item is requested "
2043                                         "with e-switch mode");
2044                 if (attr->ingress)
2045                         return rte_flow_error_set(error, ENOTSUP,
2046                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2047                                         "match on metadata for ingress "
2048                                         "is not supported in legacy "
2049                                         "metadata mode");
2050         }
2051         if (!mask)
2052                 mask = &rte_flow_item_meta_mask;
2053         if (!mask->data)
2054                 return rte_flow_error_set(error, EINVAL,
2055                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2056                                         "mask cannot be zero");
2057
2058         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2059                                         (const uint8_t *)&nic_mask,
2060                                         sizeof(struct rte_flow_item_meta),
2061                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2062         return ret;
2063 }
2064
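/*
 * A minimal application-side sketch (illustrative only): a META match
 * with a full mask, subject to the metadata register checks above.
 * The names and the data value are examples.
 */
static const struct rte_flow_item_meta flow_dv_example_meta_spec
	__rte_unused = {
	.data = 0xcafe,
};
static const struct rte_flow_item_meta flow_dv_example_meta_mask
	__rte_unused = {
	.data = UINT32_MAX,
};
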
2065 /**
2066  * Validate TAG item.
2067  *
2068  * @param[in] dev
2069  *   Pointer to the rte_eth_dev structure.
2070  * @param[in] item
2071  *   Item specification.
2072  * @param[in] attr
2073  *   Attributes of flow that includes this item.
2074  * @param[out] error
2075  *   Pointer to error structure.
2076  *
2077  * @return
2078  *   0 on success, a negative errno value otherwise and rte_errno is set.
2079  */
2080 static int
2081 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2082                           const struct rte_flow_item *item,
2083                           const struct rte_flow_attr *attr __rte_unused,
2084                           struct rte_flow_error *error)
2085 {
2086         const struct rte_flow_item_tag *spec = item->spec;
2087         const struct rte_flow_item_tag *mask = item->mask;
2088         const struct rte_flow_item_tag nic_mask = {
2089                 .data = RTE_BE32(UINT32_MAX),
2090                 .index = 0xff,
2091         };
2092         int ret;
2093
2094         if (!mlx5_flow_ext_mreg_supported(dev))
2095                 return rte_flow_error_set(error, ENOTSUP,
2096                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2097                                           "extended metadata register"
2098                                           " isn't supported");
2099         if (!spec)
2100                 return rte_flow_error_set(error, EINVAL,
2101                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2102                                           item->spec,
2103                                           "data cannot be empty");
2104         if (!mask)
2105                 mask = &rte_flow_item_tag_mask;
2106         if (!mask->data)
2107                 return rte_flow_error_set(error, EINVAL,
2108                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2109                                         "mask cannot be zero");
2110
2111         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2112                                         (const uint8_t *)&nic_mask,
2113                                         sizeof(struct rte_flow_item_tag),
2114                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2115         if (ret < 0)
2116                 return ret;
2117         if (mask->index != 0xff)
2118                 return rte_flow_error_set(error, EINVAL,
2119                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2120                                           "partial mask for tag index"
2121                                           " is not supported");
2122         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2123         if (ret < 0)
2124                 return ret;
2125         MLX5_ASSERT(ret != REG_NON);
2126         return 0;
2127 }
2128
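/*
 * A minimal application-side sketch (illustrative only): a TAG match on
 * tag array index 1 with a full index mask, as the validator above
 * requires. The name and values are examples.
 */
static const struct rte_flow_item_tag flow_dv_example_tag
	__rte_unused = {
	.data = 0x1234,
	.index = 1,
};
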
2129 /**
2130  * Validate vport item.
2131  *
2132  * @param[in] dev
2133  *   Pointer to the rte_eth_dev structure.
2134  * @param[in] item
2135  *   Item specification.
2136  * @param[in] attr
2137  *   Attributes of flow that includes this item.
2138  * @param[in] item_flags
2139  *   Bit-fields that holds the items detected until now.
2140  * @param[out] error
2141  *   Pointer to error structure.
2142  *
2143  * @return
2144  *   0 on success, a negative errno value otherwise and rte_errno is set.
2145  */
2146 static int
2147 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2148                               const struct rte_flow_item *item,
2149                               const struct rte_flow_attr *attr,
2150                               uint64_t item_flags,
2151                               struct rte_flow_error *error)
2152 {
2153         const struct rte_flow_item_port_id *spec = item->spec;
2154         const struct rte_flow_item_port_id *mask = item->mask;
2155         const struct rte_flow_item_port_id switch_mask = {
2156                         .id = 0xffffffff,
2157         };
2158         struct mlx5_priv *esw_priv;
2159         struct mlx5_priv *dev_priv;
2160         int ret;
2161
2162         if (!attr->transfer)
2163                 return rte_flow_error_set(error, EINVAL,
2164                                           RTE_FLOW_ERROR_TYPE_ITEM,
2165                                           NULL,
2166                                           "match on port id is valid only"
2167                                           " when transfer flag is enabled");
2168         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2169                 return rte_flow_error_set(error, ENOTSUP,
2170                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2171                                           "multiple source ports are not"
2172                                           " supported");
2173         if (!mask)
2174                 mask = &switch_mask;
2175         if (mask->id != 0xffffffff)
2176                 return rte_flow_error_set(error, ENOTSUP,
2177                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2178                                            mask,
2179                                            "no support for partial mask on"
2180                                            " \"id\" field");
2181         ret = mlx5_flow_item_acceptable
2182                                 (item, (const uint8_t *)mask,
2183                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2184                                  sizeof(struct rte_flow_item_port_id),
2185                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2186         if (ret)
2187                 return ret;
2188         if (!spec)
2189                 return 0;
2190         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2191         if (!esw_priv)
2192                 return rte_flow_error_set(error, rte_errno,
2193                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2194                                           "failed to obtain E-Switch info for"
2195                                           " port");
2196         dev_priv = mlx5_dev_to_eswitch_info(dev);
2197         if (!dev_priv)
2198                 return rte_flow_error_set(error, rte_errno,
2199                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2200                                           NULL,
2201                                           "failed to obtain E-Switch info");
2202         if (esw_priv->domain_id != dev_priv->domain_id)
2203                 return rte_flow_error_set(error, EINVAL,
2204                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2205                                           "cannot match on a port from a"
2206                                           " different E-Switch");
2207         return 0;
2208 }
2209
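/*
 * A minimal application-side sketch (illustrative only): a PORT_ID match
 * for a transfer flow, as the validator above requires; the port must
 * belong to the same E-Switch domain. The name and id are examples.
 */
static const struct rte_flow_item_port_id flow_dv_example_port
	__rte_unused = {
	.id = 0,
};
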
2210 /**
2211  * Validate VLAN item.
2212  *
2213  * @param[in] item
2214  *   Item specification.
2215  * @param[in] item_flags
2216  *   Bit-fields that holds the items detected until now.
2217  * @param[in] dev
2218  *   Ethernet device flow is being created on.
2219  * @param[out] error
2220  *   Pointer to error structure.
2221  *
2222  * @return
2223  *   0 on success, a negative errno value otherwise and rte_errno is set.
2224  */
2225 static int
2226 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2227                            uint64_t item_flags,
2228                            struct rte_eth_dev *dev,
2229                            struct rte_flow_error *error)
2230 {
2231         const struct rte_flow_item_vlan *mask = item->mask;
2232         const struct rte_flow_item_vlan nic_mask = {
2233                 .tci = RTE_BE16(UINT16_MAX),
2234                 .inner_type = RTE_BE16(UINT16_MAX),
2235                 .has_more_vlan = 1,
2236         };
2237         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2238         int ret;
2239         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2240                                         MLX5_FLOW_LAYER_INNER_L4) :
2241                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2242                                         MLX5_FLOW_LAYER_OUTER_L4);
2243         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2244                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2245
2246         if (item_flags & vlanm)
2247                 return rte_flow_error_set(error, EINVAL,
2248                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2249                                           "multiple VLAN layers not supported");
2250         else if ((item_flags & l34m) != 0)
2251                 return rte_flow_error_set(error, EINVAL,
2252                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2253                                           "VLAN cannot follow L3/L4 layer");
2254         if (!mask)
2255                 mask = &rte_flow_item_vlan_mask;
2256         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2257                                         (const uint8_t *)&nic_mask,
2258                                         sizeof(struct rte_flow_item_vlan),
2259                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2260         if (ret)
2261                 return ret;
2262         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2263                 struct mlx5_priv *priv = dev->data->dev_private;
2264
2265                 if (priv->vmwa_context) {
2266                         /*
2267                          * A non-NULL context means we run in a virtual
2268                          * machine with SR-IOV enabled, so we must create a
2269                          * VLAN interface to make the hypervisor set up the
2270                          * E-Switch vport context correctly. We avoid creating
2271                          * multiple VLAN interfaces, so a VLAN tag mask is not supported.
2272                          */
2273                         return rte_flow_error_set(error, EINVAL,
2274                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2275                                                   item,
2276                                                   "VLAN tag mask is not"
2277                                                   " supported in virtual"
2278                                                   " environment");
2279                 }
2280         }
2281         return 0;
2282 }
2283
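/*
 * A minimal application-side sketch (illustrative only): a VLAN match on
 * VID 100 with the 0x0fff TCI mask, the widest mask accepted when a VM
 * workaround context is active. The names and values are examples.
 */
static const struct rte_flow_item_vlan flow_dv_example_vlan_spec
	__rte_unused = {
	.tci = RTE_BE16(100),
};
static const struct rte_flow_item_vlan flow_dv_example_vlan_mask
	__rte_unused = {
	.tci = RTE_BE16(0x0fff),
};
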
2284 /*
2285  * GTP flags are contained in 1 byte of the format:
2286  * -------------------------------------------
2287  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2288  * |-----------------------------------------|
2289  * | value | Version | PT | Res | E | S | PN |
2290  * -------------------------------------------
2291  *
2292  * Matching is supported only for GTP flags E, S, PN.
2293  */
2294 #define MLX5_GTP_FLAGS_MASK     0x07
2295
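/*
 * Note: E, S and PN are the three least significant bits of the flags
 * byte, so MLX5_GTP_FLAGS_MASK == 0x07 covers exactly those bits while
 * excluding Version, PT and the reserved bit.
 */
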
2296 /**
2297  * Validate GTP item.
2298  *
2299  * @param[in] dev
2300  *   Pointer to the rte_eth_dev structure.
2301  * @param[in] item
2302  *   Item specification.
2303  * @param[in] item_flags
2304  *   Bit-fields that holds the items detected until now.
2305  * @param[out] error
2306  *   Pointer to error structure.
2307  *
2308  * @return
2309  *   0 on success, a negative errno value otherwise and rte_errno is set.
2310  */
2311 static int
2312 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2313                           const struct rte_flow_item *item,
2314                           uint64_t item_flags,
2315                           struct rte_flow_error *error)
2316 {
2317         struct mlx5_priv *priv = dev->data->dev_private;
2318         const struct rte_flow_item_gtp *spec = item->spec;
2319         const struct rte_flow_item_gtp *mask = item->mask;
2320         const struct rte_flow_item_gtp nic_mask = {
2321                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2322                 .msg_type = 0xff,
2323                 .teid = RTE_BE32(0xffffffff),
2324         };
2325
2326         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2327                 return rte_flow_error_set(error, ENOTSUP,
2328                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2329                                           "GTP support is not enabled");
2330         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2331                 return rte_flow_error_set(error, ENOTSUP,
2332                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2333                                           "multiple tunnel layers not"
2334                                           " supported");
2335         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2336                 return rte_flow_error_set(error, EINVAL,
2337                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2338                                           "no outer UDP layer found");
2339         if (!mask)
2340                 mask = &rte_flow_item_gtp_mask;
2341         if (spec && (spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK))
2342                 return rte_flow_error_set(error, ENOTSUP,
2343                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2344                                           "Match is supported for GTP"
2345                                           " flags only");
2346         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2347                                          (const uint8_t *)&nic_mask,
2348                                          sizeof(struct rte_flow_item_gtp),
2349                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2350 }
2351
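/*
 * A minimal application-side sketch (illustrative only): a GTP match on
 * a G-PDU (message type 0xff) with the E flag set; only the E/S/PN flags
 * are maskable, as enforced above. The name and TEID are examples.
 */
static const struct rte_flow_item_gtp flow_dv_example_gtp
	__rte_unused = {
	.v_pt_rsv_flags = MLX5_GTP_EXT_HEADER_FLAG,
	.msg_type = 0xff,
	.teid = RTE_BE32(1234),
};
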
2352 /**
2353  * Validate GTP PSC item.
2354  *
2355  * @param[in] item
2356  *   Item specification.
2357  * @param[in] last_item
2358  *   Previous validated item in the pattern items.
2359  * @param[in] gtp_item
2360  *   Previous GTP item specification.
2361  * @param[in] attr
2362  *   Pointer to flow attributes.
2363  * @param[out] error
2364  *   Pointer to error structure.
2365  *
2366  * @return
2367  *   0 on success, a negative errno value otherwise and rte_errno is set.
2368  */
2369 static int
2370 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2371                               uint64_t last_item,
2372                               const struct rte_flow_item *gtp_item,
2373                               const struct rte_flow_attr *attr,
2374                               struct rte_flow_error *error)
2375 {
2376         const struct rte_flow_item_gtp *gtp_spec;
2377         const struct rte_flow_item_gtp *gtp_mask;
2378         const struct rte_flow_item_gtp_psc *spec;
2379         const struct rte_flow_item_gtp_psc *mask;
2380         const struct rte_flow_item_gtp_psc nic_mask = {
2381                 .pdu_type = 0xFF,
2382                 .qfi = 0xFF,
2383         };
2384
2385         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2386                 return rte_flow_error_set
2387                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2388                          "GTP PSC item must be preceded by a GTP item");
2389         gtp_spec = gtp_item->spec;
2390         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2391         /* GTP spec is present and the E flag is requested to match zero. */
2392         if (gtp_spec &&
2393                 (gtp_mask->v_pt_rsv_flags &
2394                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2395                 return rte_flow_error_set
2396                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2397                          "GTP E flag must be 1 to match GTP PSC");
2398         /* Check the flow is not created in group zero. */
2399         if (!attr->transfer && !attr->group)
2400                 return rte_flow_error_set
2401                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2402                          "GTP PSC is not supported for group 0");
2403         /* GTP PSC item spec is optional, nothing to check without it. */
2404         if (!item->spec)
2405                 return 0;
2406         spec = item->spec;
2407         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2408         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2409                 return rte_flow_error_set
2410                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2411                          "PDU type should be smaller than 16");
2412         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2413                                          (const uint8_t *)&nic_mask,
2414                                          sizeof(struct rte_flow_item_gtp_psc),
2415                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2416 }
2417
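/*
 * A minimal application-side sketch (illustrative only): a GTP PSC match
 * that must follow a GTP item with the E flag set, as validated above.
 * The name, PDU type 1 (uplink) and QFI 9 are examples.
 */
static const struct rte_flow_item_gtp_psc flow_dv_example_gtp_psc
	__rte_unused = {
	.pdu_type = 1,
	.qfi = 9,
};
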
2418 /**
2419  * Validate IPV4 item.
2420  * Uses the existing validation function mlx5_flow_validate_item_ipv4() and
2421  * adds specific validation of the fragment_offset field.
2422  *
2423  * @param[in] item
2424  *   Item specification.
2425  * @param[in] item_flags
2426  *   Bit-fields that holds the items detected until now.
2427  * @param[out] error
2428  *   Pointer to error structure.
2429  *
2430  * @return
2431  *   0 on success, a negative errno value otherwise and rte_errno is set.
2432  */
2433 static int
2434 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2435                            uint64_t item_flags,
2436                            uint64_t last_item,
2437                            uint16_t ether_type,
2438                            struct rte_flow_error *error)
2439 {
2440         int ret;
2441         const struct rte_flow_item_ipv4 *spec = item->spec;
2442         const struct rte_flow_item_ipv4 *last = item->last;
2443         const struct rte_flow_item_ipv4 *mask = item->mask;
2444         rte_be16_t fragment_offset_spec = 0;
2445         rte_be16_t fragment_offset_last = 0;
2446         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2447                 .hdr = {
2448                         .src_addr = RTE_BE32(0xffffffff),
2449                         .dst_addr = RTE_BE32(0xffffffff),
2450                         .type_of_service = 0xff,
2451                         .fragment_offset = RTE_BE16(0xffff),
2452                         .next_proto_id = 0xff,
2453                         .time_to_live = 0xff,
2454                 },
2455         };
2456
2457         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2458                                            ether_type, &nic_ipv4_mask,
2459                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2460         if (ret < 0)
2461                 return ret;
2462         if (spec && mask)
2463                 fragment_offset_spec = spec->hdr.fragment_offset &
2464                                        mask->hdr.fragment_offset;
2465         if (!fragment_offset_spec)
2466                 return 0;
2467         /*
2468          * spec and mask are valid, enforce using full mask to make sure the
2469          * complete value is used correctly.
2470          */
2471         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2472                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2473                 return rte_flow_error_set(error, EINVAL,
2474                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2475                                           item, "must use full mask for"
2476                                           " fragment_offset");
2477         /*
2478          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2479          * indicating the first fragment of a fragmented packet.
2480          * This is not yet supported in MLX5, so return an appropriate error.
2481          */
2482         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2483                 return rte_flow_error_set(error, ENOTSUP,
2484                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2485                                           "match on first fragment not "
2486                                           "supported");
2487         if (fragment_offset_spec && !last)
2488                 return rte_flow_error_set(error, ENOTSUP,
2489                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2490                                           "specified value not supported");
2491         /* spec and last are valid, validate the specified range. */
2492         fragment_offset_last = last->hdr.fragment_offset &
2493                                mask->hdr.fragment_offset;
2494         /*
2495          * Match on fragment_offset spec 0x2001 and last 0x3fff
2496          * means MF is 1 and frag-offset is > 0.
2497          * This matches the 2nd fragment and onward, excluding the last.
2498          * This is not yet supported in MLX5; return an appropriate
2499          * error message.
2500          */
2501         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2502             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2503                 return rte_flow_error_set(error, ENOTSUP,
2504                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2505                                           last, "match on following "
2506                                           "fragments not supported");
2507         /*
2508          * Match on fragment_offset spec 0x0001 and last 0x1fff
2509          * means MF is 0 and frag-offset is > 0.
2510          * This matches the last fragment of a fragmented packet.
2511          * This is not yet supported in MLX5; return an appropriate
2512          * error message.
2513          */
2514         if (fragment_offset_spec == RTE_BE16(1) &&
2515             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2516                 return rte_flow_error_set(error, ENOTSUP,
2517                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2518                                           last, "match on last "
2519                                           "fragment not supported");
2520         /*
2521          * Match on fragment_offset spec 0x0001 and last 0x3fff
2522          * means MF and/or frag-offset is not 0.
2523          * This is a fragmented packet.
2524          * Other range values are invalid and rejected.
2525          */
2526         if (!(fragment_offset_spec == RTE_BE16(1) &&
2527               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2528                 return rte_flow_error_set(error, ENOTSUP,
2529                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2530                                           "specified range not supported");
2531         return 0;
2532 }
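
     /*
      * Illustrative sketch (not part of the driver): per the checks above,
      * the only fragment_offset range this validator accepts is spec 0x0001
      * with last 0x3fff under the full 0x3fff mask, i.e. "any fragmented
      * packet". The helper name below is hypothetical.
      */
     static void __rte_unused
     example_ipv4_any_fragment_item(struct rte_flow_item *item,
                                    struct rte_flow_item_ipv4 *spec,
                                    struct rte_flow_item_ipv4 *last,
                                    struct rte_flow_item_ipv4 *mask)
     {
             memset(spec, 0, sizeof(*spec));
             memset(last, 0, sizeof(*last));
             memset(mask, 0, sizeof(*mask));
             /* MF and/or frag-offset non-zero: any fragment. */
             spec->hdr.fragment_offset = RTE_BE16(1);
             last->hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK);
             /* Full mask is enforced by flow_dv_validate_item_ipv4(). */
             mask->hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK);
             item->type = RTE_FLOW_ITEM_TYPE_IPV4;
             item->spec = spec;
             item->last = last;
             item->mask = mask;
     }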
2533
2534 /**
2535  * Validate IPV6 fragment extension item.
2536  *
2537  * @param[in] item
2538  *   Item specification.
2539  * @param[in] item_flags
2540  *   Bit-fields that hold the items detected until now.
2541  * @param[out] error
2542  *   Pointer to error structure.
2543  *
2544  * @return
2545  *   0 on success, a negative errno value otherwise and rte_errno is set.
2546  */
2547 static int
2548 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2549                                     uint64_t item_flags,
2550                                     struct rte_flow_error *error)
2551 {
2552         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2553         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2554         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2555         rte_be16_t frag_data_spec = 0;
2556         rte_be16_t frag_data_last = 0;
2557         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2558         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2559                                       MLX5_FLOW_LAYER_OUTER_L4;
2560         int ret = 0;
2561         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2562                 .hdr = {
2563                         .next_header = 0xff,
2564                         .frag_data = RTE_BE16(0xffff),
2565                 },
2566         };
2567
2568         if (item_flags & l4m)
2569                 return rte_flow_error_set(error, EINVAL,
2570                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2571                                           "ipv6 fragment extension item cannot "
2572                                           "follow L4 item.");
2573         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2574             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2575                 return rte_flow_error_set(error, EINVAL,
2576                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2577                                           "ipv6 fragment extension item must "
2578                                           "follow ipv6 item");
2579         if (spec && mask)
2580                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2581         if (!frag_data_spec)
2582                 return 0;
2583         /*
2584          * spec and mask are valid, enforce using full mask to make sure the
2585          * complete value is used correctly.
2586          */
2587         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2588                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2589                 return rte_flow_error_set(error, EINVAL,
2590                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2591                                           item, "must use full mask for"
2592                                           " frag_data");
2593         /*
2594          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2595          * This is the 1st fragment of a fragmented packet.
2596          */
2597         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2598                 return rte_flow_error_set(error, ENOTSUP,
2599                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2600                                           "match on first fragment not "
2601                                           "supported");
2602         if (frag_data_spec && !last)
2603                 return rte_flow_error_set(error, EINVAL,
2604                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2605                                           "specified value not supported");
2606         ret = mlx5_flow_item_acceptable
2607                                 (item, (const uint8_t *)mask,
2608                                  (const uint8_t *)&nic_mask,
2609                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2610                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2611         if (ret)
2612                 return ret;
2613         /* spec and last are valid, validate the specified range. */
2614         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2615         /*
2616          * Match on frag_data spec 0x0009 and last 0xfff9
2617          * means M is 1 and frag-offset is > 0.
2618          * This matches the 2nd fragment and onward, excluding the last.
2619          * This is not yet supported in MLX5; return an appropriate
2620          * error message.
2621          */
2622         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2623                                        RTE_IPV6_EHDR_MF_MASK) &&
2624             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2625                 return rte_flow_error_set(error, ENOTSUP,
2626                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2627                                           last, "match on following "
2628                                           "fragments not supported");
2629         /*
2630          * Match on frag_data spec 0x0008 and last 0xfff8
2631          * means M is 0 and frag-offset is > 0.
2632          * This packet is last fragment of fragmented packet.
2633          * This is not yet supported in MLX5, return appropriate
2634          * error message.
2635          */
2636         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2637             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2638                 return rte_flow_error_set(error, ENOTSUP,
2639                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2640                                           last, "match on last "
2641                                           "fragment not supported");
2642         /* Other range values are invalid and rejected. */
2643         return rte_flow_error_set(error, EINVAL,
2644                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2645                                   "specified range not supported");
2646 }
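
     /*
      * Illustrative sketch (not part of the driver): every non-zero
      * frag_data range above ends in an error, so in practice this item
      * can only match the presence of the fragment extension header or
      * its next_header field. The variable name below is hypothetical.
      */
     static const struct rte_flow_item example_ipv6_frag_ext_item __rte_unused = {
             /* NULL spec/mask: match any packet carrying the header. */
             .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
             .spec = NULL,
             .mask = NULL,
     };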
2647
2648 /**
2649  * Validate ASO CT item.
2650  *
2651  * @param[in] dev
2652  *   Pointer to the rte_eth_dev structure.
2653  * @param[in] item
2654  *   Item specification.
2655  * @param[in, out] item_flags
2656  *   Pointer to bit-fields that hold the items detected until now.
2657  * @param[out] error
2658  *   Pointer to error structure.
2659  *
2660  * @return
2661  *   0 on success, a negative errno value otherwise and rte_errno is set.
2662  */
2663 static int
2664 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2665                              const struct rte_flow_item *item,
2666                              uint64_t *item_flags,
2667                              struct rte_flow_error *error)
2668 {
2669         const struct rte_flow_item_conntrack *spec = item->spec;
2670         const struct rte_flow_item_conntrack *mask = item->mask;
2671         uint32_t flags;
2672         RTE_SET_USED(dev);
2673
2674         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2675                 return rte_flow_error_set(error, EINVAL,
2676                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2677                                           "Only one CT is supported");
2678         if (!mask)
2679                 mask = &rte_flow_item_conntrack_mask;
             /* A NULL spec would be dereferenced below; reject it explicitly. */
             if (!spec)
                     return rte_flow_error_set(error, EINVAL,
                                               RTE_FLOW_ERROR_TYPE_ITEM, item,
                                               "CT item spec cannot be NULL");
2680         flags = spec->flags & mask->flags;
2681         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2682             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2683              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2684              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2685                 return rte_flow_error_set(error, EINVAL,
2686                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2687                                           "Conflict status bits");
2688         /* State change also needs to be considered. */
2689         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2690         return 0;
2691 }
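
     /*
      * Illustrative sketch (not part of the driver): a conntrack item
      * whose flags pass the conflict check above, i.e. STATE_VALID is not
      * combined with INVALID/BAD/DISABLED. The names are hypothetical.
      */
     static const struct rte_flow_item_conntrack example_ct_spec = {
             .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
                      RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED,
     };
     static const struct rte_flow_item example_ct_item __rte_unused = {
             .type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
             .spec = &example_ct_spec,
             /* NULL mask: rte_flow_item_conntrack_mask is used above. */
             .mask = NULL,
     };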
2692
2693 /**
2694  * Validate the pop VLAN action.
2695  *
2696  * @param[in] dev
2697  *   Pointer to the rte_eth_dev structure.
2698  * @param[in] action_flags
2699  *   Holds the actions detected until now.
2700  * @param[in] action
2701  *   Pointer to the pop vlan action.
2702  * @param[in] item_flags
2703  *   The items found in this flow rule.
2704  * @param[in] attr
2705  *   Pointer to flow attributes.
2706  * @param[out] error
2707  *   Pointer to error structure.
2708  *
2709  * @return
2710  *   0 on success, a negative errno value otherwise and rte_errno is set.
2711  */
2712 static int
2713 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2714                                  uint64_t action_flags,
2715                                  const struct rte_flow_action *action,
2716                                  uint64_t item_flags,
2717                                  const struct rte_flow_attr *attr,
2718                                  struct rte_flow_error *error)
2719 {
2720         const struct mlx5_priv *priv = dev->data->dev_private;
2721
2724         if (!priv->sh->pop_vlan_action)
2725                 return rte_flow_error_set(error, ENOTSUP,
2726                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2727                                           NULL,
2728                                           "pop vlan action is not supported");
2729         if (attr->egress)
2730                 return rte_flow_error_set(error, ENOTSUP,
2731                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2732                                           NULL,
2733                                           "pop vlan action not supported for "
2734                                           "egress");
2735         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2736                 return rte_flow_error_set(error, ENOTSUP,
2737                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2738                                           "no support for multiple VLAN "
2739                                           "actions");
2740         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2741         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2742             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2743                 return rte_flow_error_set(error, ENOTSUP,
2744                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2745                                           NULL,
2746                                           "cannot pop vlan after decap without "
2747                                           "match on inner vlan in the flow");
2748         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2749         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2750             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2751                 return rte_flow_error_set(error, ENOTSUP,
2752                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2753                                           NULL,
2754                                           "cannot pop vlan without a "
2755                                           "match on (outer) vlan in the flow");
2756         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2757                 return rte_flow_error_set(error, EINVAL,
2758                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2759                                           "wrong action order, port_id should "
2760                                           "be after pop VLAN action");
2761         if (!attr->transfer && priv->representor)
2762                 return rte_flow_error_set(error, ENOTSUP,
2763                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2764                                           "pop vlan action for VF representor "
2765                                           "not supported on NIC table");
2766         return 0;
2767 }
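
     /*
      * Illustrative sketch (not part of the driver): an action order the
      * validator above accepts for a transfer rule matching on (outer)
      * VLAN, with port_id placed after the VLAN pop. The port id value is
      * hypothetical.
      */
     static const struct rte_flow_action_port_id example_port_conf = {
             .id = 0,
     };
     static const struct rte_flow_action example_pop_vlan_actions[] __rte_unused = {
             { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &example_port_conf },
             { .type = RTE_FLOW_ACTION_TYPE_END },
     };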
2768
2769 /**
2770  * Get VLAN default info from vlan match info.
2771  *
2772  * @param[in] items
2773  *   The list of item specifications.
2774  * @param[out] vlan
2775  *   Pointer to the VLAN info to fill.
2779  */
2780 static void
2781 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2782                                   struct rte_vlan_hdr *vlan)
2783 {
2784         const struct rte_flow_item_vlan nic_mask = {
2785                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2786                                 MLX5DV_FLOW_VLAN_VID_MASK),
2787                 .inner_type = RTE_BE16(0xffff),
2788         };
2789
2790         if (items == NULL)
2791                 return;
2792         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2793                 int type = items->type;
2794
2795                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2796                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2797                         break;
2798         }
2799         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2800                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2801                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2802
2803                 /* If VLAN item in pattern doesn't contain data, return here. */
2804                 if (!vlan_v)
2805                         return;
2806                 if (!vlan_m)
2807                         vlan_m = &nic_mask;
2808                 /* Only full match values are accepted */
2809                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2810                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2811                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2812                         vlan->vlan_tci |=
2813                                 rte_be_to_cpu_16(vlan_v->tci &
2814                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2815                 }
2816                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2817                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2818                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2819                         vlan->vlan_tci |=
2820                                 rte_be_to_cpu_16(vlan_v->tci &
2821                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2822                 }
2823                 if (vlan_m->inner_type == nic_mask.inner_type)
2824                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2825                                                            vlan_m->inner_type);
2826         }
2827 }
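
     /*
      * Illustrative sketch (not part of the driver): how the host-order
      * TCI filled in above decomposes into PCP and VID with the masks
      * defined at the top of this file. The helper name is hypothetical.
      */
     static void __rte_unused
     example_decompose_tci(uint16_t vlan_tci, uint8_t *pcp, uint16_t *vid)
     {
             *pcp = (vlan_tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
                    MLX5DV_FLOW_VLAN_PCP_SHIFT;
             *vid = vlan_tci & MLX5DV_FLOW_VLAN_VID_MASK;
     }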
2828
2829 /**
2830  * Validate the push VLAN action.
2831  *
2832  * @param[in] dev
2833  *   Pointer to the rte_eth_dev structure.
2834  * @param[in] action_flags
2835  *   Holds the actions detected until now.
2836  * @param[in] vlan_m
2837  *   Pointer to the VLAN item mask, NULL if the item is not present.
2838  * @param[in] action
2839  *   Pointer to the action structure.
2840  * @param[in] attr
2841  *   Pointer to flow attributes
2842  * @param[out] error
2843  *   Pointer to error structure.
2844  *
2845  * @return
2846  *   0 on success, a negative errno value otherwise and rte_errno is set.
2847  */
2848 static int
2849 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2850                                   uint64_t action_flags,
2851                                   const struct rte_flow_item_vlan *vlan_m,
2852                                   const struct rte_flow_action *action,
2853                                   const struct rte_flow_attr *attr,
2854                                   struct rte_flow_error *error)
2855 {
2856         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2857         const struct mlx5_priv *priv = dev->data->dev_private;
2858
2859         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2860             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2861                 return rte_flow_error_set(error, EINVAL,
2862                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2863                                           "invalid vlan ethertype");
2864         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2865                 return rte_flow_error_set(error, EINVAL,
2866                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2867                                           "wrong action order, port_id should "
2868                                           "be after push VLAN");
2869         if (!attr->transfer && priv->representor)
2870                 return rte_flow_error_set(error, ENOTSUP,
2871                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2872                                           "push vlan action for VF representor "
2873                                           "not supported on NIC table");
2874         if (vlan_m &&
2875             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2876             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2877                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2878             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2879             !(mlx5_flow_find_action
2880                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2881                 return rte_flow_error_set(error, EINVAL,
2882                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2883                                           "not full match mask on VLAN PCP and "
2884                                           "there is no of_set_vlan_pcp action, "
2885                                           "push VLAN action cannot figure out "
2886                                           "PCP value");
2887         if (vlan_m &&
2888             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2889             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2890                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2891             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2892             !(mlx5_flow_find_action
2893                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2894                 return rte_flow_error_set(error, EINVAL,
2895                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2896                                           "not full match mask on VLAN VID and "
2897                                           "there is no of_set_vlan_vid action, "
2898                                           "push VLAN action cannot figure out "
2899                                           "VID value");
2901         return 0;
2902 }
2903
2904 /**
2905  * Validate the set VLAN PCP.
2906  *
2907  * @param[in] action_flags
2908  *   Holds the actions detected until now.
2909  * @param[in] actions
2910  *   Pointer to the list of actions remaining in the flow rule.
2911  * @param[out] error
2912  *   Pointer to error structure.
2913  *
2914  * @return
2915  *   0 on success, a negative errno value otherwise and rte_errno is set.
2916  */
2917 static int
2918 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2919                                      const struct rte_flow_action actions[],
2920                                      struct rte_flow_error *error)
2921 {
2922         const struct rte_flow_action *action = actions;
2923         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2924
2925         if (conf->vlan_pcp > 7)
2926                 return rte_flow_error_set(error, EINVAL,
2927                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2928                                           "VLAN PCP value is too big");
2929         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2930                 return rte_flow_error_set(error, ENOTSUP,
2931                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2932                                           "set VLAN PCP action must follow "
2933                                           "the push VLAN action");
2934         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2935                 return rte_flow_error_set(error, ENOTSUP,
2936                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2937                                           "Multiple VLAN PCP modifications are "
2938                                           "not supported");
2939         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2940                 return rte_flow_error_set(error, EINVAL,
2941                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2942                                           "wrong action order, port_id should "
2943                                           "be after set VLAN PCP");
2944         return 0;
2945 }
2946
2947 /**
2948  * Validate the set VLAN VID.
2949  *
2950  * @param[in] item_flags
2951  *   Holds the items detected in this rule.
2952  * @param[in] action_flags
2953  *   Holds the actions detected until now.
2954  * @param[in] actions
2955  *   Pointer to the list of actions remaining in the flow rule.
2956  * @param[out] error
2957  *   Pointer to error structure.
2958  *
2959  * @return
2960  *   0 on success, a negative errno value otherwise and rte_errno is set.
2961  */
2962 static int
2963 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2964                                      uint64_t action_flags,
2965                                      const struct rte_flow_action actions[],
2966                                      struct rte_flow_error *error)
2967 {
2968         const struct rte_flow_action *action = actions;
2969         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2970
2971         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2972                 return rte_flow_error_set(error, EINVAL,
2973                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2974                                           "VLAN VID value is too big");
2975         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2976             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2977                 return rte_flow_error_set(error, ENOTSUP,
2978                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2979                                           "set VLAN VID action must follow push"
2980                                           " VLAN action or match on VLAN item");
2981         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2982                 return rte_flow_error_set(error, ENOTSUP,
2983                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2984                                           "Multiple VLAN VID modifications are "
2985                                           "not supported");
2986         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2987                 return rte_flow_error_set(error, EINVAL,
2988                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2989                                           "wrong action order, port_id should "
2990                                           "be after set VLAN VID");
2991         return 0;
2992 }
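
     /*
      * Illustrative sketch (not part of the driver): a push VLAN sequence
      * satisfying the three validators above; of_set_vlan_vid and
      * of_set_vlan_pcp follow of_push_vlan so the pushed header gets
      * explicit VID/PCP values. The VID/PCP values are hypothetical.
      */
     static const struct rte_flow_action_of_push_vlan example_push = {
             .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
     };
     static const struct rte_flow_action_of_set_vlan_vid example_vid = {
             .vlan_vid = RTE_BE16(100),
     };
     static const struct rte_flow_action_of_set_vlan_pcp example_pcp = {
             .vlan_pcp = 3,
     };
     static const struct rte_flow_action example_push_vlan_actions[] __rte_unused = {
             { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &example_push },
             { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &example_vid },
             { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &example_pcp },
             { .type = RTE_FLOW_ACTION_TYPE_END },
     };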
2993
2994 /**
2995  * Validate the FLAG action.
2996  *
2997  * @param[in] dev
2998  *   Pointer to the rte_eth_dev structure.
2999  * @param[in] action_flags
3000  *   Holds the actions detected until now.
3001  * @param[in] attr
3002  *   Pointer to flow attributes
3003  * @param[out] error
3004  *   Pointer to error structure.
3005  *
3006  * @return
3007  *   0 on success, a negative errno value otherwise and rte_errno is set.
3008  */
3009 static int
3010 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3011                              uint64_t action_flags,
3012                              const struct rte_flow_attr *attr,
3013                              struct rte_flow_error *error)
3014 {
3015         struct mlx5_priv *priv = dev->data->dev_private;
3016         struct mlx5_dev_config *config = &priv->config;
3017         int ret;
3018
3019         /* Fall back if no extended metadata register support. */
3020         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3021                 return mlx5_flow_validate_action_flag(action_flags, attr,
3022                                                       error);
3023         /* Extensive metadata mode requires registers. */
3024         if (!mlx5_flow_ext_mreg_supported(dev))
3025                 return rte_flow_error_set(error, ENOTSUP,
3026                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3027                                           "no metadata registers "
3028                                           "to support flag action");
3029         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3030                 return rte_flow_error_set(error, ENOTSUP,
3031                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3032                                           "extended metadata register"
3033                                           " isn't available");
3034         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3035         if (ret < 0)
3036                 return ret;
3037         MLX5_ASSERT(ret > 0);
3038         if (action_flags & MLX5_FLOW_ACTION_MARK)
3039                 return rte_flow_error_set(error, EINVAL,
3040                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3041                                           "can't mark and flag in same flow");
3042         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3043                 return rte_flow_error_set(error, EINVAL,
3044                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3045                                           "can't have 2 flag"
3046                                           " actions in same flow");
3047         return 0;
3048 }
3049
3050 /**
3051  * Validate MARK action.
3052  *
3053  * @param[in] dev
3054  *   Pointer to the rte_eth_dev structure.
3055  * @param[in] action
3056  *   Pointer to action.
3057  * @param[in] action_flags
3058  *   Holds the actions detected until now.
3059  * @param[in] attr
3060  *   Pointer to flow attributes
3061  * @param[out] error
3062  *   Pointer to error structure.
3063  *
3064  * @return
3065  *   0 on success, a negative errno value otherwise and rte_errno is set.
3066  */
3067 static int
3068 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3069                              const struct rte_flow_action *action,
3070                              uint64_t action_flags,
3071                              const struct rte_flow_attr *attr,
3072                              struct rte_flow_error *error)
3073 {
3074         struct mlx5_priv *priv = dev->data->dev_private;
3075         struct mlx5_dev_config *config = &priv->config;
3076         const struct rte_flow_action_mark *mark = action->conf;
3077         int ret;
3078
3079         if (is_tunnel_offload_active(dev))
3080                 return rte_flow_error_set(error, ENOTSUP,
3081                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3082                                           "no mark action "
3083                                           "if tunnel offload active");
3084         /* Fall back if no extended metadata register support. */
3085         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3086                 return mlx5_flow_validate_action_mark(action, action_flags,
3087                                                       attr, error);
3088         /* Extensive metadata mode requires registers. */
3089         if (!mlx5_flow_ext_mreg_supported(dev))
3090                 return rte_flow_error_set(error, ENOTSUP,
3091                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3092                                           "no metadata registers "
3093                                           "to support mark action");
3094         if (!priv->sh->dv_mark_mask)
3095                 return rte_flow_error_set(error, ENOTSUP,
3096                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3097                                           "extended metadata register"
3098                                           " isn't available");
3099         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3100         if (ret < 0)
3101                 return ret;
3102         MLX5_ASSERT(ret > 0);
3103         if (!mark)
3104                 return rte_flow_error_set(error, EINVAL,
3105                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3106                                           "configuration cannot be null");
3107         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3108                 return rte_flow_error_set(error, EINVAL,
3109                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3110                                           &mark->id,
3111                                           "mark id exceeds the limit");
3112         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3113                 return rte_flow_error_set(error, EINVAL,
3114                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3115                                           "can't flag and mark in same flow");
3116         if (action_flags & MLX5_FLOW_ACTION_MARK)
3117                 return rte_flow_error_set(error, EINVAL,
3118                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3119                                           "can't have 2 mark actions in same"
3120                                           " flow");
3121         return 0;
3122 }
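
     /*
      * Illustrative sketch (not part of the driver): a mark action whose
      * id stays below the MLX5_FLOW_MARK_MAX limit checked above. The id
      * value is hypothetical.
      */
     static const struct rte_flow_action_mark example_mark __rte_unused = {
             .id = 0xbeef,
     };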
3123
3124 /**
3125  * Validate SET_META action.
3126  *
3127  * @param[in] dev
3128  *   Pointer to the rte_eth_dev structure.
3129  * @param[in] action
3130  *   Pointer to the action structure.
3131  * @param[in] action_flags
3132  *   Holds the actions detected until now.
3133  * @param[in] attr
3134  *   Pointer to flow attributes
3135  * @param[out] error
3136  *   Pointer to error structure.
3137  *
3138  * @return
3139  *   0 on success, a negative errno value otherwise and rte_errno is set.
3140  */
3141 static int
3142 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3143                                  const struct rte_flow_action *action,
3144                                  uint64_t action_flags __rte_unused,
3145                                  const struct rte_flow_attr *attr,
3146                                  struct rte_flow_error *error)
3147 {
3148         const struct rte_flow_action_set_meta *conf;
3149         uint32_t nic_mask = UINT32_MAX;
3150         int reg;
3151
3152         if (!mlx5_flow_ext_mreg_supported(dev))
3153                 return rte_flow_error_set(error, ENOTSUP,
3154                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3155                                           "extended metadata register"
3156                                           " isn't supported");
3157         reg = flow_dv_get_metadata_reg(dev, attr, error);
3158         if (reg < 0)
3159                 return reg;
3160         if (reg == REG_NON)
3161                 return rte_flow_error_set(error, ENOTSUP,
3162                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3163                                           "unavailable extended metadata register");
3164         if (reg != REG_A && reg != REG_B) {
3165                 struct mlx5_priv *priv = dev->data->dev_private;
3166
3167                 nic_mask = priv->sh->dv_meta_mask;
3168         }
3169         if (!(action->conf))
3170                 return rte_flow_error_set(error, EINVAL,
3171                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3172                                           "configuration cannot be null");
3173         conf = (const struct rte_flow_action_set_meta *)action->conf;
3174         if (!conf->mask)
3175                 return rte_flow_error_set(error, EINVAL,
3176                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3177                                           "zero mask doesn't have any effect");
3178         if (conf->mask & ~nic_mask)
3179                 return rte_flow_error_set(error, EINVAL,
3180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3181                                           "metadata must be within reg C0");
3182         return 0;
3183 }
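
     /*
      * Illustrative sketch (not part of the driver): a set_meta
      * configuration with the non-zero mask required above; outside of
      * registers A/B the mask must also fit priv->sh->dv_meta_mask.
      * Values are hypothetical.
      */
     static const struct rte_flow_action_set_meta example_set_meta __rte_unused = {
             .data = 0xa5a5,
             .mask = 0xffff,
     };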
3184
3185 /**
3186  * Validate SET_TAG action.
3187  *
3188  * @param[in] dev
3189  *   Pointer to the rte_eth_dev structure.
3190  * @param[in] action
3191  *   Pointer to the action structure.
3192  * @param[in] action_flags
3193  *   Holds the actions detected until now.
3194  * @param[in] attr
3195  *   Pointer to flow attributes
3196  * @param[out] error
3197  *   Pointer to error structure.
3198  *
3199  * @return
3200  *   0 on success, a negative errno value otherwise and rte_errno is set.
3201  */
3202 static int
3203 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3204                                 const struct rte_flow_action *action,
3205                                 uint64_t action_flags,
3206                                 const struct rte_flow_attr *attr,
3207                                 struct rte_flow_error *error)
3208 {
3209         const struct rte_flow_action_set_tag *conf;
3210         const uint64_t terminal_action_flags =
3211                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3212                 MLX5_FLOW_ACTION_RSS;
3213         int ret;
3214
3215         if (!mlx5_flow_ext_mreg_supported(dev))
3216                 return rte_flow_error_set(error, ENOTSUP,
3217                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3218                                           "extensive metadata register"
3219                                           " isn't supported");
3220         if (!(action->conf))
3221                 return rte_flow_error_set(error, EINVAL,
3222                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3223                                           "configuration cannot be null");
3224         conf = (const struct rte_flow_action_set_tag *)action->conf;
3225         if (!conf->mask)
3226                 return rte_flow_error_set(error, EINVAL,
3227                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3228                                           "zero mask doesn't have any effect");
3229         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3230         if (ret < 0)
3231                 return ret;
3232         if (!attr->transfer && attr->ingress &&
3233             (action_flags & terminal_action_flags))
3234                 return rte_flow_error_set(error, EINVAL,
3235                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3236                                           "set_tag has no effect"
3237                                           " with terminal actions");
3238         return 0;
3239 }
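
     /*
      * Illustrative sketch (not part of the driver): a set_tag
      * configuration with the non-zero mask the validator above insists
      * on. Index and values are hypothetical.
      */
     static const struct rte_flow_action_set_tag example_set_tag __rte_unused = {
             .data = 0x1234,
             .mask = 0xffff,
             .index = 0,
     };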
3240
3241 /**
3242  * Check if action counter is shared by either old or new mechanism.
3243  *
3244  * @param[in] action
3245  *   Pointer to the action structure.
3246  *
3247  * @return
3248  *   True when counter is shared, false otherwise.
3249  */
3250 static inline bool
3251 is_shared_action_count(const struct rte_flow_action *action)
3252 {
3253         const struct rte_flow_action_count *count =
3254                         (const struct rte_flow_action_count *)action->conf;
3255
3256         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3257                 return true;
3258         return !!(count && count->shared);
3259 }
3260
3261 /**
3262  * Validate count action.
3263  *
3264  * @param[in] dev
3265  *   Pointer to rte_eth_dev structure.
3266  * @param[in] shared
3267  *   Indicator if action is shared.
3268  * @param[in] action_flags
3269  *   Holds the actions detected until now.
3270  * @param[out] error
3271  *   Pointer to error structure.
3272  *
3273  * @return
3274  *   0 on success, a negative errno value otherwise and rte_errno is set.
3275  */
3276 static int
3277 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3278                               uint64_t action_flags,
3279                               struct rte_flow_error *error)
3280 {
3281         struct mlx5_priv *priv = dev->data->dev_private;
3282
3283         if (!priv->config.devx)
3284                 goto notsup_err;
3285         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3286                 return rte_flow_error_set(error, EINVAL,
3287                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3288                                           "duplicate count actions set");
3289         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3290             !priv->sh->flow_hit_aso_en)
3291                 return rte_flow_error_set(error, EINVAL,
3292                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3293                                           "old age and shared count combination is not supported");
3294 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3295         return 0;
3296 #endif
3297 notsup_err:
3298         return rte_flow_error_set
3299                       (error, ENOTSUP,
3300                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3301                        NULL,
3302                        "count action not supported");
3303 }
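
     /*
      * Illustrative sketch (not part of the driver): a shared count
      * action as detected by is_shared_action_count() above. The id value
      * is hypothetical.
      */
     static const struct rte_flow_action_count example_shared_count __rte_unused = {
             .shared = 1,
             .id = 42,
     };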
3304
3305 /**
3306  * Validate the L2 encap action.
3307  *
3308  * @param[in] dev
3309  *   Pointer to the rte_eth_dev structure.
3310  * @param[in] action_flags
3311  *   Holds the actions detected until now.
3312  * @param[in] action
3313  *   Pointer to the action structure.
3314  * @param[in] attr
3315  *   Pointer to flow attributes.
3316  * @param[out] error
3317  *   Pointer to error structure.
3318  *
3319  * @return
3320  *   0 on success, a negative errno value otherwise and rte_errno is set.
3321  */
3322 static int
3323 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3324                                  uint64_t action_flags,
3325                                  const struct rte_flow_action *action,
3326                                  const struct rte_flow_attr *attr,
3327                                  struct rte_flow_error *error)
3328 {
3329         const struct mlx5_priv *priv = dev->data->dev_private;
3330
3331         if (!(action->conf))
3332                 return rte_flow_error_set(error, EINVAL,
3333                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3334                                           "configuration cannot be null");
3335         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3336                 return rte_flow_error_set(error, EINVAL,
3337                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3338                                           "can only have a single encap action "
3339                                           "in a flow");
3340         if (!attr->transfer && priv->representor)
3341                 return rte_flow_error_set(error, ENOTSUP,
3342                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3343                                           "encap action for VF representor "
3344                                           "not supported on NIC table");
3345         return 0;
3346 }
3347
3348 /**
3349  * Validate a decap action.
3350  *
3351  * @param[in] dev
3352  *   Pointer to the rte_eth_dev structure.
3353  * @param[in] action_flags
3354  *   Holds the actions detected until now.
3355  * @param[in] action
3356  *   Pointer to the action structure.
3357  * @param[in] item_flags
3358  *   Holds the items detected.
3359  * @param[in] attr
3360  *   Pointer to flow attributes
3361  * @param[out] error
3362  *   Pointer to error structure.
3363  *
3364  * @return
3365  *   0 on success, a negative errno value otherwise and rte_errno is set.
3366  */
3367 static int
3368 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3369                               uint64_t action_flags,
3370                               const struct rte_flow_action *action,
3371                               const uint64_t item_flags,
3372                               const struct rte_flow_attr *attr,
3373                               struct rte_flow_error *error)
3374 {
3375         const struct mlx5_priv *priv = dev->data->dev_private;
3376
3377         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3378             !priv->config.decap_en)
3379                 return rte_flow_error_set(error, ENOTSUP,
3380                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3381                                           "decap is not enabled");
3382         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3383                 return rte_flow_error_set(error, ENOTSUP,
3384                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3385                                           action_flags &
3386                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3387                                           "have a single decap action" : "decap "
3388                                           "after encap is not supported");
3389         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3390                 return rte_flow_error_set(error, EINVAL,
3391                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3392                                           "can't have decap action after"
3393                                           " modify action");
3394         if (attr->egress)
3395                 return rte_flow_error_set(error, ENOTSUP,
3396                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3397                                           NULL,
3398                                           "decap action not supported for "
3399                                           "egress");
3400         if (!attr->transfer && priv->representor)
3401                 return rte_flow_error_set(error, ENOTSUP,
3402                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3403                                           "decap action for VF representor "
3404                                           "not supported on NIC table");
3405         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3406             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3407                 return rte_flow_error_set(error, ENOTSUP,
3408                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3409                                 "VXLAN item should be present for VXLAN decap");
3410         return 0;
3411 }
3412
3413 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3414
3415 /**
3416  * Validate the raw encap and decap actions.
3417  *
3418  * @param[in] dev
3419  *   Pointer to the rte_eth_dev structure.
3420  * @param[in] decap
3421  *   Pointer to the decap action.
3422  * @param[in] encap
3423  *   Pointer to the encap action.
3424  * @param[in] attr
3425  *   Pointer to flow attributes
3426  * @param[in, out] action_flags
3427  *   Holds the actions detected until now.
3428  * @param[out] actions_n
3429  *   Pointer to the number of actions counter.
3430  * @param[in] action
3431  *   Pointer to the action structure.
3432  * @param[in] item_flags
3433  *   Holds the items detected.
3434  * @param[out] error
3435  *   Pointer to error structure.
3436  *
3437  * @return
3438  *   0 on success, a negative errno value otherwise and rte_errno is set.
3439  */
3440 static int
3441 flow_dv_validate_action_raw_encap_decap
3442         (struct rte_eth_dev *dev,
3443          const struct rte_flow_action_raw_decap *decap,
3444          const struct rte_flow_action_raw_encap *encap,
3445          const struct rte_flow_attr *attr, uint64_t *action_flags,
3446          int *actions_n, const struct rte_flow_action *action,
3447          uint64_t item_flags, struct rte_flow_error *error)
3448 {
3449         const struct mlx5_priv *priv = dev->data->dev_private;
3450         int ret;
3451
3452         if (encap && (!encap->size || !encap->data))
3453                 return rte_flow_error_set(error, EINVAL,
3454                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3455                                           "raw encap data cannot be empty");
3456         if (decap && encap) {
3457                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3458                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3459                         /* L3 encap. */
3460                         decap = NULL;
3461                 else if (encap->size <=
3462                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3463                            decap->size >
3464                            MLX5_ENCAPSULATION_DECISION_SIZE)
3465                         /* L3 decap. */
3466                         encap = NULL;
3467                 else if (encap->size >
3468                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3469                            decap->size >
3470                            MLX5_ENCAPSULATION_DECISION_SIZE)
3471                         /* 2 L2 actions: encap and decap. */
3472                         ;
3473                 else
3474                         return rte_flow_error_set(error,
3475                                 ENOTSUP,
3476                                 RTE_FLOW_ERROR_TYPE_ACTION,
3477                                 NULL, "unsupported too small "
3478                                 "raw decap and too small raw "
3479                                 "encap combination");
3480         }
3481         if (decap) {
3482                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3483                                                     item_flags, attr, error);
3484                 if (ret < 0)
3485                         return ret;
3486                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3487                 ++(*actions_n);
3488         }
3489         if (encap) {
3490                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3491                         return rte_flow_error_set(error, ENOTSUP,
3492                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3493                                                   NULL,
3494                                                   "small raw encap size");
3495                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3496                         return rte_flow_error_set(error, EINVAL,
3497                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3498                                                   NULL,
3499                                                   "more than one encap action");
3500                 if (!attr->transfer && priv->representor)
3501                         return rte_flow_error_set
3502                                         (error, ENOTSUP,
3503                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3504                                          "encap action for VF representor "
3505                                          "not supported on NIC table");
3506                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3507                 ++(*actions_n);
3508         }
3509         return 0;
3510 }
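
     /*
      * Illustrative sketch (not part of the driver): per the size rules
      * above, a raw decap of at most MLX5_ENCAPSULATION_DECISION_SIZE
      * bytes combined with a larger raw encap is treated as an L3 encap.
      * Buffer contents and sizes are hypothetical; real encap data must
      * hold the complete header stack to prepend.
      */
     static uint8_t example_encap_buf[128];
     static const struct rte_flow_action_raw_decap example_raw_decap __rte_unused = {
             .data = NULL,
             .size = MLX5_ENCAPSULATION_DECISION_SIZE,
     };
     static const struct rte_flow_action_raw_encap example_raw_encap __rte_unused = {
             .data = example_encap_buf,
             .size = sizeof(example_encap_buf),
     };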
3511
3512 /**
3513  * Validate the ASO CT action.
3514  *
3515  * @param[in] dev
3516  *   Pointer to the rte_eth_dev structure.
3517  * @param[in] action_flags
3518  *   Holds the actions detected until now.
3519  * @param[in] item_flags
3520  *   The items found in this flow rule.
3521  * @param[in] attr
3522  *   Pointer to flow attributes.
3523  * @param[out] error
3524  *   Pointer to error structure.
3525  *
3526  * @return
3527  *   0 on success, a negative errno value otherwise and rte_errno is set.
3528  */
3529 static int
3530 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3531                                uint64_t action_flags,
3532                                uint64_t item_flags,
3533                                const struct rte_flow_attr *attr,
3534                                struct rte_flow_error *error)
3535 {
3536         RTE_SET_USED(dev);
3537
3538         if (attr->group == 0 && !attr->transfer)
3539                 return rte_flow_error_set(error, ENOTSUP,
3540                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3541                                           NULL,
3542                                           "Only support non-root table");
3543         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3544                 return rte_flow_error_set(error, ENOTSUP,
3545                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3546                                           "CT cannot follow a fate action");
3547         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3548             (action_flags & MLX5_FLOW_ACTION_AGE))
3549                 return rte_flow_error_set(error, EINVAL,
3550                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3551                                           "Only one ASO action is supported");
3552         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3553                 return rte_flow_error_set(error, EINVAL,
3554                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3555                                           "Encap cannot exist before CT");
3556         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3557                 return rte_flow_error_set(error, EINVAL,
3558                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3559                                           "Not an outer TCP packet");
3560         return 0;
3561 }
3562
3563 /**
3564  * Match encap_decap resource.
3565  *
3566  * @param list
3567  *   Pointer to the hash list.
3568  * @param entry
3569  *   Pointer to the existing resource entry object.
3570  * @param key
3571  *   Key of the new entry.
3572  * @param cb_ctx
3573  *   Pointer to the new encap_decap resource.
3574  *
3575  * @return
3576  *   0 on matching, non-zero otherwise.
3577  */
3578 int
3579 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3580                              struct mlx5_hlist_entry *entry,
3581                              uint64_t key __rte_unused, void *cb_ctx)
3582 {
3583         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3584         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3585         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3586
3587         cache_resource = container_of(entry,
3588                                       struct mlx5_flow_dv_encap_decap_resource,
3589                                       entry);
3590         if (resource->reformat_type == cache_resource->reformat_type &&
3591             resource->ft_type == cache_resource->ft_type &&
3592             resource->flags == cache_resource->flags &&
3593             resource->size == cache_resource->size &&
3594             !memcmp((const void *)resource->buf,
3595                     (const void *)cache_resource->buf,
3596                     resource->size))
3597                 return 0;
3598         return -1;
3599 }
3600
3601 /**
3602  * Allocate encap_decap resource.
3603  *
3604  * @param list
3605  *   Pointer to the hash list.
3606  * @param key
3607  *   Key of the new entry.
3608  * @param cb_ctx
3609  *   Pointer to the new encap_decap resource.
3610  *
3611  * @return
3612  *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
3613  */
3614 struct mlx5_hlist_entry *
3615 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3616                               uint64_t key __rte_unused,
3617                               void *cb_ctx)
3618 {
3619         struct mlx5_dev_ctx_shared *sh = list->ctx;
3620         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3621         struct mlx5dv_dr_domain *domain;
3622         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3623         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3624         uint32_t idx;
3625         int ret;
3626
3627         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3628                 domain = sh->fdb_domain;
3629         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3630                 domain = sh->rx_domain;
3631         else
3632                 domain = sh->tx_domain;
3633         /* Register new encap/decap resource. */
3634         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3635                                        &idx);
3636         if (!cache_resource) {
3637                 rte_flow_error_set(ctx->error, ENOMEM,
3638                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3639                                    "cannot allocate resource memory");
3640                 return NULL;
3641         }
3642         *cache_resource = *resource;
3643         cache_resource->idx = idx;
3644         ret = mlx5_flow_os_create_flow_action_packet_reformat
3645                                         (sh->ctx, domain, cache_resource,
3646                                          &cache_resource->action);
3647         if (ret) {
3648                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3649                 rte_flow_error_set(ctx->error, ENOMEM,
3650                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3651                                    NULL, "cannot create action");
3652                 return NULL;
3653         }
3654
3655         return &cache_resource->entry;
3656 }
3657
3658 /**
3659  * Find existing encap/decap resource or create and register a new one.
3660  *
3661  * @param[in, out] dev
3662  *   Pointer to rte_eth_dev structure.
3663  * @param[in, out] resource
3664  *   Pointer to encap/decap resource.
3665  * @param[in, out] dev_flow
3666  *   Pointer to the dev_flow.
3667  * @param[out] error
3668  *   Pointer to the error structure.
3669  *
3670  * @return
3671  *   0 on success, a negative errno value otherwise and rte_errno is set.
3672  */
3673 static int
3674 flow_dv_encap_decap_resource_register
3675                         (struct rte_eth_dev *dev,
3676                          struct mlx5_flow_dv_encap_decap_resource *resource,
3677                          struct mlx5_flow *dev_flow,
3678                          struct rte_flow_error *error)
3679 {
3680         struct mlx5_priv *priv = dev->data->dev_private;
3681         struct mlx5_dev_ctx_shared *sh = priv->sh;
3682         struct mlx5_hlist_entry *entry;
3683         union {
3684                 struct {
3685                         uint32_t ft_type:8;
3686                         uint32_t refmt_type:8;
3687                         /*
3688                          * Header reformat actions can be shared between
3689                          * non-root tables. One bit to indicate non-root
3690                          * table or not.
3691                          */
3692                         uint32_t is_root:1;
3693                         uint32_t reserve:15;
3694                 };
3695                 uint32_t v32;
3696         } encap_decap_key = {
3697                 {
3698                         .ft_type = resource->ft_type,
3699                         .refmt_type = resource->reformat_type,
3700                         .is_root = !!dev_flow->dv.group,
3701                         .reserve = 0,
3702                 }
3703         };
3704         struct mlx5_flow_cb_ctx ctx = {
3705                 .error = error,
3706                 .data = resource,
3707         };
3708         uint64_t key64;
3709
3710         resource->flags = dev_flow->dv.group ? 0 : 1;
3711         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3712                                  sizeof(encap_decap_key.v32), 0);
3713         if (resource->reformat_type !=
3714             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3715             resource->size)
3716                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3717         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3718         if (!entry)
3719                 return -rte_errno;
3720         resource = container_of(entry, typeof(*resource), entry);
3721         dev_flow->dv.encap_decap = resource;
3722         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3723         return 0;
3724 }
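
/*
 * Key derivation sketch (illustrative only): the registration key packs
 * the table type, reformat type and root/non-root flag into 32 bits and
 * folds them through a raw checksum; encap actions additionally fold in
 * the raw header bytes, so identical headers share one cache entry.
 * Reusing the key layout shown above with a hypothetical 50-byte VXLAN
 * header buffer:
 *
 * @code
 * uint64_t key = __rte_raw_cksum(&encap_decap_key.v32,
 *				  sizeof(encap_decap_key.v32), 0);
 * key = __rte_raw_cksum(vxlan_hdr_buf, 50, key);	// Encap only.
 * @endcode
 */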
3725
3726 /**
3727  * Find existing table jump resource or create and register a new one.
3728  *
3729  * @param[in, out] dev
3730  *   Pointer to rte_eth_dev structure.
3731  * @param[in, out] tbl
3732  *   Pointer to flow table resource.
3733  * @param[in, out] dev_flow
3734  *   Pointer to the dev_flow.
3735  * @param[out] error
3736  *   Pointer to the error structure.
3737  *
3738  * @return
3739  *   0 on success, a negative errno value otherwise and rte_errno is set.
3740  */
3741 static int
3742 flow_dv_jump_tbl_resource_register
3743                         (struct rte_eth_dev *dev __rte_unused,
3744                          struct mlx5_flow_tbl_resource *tbl,
3745                          struct mlx5_flow *dev_flow,
3746                          struct rte_flow_error *error __rte_unused)
3747 {
3748         struct mlx5_flow_tbl_data_entry *tbl_data =
3749                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3750
3751         MLX5_ASSERT(tbl);
3752         MLX5_ASSERT(tbl_data->jump.action);
3753         dev_flow->handle->rix_jump = tbl_data->idx;
3754         dev_flow->dv.jump = &tbl_data->jump;
3755         return 0;
3756 }
3757
3758 int
3759 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3760                          struct mlx5_cache_entry *entry, void *cb_ctx)
3761 {
3762         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3763         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3764         struct mlx5_flow_dv_port_id_action_resource *res =
3765                         container_of(entry, typeof(*res), entry);
3766
3767         return ref->port_id != res->port_id;
3768 }
3769
3770 struct mlx5_cache_entry *
3771 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3772                           struct mlx5_cache_entry *entry __rte_unused,
3773                           void *cb_ctx)
3774 {
3775         struct mlx5_dev_ctx_shared *sh = list->ctx;
3776         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3777         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3778         struct mlx5_flow_dv_port_id_action_resource *cache;
3779         uint32_t idx;
3780         int ret;
3781
3782         /* Register new port id action resource. */
3783         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3784         if (!cache) {
3785                 rte_flow_error_set(ctx->error, ENOMEM,
3786                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3787                                    "cannot allocate port_id action cache memory");
3788                 return NULL;
3789         }
3790         *cache = *ref;
3791         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3792                                                         ref->port_id,
3793                                                         &cache->action);
3794         if (ret) {
3795                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3796                 rte_flow_error_set(ctx->error, ENOMEM,
3797                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3798                                    "cannot create action");
3799                 return NULL;
3800         }
3801         cache->idx = idx;
3802         return &cache->entry;
3803 }
3804
3805 /**
3806  * Find existing table port ID resource or create and register a new one.
3807  *
3808  * @param[in, out] dev
3809  *   Pointer to rte_eth_dev structure.
3810  * @param[in, out] resource
3811  *   Pointer to port ID action resource.
3812  * @param[in, out] dev_flow
3813  *   Pointer to the dev_flow.
3814  * @param[out] error
3815  *   Pointer to the error structure.
3816  *
3817  * @return
3818  *   0 on success, a negative errno value otherwise and rte_errno is set.
3819  */
3820 static int
3821 flow_dv_port_id_action_resource_register
3822                         (struct rte_eth_dev *dev,
3823                          struct mlx5_flow_dv_port_id_action_resource *resource,
3824                          struct mlx5_flow *dev_flow,
3825                          struct rte_flow_error *error)
3826 {
3827         struct mlx5_priv *priv = dev->data->dev_private;
3828         struct mlx5_cache_entry *entry;
3829         struct mlx5_flow_dv_port_id_action_resource *cache;
3830         struct mlx5_flow_cb_ctx ctx = {
3831                 .error = error,
3832                 .data = resource,
3833         };
3834
3835         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3836         if (!entry)
3837                 return -rte_errno;
3838         cache = container_of(entry, typeof(*cache), entry);
3839         dev_flow->dv.port_id_action = cache;
3840         dev_flow->handle->rix_port_id_action = cache->idx;
3841         return 0;
3842 }
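
/*
 * Usage sketch (hypothetical caller, illustration only): translating an
 * RTE_FLOW_ACTION_TYPE_PORT_ID action reduces to filling the resource and
 * registering it; the cache list reuses an existing DR action when one
 * with the same port already exists. The full translation also resolves
 * the E-Switch vport from the DPDK port id, omitted here.
 *
 * @code
 * const struct rte_flow_action_port_id *conf = action->conf;
 * struct mlx5_flow_dv_port_id_action_resource res = {
 *	.port_id = conf->id,
 * };
 *
 * if (flow_dv_port_id_action_resource_register(dev, &res, dev_flow,
 *					        error))
 *	return -rte_errno;
 * @endcode
 */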
3843
3844 int
3845 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3846                          struct mlx5_cache_entry *entry, void *cb_ctx)
3847 {
3848         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3849         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3850         struct mlx5_flow_dv_push_vlan_action_resource *res =
3851                         container_of(entry, typeof(*res), entry);
3852
3853         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3854 }
3855
3856 struct mlx5_cache_entry *
3857 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3858                           struct mlx5_cache_entry *entry __rte_unused,
3859                           void *cb_ctx)
3860 {
3861         struct mlx5_dev_ctx_shared *sh = list->ctx;
3862         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3863         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3864         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3865         struct mlx5dv_dr_domain *domain;
3866         uint32_t idx;
3867         int ret;
3868
3869         /* Register new push VLAN action resource. */
3870         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3871         if (!cache) {
3872                 rte_flow_error_set(ctx->error, ENOMEM,
3873                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3874                                    "cannot allocate push_vlan action cache memory");
3875                 return NULL;
3876         }
3877         *cache = *ref;
3878         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3879                 domain = sh->fdb_domain;
3880         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3881                 domain = sh->rx_domain;
3882         else
3883                 domain = sh->tx_domain;
3884         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3885                                                         &cache->action);
3886         if (ret) {
3887                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3888                 rte_flow_error_set(ctx->error, ENOMEM,
3889                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3890                                    "cannot create push vlan action");
3891                 return NULL;
3892         }
3893         cache->idx = idx;
3894         return &cache->entry;
3895 }
3896
3897 /**
3898  * Find existing push vlan resource or create and register a new one.
3899  *
3900  * @param[in, out] dev
3901  *   Pointer to rte_eth_dev structure.
3902  * @param[in, out] resource
3903  *   Pointer to push VLAN action resource.
3904  * @param[in, out] dev_flow
3905  *   Pointer to the dev_flow.
3906  * @param[out] error
3907  *   Pointer to the error structure.
3908  *
3909  * @return
3910  *   0 on success, a negative errno value otherwise and rte_errno is set.
3911  */
3912 static int
3913 flow_dv_push_vlan_action_resource_register
3914                        (struct rte_eth_dev *dev,
3915                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3916                         struct mlx5_flow *dev_flow,
3917                         struct rte_flow_error *error)
3918 {
3919         struct mlx5_priv *priv = dev->data->dev_private;
3920         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3921         struct mlx5_cache_entry *entry;
3922         struct mlx5_flow_cb_ctx ctx = {
3923                 .error = error,
3924                 .data = resource,
3925         };
3926
3927         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3928         if (!entry)
3929                 return -rte_errno;
3930         cache = container_of(entry, typeof(*cache), entry);
3931
3932         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3933         dev_flow->dv.push_vlan_res = cache;
3934         return 0;
3935 }
3936
3937 /**
3938  * Get the header size of a specific rte_flow_item_type.
3939  *
3940  * @param[in] item_type
3941  *   Tested rte_flow_item_type.
3942  *
3943  * @return
3944  *   Size of the item type's header structure, 0 if void or irrelevant.
3945  */
3946 static size_t
3947 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3948 {
3949         size_t retval;
3950
3951         switch (item_type) {
3952         case RTE_FLOW_ITEM_TYPE_ETH:
3953                 retval = sizeof(struct rte_ether_hdr);
3954                 break;
3955         case RTE_FLOW_ITEM_TYPE_VLAN:
3956                 retval = sizeof(struct rte_vlan_hdr);
3957                 break;
3958         case RTE_FLOW_ITEM_TYPE_IPV4:
3959                 retval = sizeof(struct rte_ipv4_hdr);
3960                 break;
3961         case RTE_FLOW_ITEM_TYPE_IPV6:
3962                 retval = sizeof(struct rte_ipv6_hdr);
3963                 break;
3964         case RTE_FLOW_ITEM_TYPE_UDP:
3965                 retval = sizeof(struct rte_udp_hdr);
3966                 break;
3967         case RTE_FLOW_ITEM_TYPE_TCP:
3968                 retval = sizeof(struct rte_tcp_hdr);
3969                 break;
3970         case RTE_FLOW_ITEM_TYPE_VXLAN:
3971         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3972                 retval = sizeof(struct rte_vxlan_hdr);
3973                 break;
3974         case RTE_FLOW_ITEM_TYPE_GRE:
3975         case RTE_FLOW_ITEM_TYPE_NVGRE:
3976                 retval = sizeof(struct rte_gre_hdr);
3977                 break;
3978         case RTE_FLOW_ITEM_TYPE_MPLS:
3979                 retval = sizeof(struct rte_mpls_hdr);
3980                 break;
3981         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3982         default:
3983                 retval = 0;
3984                 break;
3985         }
3986         return retval;
3987 }
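
/*
 * Size example (illustrative): a classic VXLAN encapsulation stack adds
 * up to 50 bytes of outer headers:
 *
 * @code
 * static const enum rte_flow_item_type stack[] = {
 *	RTE_FLOW_ITEM_TYPE_ETH,	  // 14 bytes
 *	RTE_FLOW_ITEM_TYPE_IPV4,  // 20 bytes
 *	RTE_FLOW_ITEM_TYPE_UDP,	  //  8 bytes
 *	RTE_FLOW_ITEM_TYPE_VXLAN, //  8 bytes
 * };
 * size_t i, total = 0;
 *
 * for (i = 0; i < RTE_DIM(stack); i++)
 *	total += flow_dv_get_item_hdr_len(stack[i]);
 * // total == 50, which must stay within MLX5_ENCAP_MAX_LEN.
 * @endcode
 */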
3988
3989 #define MLX5_ENCAP_IPV4_VERSION         0x40
3990 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3991 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3992 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3993 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3994 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3995 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3996
3997 /**
3998  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
3999  *
4000  * @param[in] items
4001  *   Pointer to rte_flow_item objects list.
4002  * @param[out] buf
4003  *   Pointer to the output buffer.
4004  * @param[out] size
4005  *   Pointer to the output buffer size.
4006  * @param[out] error
4007  *   Pointer to the error structure.
4008  *
4009  * @return
4010  *   0 on success, a negative errno value otherwise and rte_errno is set.
4011  */
4012 static int
4013 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4014                            size_t *size, struct rte_flow_error *error)
4015 {
4016         struct rte_ether_hdr *eth = NULL;
4017         struct rte_vlan_hdr *vlan = NULL;
4018         struct rte_ipv4_hdr *ipv4 = NULL;
4019         struct rte_ipv6_hdr *ipv6 = NULL;
4020         struct rte_udp_hdr *udp = NULL;
4021         struct rte_vxlan_hdr *vxlan = NULL;
4022         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4023         struct rte_gre_hdr *gre = NULL;
4024         size_t len;
4025         size_t temp_size = 0;
4026
4027         if (!items)
4028                 return rte_flow_error_set(error, EINVAL,
4029                                           RTE_FLOW_ERROR_TYPE_ACTION,
4030                                           NULL, "invalid empty data");
4031         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4032                 len = flow_dv_get_item_hdr_len(items->type);
4033                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4034                         return rte_flow_error_set(error, EINVAL,
4035                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4036                                                   (void *)items->type,
4037                                                   "items total size is too big"
4038                                                   " for encap action");
4039                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4040                 switch (items->type) {
4041                 case RTE_FLOW_ITEM_TYPE_ETH:
4042                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4043                         break;
4044                 case RTE_FLOW_ITEM_TYPE_VLAN:
4045                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4046                         if (!eth)
4047                                 return rte_flow_error_set(error, EINVAL,
4048                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4049                                                 (void *)items->type,
4050                                                 "eth header not found");
4051                         if (!eth->ether_type)
4052                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4053                         break;
4054                 case RTE_FLOW_ITEM_TYPE_IPV4:
4055                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4056                         if (!vlan && !eth)
4057                                 return rte_flow_error_set(error, EINVAL,
4058                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4059                                                 (void *)items->type,
4060                                                 "neither eth nor vlan"
4061                                                 " header found");
4062                         if (vlan && !vlan->eth_proto)
4063                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4064                         else if (eth && !eth->ether_type)
4065                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4066                         if (!ipv4->version_ihl)
4067                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4068                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4069                         if (!ipv4->time_to_live)
4070                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4071                         break;
4072                 case RTE_FLOW_ITEM_TYPE_IPV6:
4073                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4074                         if (!vlan && !eth)
4075                                 return rte_flow_error_set(error, EINVAL,
4076                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4077                                                 (void *)items->type,
4078                                                 "neither eth nor vlan"
4079                                                 " header found");
4080                         if (vlan && !vlan->eth_proto)
4081                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4082                         else if (eth && !eth->ether_type)
4083                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4084                         if (!ipv6->vtc_flow)
4085                                 ipv6->vtc_flow =
4086                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4087                         if (!ipv6->hop_limits)
4088                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4089                         break;
4090                 case RTE_FLOW_ITEM_TYPE_UDP:
4091                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4092                         if (!ipv4 && !ipv6)
4093                                 return rte_flow_error_set(error, EINVAL,
4094                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4095                                                 (void *)items->type,
4096                                                 "ip header not found");
4097                         if (ipv4 && !ipv4->next_proto_id)
4098                                 ipv4->next_proto_id = IPPROTO_UDP;
4099                         else if (ipv6 && !ipv6->proto)
4100                                 ipv6->proto = IPPROTO_UDP;
4101                         break;
4102                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4103                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4104                         if (!udp)
4105                                 return rte_flow_error_set(error, EINVAL,
4106                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4107                                                 (void *)items->type,
4108                                                 "udp header not found");
4109                         if (!udp->dst_port)
4110                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4111                         if (!vxlan->vx_flags)
4112                                 vxlan->vx_flags =
4113                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4114                         break;
4115                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4116                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4117                         if (!udp)
4118                                 return rte_flow_error_set(error, EINVAL,
4119                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4120                                                 (void *)items->type,
4121                                                 "udp header not found");
4122                         if (!vxlan_gpe->proto)
4123                                 return rte_flow_error_set(error, EINVAL,
4124                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4125                                                 (void *)items->type,
4126                                                 "next protocol not found");
4127                         if (!udp->dst_port)
4128                                 udp->dst_port =
4129                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4130                         if (!vxlan_gpe->vx_flags)
4131                                 vxlan_gpe->vx_flags =
4132                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4133                         break;
4134                 case RTE_FLOW_ITEM_TYPE_GRE:
4135                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4136                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4137                         if (!gre->proto)
4138                                 return rte_flow_error_set(error, EINVAL,
4139                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4140                                                 (void *)items->type,
4141                                                 "next protocol not found");
4142                         if (!ipv4 && !ipv6)
4143                                 return rte_flow_error_set(error, EINVAL,
4144                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4145                                                 (void *)items->type,
4146                                                 "ip header not found");
4147                         if (ipv4 && !ipv4->next_proto_id)
4148                                 ipv4->next_proto_id = IPPROTO_GRE;
4149                         else if (ipv6 && !ipv6->proto)
4150                                 ipv6->proto = IPPROTO_GRE;
4151                         break;
4152                 case RTE_FLOW_ITEM_TYPE_VOID:
4153                         break;
4154                 default:
4155                         return rte_flow_error_set(error, EINVAL,
4156                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4157                                                   (void *)items->type,
4158                                                   "unsupported item type");
4159                         break;
4160                 }
4161                 temp_size += len;
4162         }
4163         *size = temp_size;
4164         return 0;
4165 }
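
/*
 * Conversion sketch (illustrative): for a VXLAN encap definition whose
 * optional fields are left zero, the routine above emits a contiguous
 * 50-byte header and fills in the defaults: EtherType, IPv4
 * version/IHL/TTL, next protocol UDP, UDP destination port 4789
 * (MLX5_UDP_PORT_VXLAN) and the VXLAN "I" flag. Values below are made up
 * and error handling is elided:
 *
 * @code
 * struct rte_flow_item_eth eth = { 0 };
 * struct rte_flow_item_ipv4 ipv4 = { 0 };
 * struct rte_flow_item_udp udp = { 0 };
 * struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
 * struct rte_flow_item items[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
 *	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * uint8_t buf[MLX5_ENCAP_MAX_LEN];
 * size_t size = 0;
 *
 * if (flow_dv_convert_encap_data(items, buf, &size, error))
 *	return -rte_errno;
 * // size == 50 (ETH + IPV4 + UDP + VXLAN).
 * @endcode
 */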
4166
4167 static int
4168 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4169 {
4170         struct rte_ether_hdr *eth = NULL;
4171         struct rte_vlan_hdr *vlan = NULL;
4172         struct rte_ipv6_hdr *ipv6 = NULL;
4173         struct rte_udp_hdr *udp = NULL;
4174         char *next_hdr;
4175         uint16_t proto;
4176
4177         eth = (struct rte_ether_hdr *)data;
4178         next_hdr = (char *)(eth + 1);
4179         proto = RTE_BE16(eth->ether_type);
4180
4181         /* VLAN skipping */
4182         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4183                 vlan = (struct rte_vlan_hdr *)next_hdr;
4184                 proto = RTE_BE16(vlan->eth_proto);
4185                 next_hdr += sizeof(struct rte_vlan_hdr);
4186         }
4187
4188         /* HW calculates the IPv4 checksum, no need to proceed. */
4189         if (proto == RTE_ETHER_TYPE_IPV4)
4190                 return 0;
4191
4192         /* Non-IPv4/IPv6 header, not supported. */
4193         if (proto != RTE_ETHER_TYPE_IPV6) {
4194                 return rte_flow_error_set(error, ENOTSUP,
4195                                           RTE_FLOW_ERROR_TYPE_ACTION,
4196                                           NULL, "Cannot offload non IPv4/IPv6");
4197         }
4198
4199         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4200
4201         /* Ignore non-UDP. */
4202         if (ipv6->proto != IPPROTO_UDP)
4203                 return 0;
4204
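	/*
	 * The UDP header is assumed to follow the base IPv6 header
	 * directly, i.e. no IPv6 extension headers in between.
	 */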
4205         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4206         udp->dgram_cksum = 0;
4207
4208         return 0;
4209 }
4210
4211 /**
4212  * Convert L2 encap action to DV specification.
4213  *
4214  * @param[in] dev
4215  *   Pointer to rte_eth_dev structure.
4216  * @param[in] action
4217  *   Pointer to action structure.
4218  * @param[in, out] dev_flow
4219  *   Pointer to the mlx5_flow.
4220  * @param[in] transfer
4221  *   Mark if the flow is E-Switch flow.
4222  * @param[out] error
4223  *   Pointer to the error structure.
4224  *
4225  * @return
4226  *   0 on success, a negative errno value otherwise and rte_errno is set.
4227  */
4228 static int
4229 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4230                                const struct rte_flow_action *action,
4231                                struct mlx5_flow *dev_flow,
4232                                uint8_t transfer,
4233                                struct rte_flow_error *error)
4234 {
4235         const struct rte_flow_item *encap_data;
4236         const struct rte_flow_action_raw_encap *raw_encap_data;
4237         struct mlx5_flow_dv_encap_decap_resource res = {
4238                 .reformat_type =
4239                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4240                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4241                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4242         };
4243
4244         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4245                 raw_encap_data =
4246                         (const struct rte_flow_action_raw_encap *)action->conf;
4247                 res.size = raw_encap_data->size;
4248                 memcpy(res.buf, raw_encap_data->data, res.size);
4249         } else {
4250                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4251                         encap_data =
4252                                 ((const struct rte_flow_action_vxlan_encap *)
4253                                                 action->conf)->definition;
4254                 else
4255                         encap_data =
4256                                 ((const struct rte_flow_action_nvgre_encap *)
4257                                                 action->conf)->definition;
4258                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4259                                                &res.size, error))
4260                         return -rte_errno;
4261         }
4262         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4263                 return -rte_errno;
4264         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4265                 return rte_flow_error_set(error, EINVAL,
4266                                           RTE_FLOW_ERROR_TYPE_ACTION,
4267                                           NULL, "can't create L2 encap action");
4268         return 0;
4269 }
4270
4271 /**
4272  * Convert L2 decap action to DV specification.
4273  *
4274  * @param[in] dev
4275  *   Pointer to rte_eth_dev structure.
4276  * @param[in, out] dev_flow
4277  *   Pointer to the mlx5_flow.
4278  * @param[in] transfer
4279  *   Mark if the flow is E-Switch flow.
4280  * @param[out] error
4281  *   Pointer to the error structure.
4282  *
4283  * @return
4284  *   0 on success, a negative errno value otherwise and rte_errno is set.
4285  */
4286 static int
4287 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4288                                struct mlx5_flow *dev_flow,
4289                                uint8_t transfer,
4290                                struct rte_flow_error *error)
4291 {
4292         struct mlx5_flow_dv_encap_decap_resource res = {
4293                 .size = 0,
4294                 .reformat_type =
4295                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4296                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4297                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4298         };
4299
4300         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4301                 return rte_flow_error_set(error, EINVAL,
4302                                           RTE_FLOW_ERROR_TYPE_ACTION,
4303                                           NULL, "can't create L2 decap action");
4304         return 0;
4305 }
4306
4307 /**
4308  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4309  *
4310  * @param[in] dev
4311  *   Pointer to rte_eth_dev structure.
4312  * @param[in] action
4313  *   Pointer to action structure.
4314  * @param[in, out] dev_flow
4315  *   Pointer to the mlx5_flow.
4316  * @param[in] attr
4317  *   Pointer to the flow attributes.
4318  * @param[out] error
4319  *   Pointer to the error structure.
4320  *
4321  * @return
4322  *   0 on success, a negative errno value otherwise and rte_errno is set.
4323  */
4324 static int
4325 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4326                                 const struct rte_flow_action *action,
4327                                 struct mlx5_flow *dev_flow,
4328                                 const struct rte_flow_attr *attr,
4329                                 struct rte_flow_error *error)
4330 {
4331         const struct rte_flow_action_raw_encap *encap_data;
4332         struct mlx5_flow_dv_encap_decap_resource res;
4333
4334         memset(&res, 0, sizeof(res));
4335         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4336         res.size = encap_data->size;
4337         memcpy(res.buf, encap_data->data, res.size);
4338         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4339                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4340                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4341         if (attr->transfer)
4342                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4343         else
4344                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4345                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4346         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4347                 return rte_flow_error_set(error, EINVAL,
4348                                           RTE_FLOW_ERROR_TYPE_ACTION,
4349                                           NULL, "can't create encap action");
4350         return 0;
4351 }
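
/*
 * Decision example (illustrative): the buffer size selects the reformat
 * direction. A short buffer holds only the inner L2 header to restore,
 * so it is treated as an L3 tunnel decap; a long buffer carries the whole
 * outer stack to prepend and becomes an L2-to-L3 tunnel encap. Assuming a
 * bare 14-byte Ethernet header is below MLX5_ENCAPSULATION_DECISION_SIZE:
 *
 * @code
 * const struct rte_flow_action_raw_encap decap_conf = {
 *	.data = inner_eth_hdr,	// Hypothetical 14-byte L2 header.
 *	.size = 14,		// -> ..._L3_TUNNEL_TO_L2 (decap).
 * };
 * @endcode
 */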
4352
4353 /**
4354  * Create action push VLAN.
4355  *
4356  * @param[in] dev
4357  *   Pointer to rte_eth_dev structure.
4358  * @param[in] attr
4359  *   Pointer to the flow attributes.
4360  * @param[in] vlan
4361  *   Pointer to the vlan to push to the Ethernet header.
4362  * @param[in, out] dev_flow
4363  *   Pointer to the mlx5_flow.
4364  * @param[out] error
4365  *   Pointer to the error structure.
4366  *
4367  * @return
4368  *   0 on success, a negative errno value otherwise and rte_errno is set.
4369  */
4370 static int
4371 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4372                                 const struct rte_flow_attr *attr,
4373                                 const struct rte_vlan_hdr *vlan,
4374                                 struct mlx5_flow *dev_flow,
4375                                 struct rte_flow_error *error)
4376 {
4377         struct mlx5_flow_dv_push_vlan_action_resource res;
4378
4379         memset(&res, 0, sizeof(res));
4380         res.vlan_tag =
4381                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4382                                  vlan->vlan_tci);
4383         if (attr->transfer)
4384                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4385         else
4386                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4387                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4388         return flow_dv_push_vlan_action_resource_register
4389                                             (dev, &res, dev_flow, error);
4390 }
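
/*
 * Tag packing example (illustrative, host-order inputs assumed): for
 * TPID 0x8100, PCP 5 and VID 100 the TCI is (5 << 13) | 100 = 0xa064,
 * so the 32-bit push value becomes:
 *
 * @code
 * uint32_t tag = ((uint32_t)0x8100 << 16) | 0xa064;	// 0x8100a064
 * rte_be32_t vlan_tag = rte_cpu_to_be_32(tag);
 * @endcode
 */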
4391
4392 /**
4393  * Validate the modify-header actions.
4394  *
4395  * @param[in] action_flags
4396  *   Holds the actions detected until now.
4397  * @param[in] action
4398  *   Pointer to the modify action.
4399  * @param[out] error
4400  *   Pointer to error structure.
4401  *
4402  * @return
4403  *   0 on success, a negative errno value otherwise and rte_errno is set.
4404  */
4405 static int
4406 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4407                                    const struct rte_flow_action *action,
4408                                    struct rte_flow_error *error)
4409 {
4410         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4411                 return rte_flow_error_set(error, EINVAL,
4412                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4413                                           NULL, "action configuration not set");
4414         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4415                 return rte_flow_error_set(error, EINVAL,
4416                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4417                                           "can't have encap action before"
4418                                           " modify action");
4419         return 0;
4420 }
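
/*
 * Ordering example (illustrative): an action list such as
 * [RAW_ENCAP, SET_IPV4_SRC, QUEUE] fails this check because the
 * modify-header action follows an encap; header fields must be rewritten
 * before new outer headers are prepended.
 */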
4421
4422 /**
4423  * Validate the modify-header MAC address actions.
4424  *
4425  * @param[in] action_flags
4426  *   Holds the actions detected until now.
4427  * @param[in] action
4428  *   Pointer to the modify action.
4429  * @param[in] item_flags
4430  *   Holds the items detected.
4431  * @param[out] error
4432  *   Pointer to error structure.
4433  *
4434  * @return
4435  *   0 on success, a negative errno value otherwise and rte_errno is set.
4436  */
4437 static int
4438 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4439                                    const struct rte_flow_action *action,
4440                                    const uint64_t item_flags,
4441                                    struct rte_flow_error *error)
4442 {
4443         int ret = 0;
4444
4445         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4446         if (!ret) {
4447                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4448                         return rte_flow_error_set(error, EINVAL,
4449                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4450                                                   NULL,
4451                                                   "no L2 item in pattern");
4452         }
4453         return ret;
4454 }
4455
4456 /**
4457  * Validate the modify-header IPv4 address actions.
4458  *
4459  * @param[in] action_flags
4460  *   Holds the actions detected until now.
4461  * @param[in] action
4462  *   Pointer to the modify action.
4463  * @param[in] item_flags
4464  *   Holds the items detected.
4465  * @param[out] error
4466  *   Pointer to error structure.
4467  *
4468  * @return
4469  *   0 on success, a negative errno value otherwise and rte_errno is set.
4470  */
4471 static int
4472 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4473                                     const struct rte_flow_action *action,
4474                                     const uint64_t item_flags,
4475                                     struct rte_flow_error *error)
4476 {
4477         int ret = 0;
4478         uint64_t layer;
4479
4480         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4481         if (!ret) {
4482                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4483                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4484                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4485                 if (!(item_flags & layer))
4486                         return rte_flow_error_set(error, EINVAL,
4487                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4488                                                   NULL,
4489                                                   "no ipv4 item in pattern");
4490         }
4491         return ret;
4492 }
4493
4494 /**
4495  * Validate the modify-header IPv6 address actions.
4496  *
4497  * @param[in] action_flags
4498  *   Holds the actions detected until now.
4499  * @param[in] action
4500  *   Pointer to the modify action.
4501  * @param[in] item_flags
4502  *   Holds the items detected.
4503  * @param[out] error
4504  *   Pointer to error structure.
4505  *
4506  * @return
4507  *   0 on success, a negative errno value otherwise and rte_errno is set.
4508  */
4509 static int
4510 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4511                                     const struct rte_flow_action *action,
4512                                     const uint64_t item_flags,
4513                                     struct rte_flow_error *error)
4514 {
4515         int ret = 0;
4516         uint64_t layer;
4517
4518         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4519         if (!ret) {
4520                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4521                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4522                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4523                 if (!(item_flags & layer))
4524                         return rte_flow_error_set(error, EINVAL,
4525                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4526                                                   NULL,
4527                                                   "no ipv6 item in pattern");
4528         }
4529         return ret;
4530 }
4531
4532 /**
4533  * Validate the modify-header TP actions.
4534  *
4535  * @param[in] action_flags
4536  *   Holds the actions detected until now.
4537  * @param[in] action
4538  *   Pointer to the modify action.
4539  * @param[in] item_flags
4540  *   Holds the items detected.
4541  * @param[out] error
4542  *   Pointer to error structure.
4543  *
4544  * @return
4545  *   0 on success, a negative errno value otherwise and rte_errno is set.
4546  */
4547 static int
4548 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4549                                   const struct rte_flow_action *action,
4550                                   const uint64_t item_flags,
4551                                   struct rte_flow_error *error)
4552 {
4553         int ret = 0;
4554         uint64_t layer;
4555
4556         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4557         if (!ret) {
4558                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4559                                  MLX5_FLOW_LAYER_INNER_L4 :
4560                                  MLX5_FLOW_LAYER_OUTER_L4;
4561                 if (!(item_flags & layer))
4562                         return rte_flow_error_set(error, EINVAL,
4563                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4564                                                   NULL, "no transport layer "
4565                                                   "in pattern");
4566         }
4567         return ret;
4568 }
4569
4570 /**
4571  * Validate the modify-header actions of increment/decrement
4572  * TCP Sequence-number.
4573  *
4574  * @param[in] action_flags
4575  *   Holds the actions detected until now.
4576  * @param[in] action
4577  *   Pointer to the modify action.
4578  * @param[in] item_flags
4579  *   Holds the items detected.
4580  * @param[out] error
4581  *   Pointer to error structure.
4582  *
4583  * @return
4584  *   0 on success, a negative errno value otherwise and rte_errno is set.
4585  */
4586 static int
4587 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4588                                        const struct rte_flow_action *action,
4589                                        const uint64_t item_flags,
4590                                        struct rte_flow_error *error)
4591 {
4592         int ret = 0;
4593         uint64_t layer;
4594
4595         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4596         if (!ret) {
4597                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4598                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4599                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4600                 if (!(item_flags & layer))
4601                         return rte_flow_error_set(error, EINVAL,
4602                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4603                                                   NULL, "no TCP item in"
4604                                                   " pattern");
4605                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4606                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4607                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4608                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4609                         return rte_flow_error_set(error, EINVAL,
4610                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4611                                                   NULL,
4612                                                   "cannot decrease and increase"
4613                                                   " TCP sequence number"
4614                                                   " at the same time");
4615         }
4616         return ret;
4617 }
4618
4619 /**
4620  * Validate the modify-header actions of increment/decrement
4621  * TCP Acknowledgment number.
4622  *
4623  * @param[in] action_flags
4624  *   Holds the actions detected until now.
4625  * @param[in] action
4626  *   Pointer to the modify action.
4627  * @param[in] item_flags
4628  *   Holds the items detected.
4629  * @param[out] error
4630  *   Pointer to error structure.
4631  *
4632  * @return
4633  *   0 on success, a negative errno value otherwise and rte_errno is set.
4634  */
4635 static int
4636 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4637                                        const struct rte_flow_action *action,
4638                                        const uint64_t item_flags,
4639                                        struct rte_flow_error *error)
4640 {
4641         int ret = 0;
4642         uint64_t layer;
4643
4644         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4645         if (!ret) {
4646                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4647                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4648                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4649                 if (!(item_flags & layer))
4650                         return rte_flow_error_set(error, EINVAL,
4651                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4652                                                   NULL, "no TCP item in"
4653                                                   " pattern");
4654                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4655                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4656                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4657                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4658                         return rte_flow_error_set(error, EINVAL,
4659                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4660                                                   NULL,
4661                                                   "cannot decrease and increase"
4662                                                   " TCP acknowledgment number"
4663                                                   " at the same time");
4664         }
4665         return ret;
4666 }
4667
4668 /**
4669  * Validate the modify-header TTL actions.
4670  *
4671  * @param[in] action_flags
4672  *   Holds the actions detected until now.
4673  * @param[in] action
4674  *   Pointer to the modify action.
4675  * @param[in] item_flags
4676  *   Holds the items detected.
4677  * @param[out] error
4678  *   Pointer to error structure.
4679  *
4680  * @return
4681  *   0 on success, a negative errno value otherwise and rte_errno is set.
4682  */
4683 static int
4684 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4685                                    const struct rte_flow_action *action,
4686                                    const uint64_t item_flags,
4687                                    struct rte_flow_error *error)
4688 {
4689         int ret = 0;
4690         uint64_t layer;
4691
4692         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4693         if (!ret) {
4694                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4695                                  MLX5_FLOW_LAYER_INNER_L3 :
4696                                  MLX5_FLOW_LAYER_OUTER_L3;
4697                 if (!(item_flags & layer))
4698                         return rte_flow_error_set(error, EINVAL,
4699                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4700                                                   NULL,
4701                                                   "no IP protocol in pattern");
4702         }
4703         return ret;
4704 }
4705
4706 /**
4707  * Validate the generic modify field actions.
4708  * @param[in] dev
4709  *   Pointer to the rte_eth_dev structure.
4710  * @param[in] action_flags
4711  *   Holds the actions detected until now.
4712  * @param[in] action
4713  *   Pointer to the modify action.
4714  * @param[in] attr
4715  *   Pointer to the flow attributes.
4716  * @param[out] error
4717  *   Pointer to error structure.
4718  *
4719  * @return
4720  *   Number of header fields to modify (0 or more) on success,
4721  *   a negative errno value otherwise and rte_errno is set.
4722  */
4723 static int
4724 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4725                                    const uint64_t action_flags,
4726                                    const struct rte_flow_action *action,
4727                                    const struct rte_flow_attr *attr,
4728                                    struct rte_flow_error *error)
4729 {
4730         int ret = 0;
4731         struct mlx5_priv *priv = dev->data->dev_private;
4732         struct mlx5_dev_config *config = &priv->config;
4733         const struct rte_flow_action_modify_field *action_modify_field =
4734                 action->conf;
4735         uint32_t dst_width = mlx5_flow_item_field_width(config,
4736                                 action_modify_field->dst.field);
4737         uint32_t src_width = mlx5_flow_item_field_width(config,
4738                                 action_modify_field->src.field);
4739
4740         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4741         if (ret)
4742                 return ret;
4743
4744         if (action_modify_field->width == 0)
4745                 return rte_flow_error_set(error, EINVAL,
4746                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4747                                 "no bits are requested to be modified");
4748         else if (action_modify_field->width > dst_width ||
4749                  action_modify_field->width > src_width)
4750                 return rte_flow_error_set(error, EINVAL,
4751                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4752                                 "cannot modify more bits than"
4753                                 " the width of a field");
4754         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4755             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4756                 if ((action_modify_field->dst.offset +
4757                      action_modify_field->width > dst_width) ||
4758                     (action_modify_field->dst.offset % 32))
4759                         return rte_flow_error_set(error, EINVAL,
4760                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4761                                         "destination offset is too big"
4762                                         " or not aligned to 4 bytes");
4763                 if (action_modify_field->dst.level &&
4764                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4765                         return rte_flow_error_set(error, ENOTSUP,
4766                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4767                                         "inner header fields modification"
4768                                         " is not supported");
4769         }
4770         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4771             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4772                 if (!attr->transfer && !attr->group)
4773                         return rte_flow_error_set(error, ENOTSUP,
4774                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4775                                         "modify field action is not"
4776                                         " supported for group 0");
4777                 if ((action_modify_field->src.offset +
4778                      action_modify_field->width > src_width) ||
4779                     (action_modify_field->src.offset % 32))
4780                         return rte_flow_error_set(error, EINVAL,
4781                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4782                                         "source offset is too big"
4783                                         " or not aligned to 4 bytes");
4784                 if (action_modify_field->src.level &&
4785                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4786                         return rte_flow_error_set(error, ENOTSUP,
4787                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4788                                         "inner header fields modification"
4789                                         " is not supported");
4790         }
4791         if ((action_modify_field->dst.field ==
4792              action_modify_field->src.field) &&
4793             (action_modify_field->dst.level ==
4794              action_modify_field->src.level))
4795                 return rte_flow_error_set(error, EINVAL,
4796                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4797                                 "source and destination fields"
4798                                 " cannot be the same");
4799         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4800             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4801                 return rte_flow_error_set(error, EINVAL,
4802                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4803                                 "immediate value or a pointer to it"
4804                                 " cannot be used as a destination");
4805         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4806             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4807                 return rte_flow_error_set(error, ENOTSUP,
4808                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4809                                 "modifications of an arbitrary"
4810                                 " place in a packet are not supported");
4811         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4812             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4813                 return rte_flow_error_set(error, ENOTSUP,
4814                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4815                                 "modifications of the 802.1Q Tag"
4816                                 " Identifier are not supported");
4817         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4818             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4819                 return rte_flow_error_set(error, ENOTSUP,
4820                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4821                                 "modifications of the VXLAN Network"
4822                                 " Identifier are not supported");
4823         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4824             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4825                 return rte_flow_error_set(error, ENOTSUP,
4826                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4827                                 "modifications of the GENEVE Network"
4828                                 " Identifier are not supported");
4829         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4830             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4831             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4832             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4833                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4834                     !mlx5_flow_ext_mreg_supported(dev))
4835                         return rte_flow_error_set(error, ENOTSUP,
4836                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4837                                         "cannot modify mark or metadata without"
4838                                         " extended metadata register support");
4839         }
4840         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4841                 return rte_flow_error_set(error, ENOTSUP,
4842                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4843                                 "add and sub operations"
4844                                 " are not supported");
4845         return (action_modify_field->width / 32) +
4846                !!(action_modify_field->width % 32);
4847 }
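
/*
 * Return-value sketch (illustrative): rewriting a 48-bit MAC address
 * spans two 32-bit modify-header fields, (48 / 32) + !!(48 % 32) == 2.
 *
 * @code
 * const struct rte_flow_action_modify_field conf = {
 *	.operation = RTE_FLOW_MODIFY_SET,
 *	.dst = { .field = RTE_FLOW_FIELD_MAC_DST },
 *	.src = { .field = RTE_FLOW_FIELD_MAC_SRC },
 *	.width = 48,
 * };
 * @endcode
 */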
4848
4849 /**
4850  * Validate jump action.
4851  *
4852  * @param[in] action
4853  *   Pointer to the jump action.
4854  * @param[in] action_flags
4855  *   Holds the actions detected until now.
4856  * @param[in] attributes
4857  *   Pointer to flow attributes
4858  * @param[in] external
4859  *   Action belongs to flow rule created by request external to PMD.
4860  * @param[out] error
4861  *   Pointer to error structure.
4862  *
4863  * @return
4864  *   0 on success, a negative errno value otherwise and rte_errno is set.
4865  */
4866 static int
4867 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4868                              const struct mlx5_flow_tunnel *tunnel,
4869                              const struct rte_flow_action *action,
4870                              uint64_t action_flags,
4871                              const struct rte_flow_attr *attributes,
4872                              bool external, struct rte_flow_error *error)
4873 {
4874         uint32_t target_group, table;
4875         int ret = 0;
4876         struct flow_grp_info grp_info = {
4877                 .external = !!external,
4878                 .transfer = !!attributes->transfer,
4879                 .fdb_def_rule = 1,
4880                 .std_tbl_fix = 0
4881         };
4882         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4883                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4884                 return rte_flow_error_set(error, EINVAL,
4885                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4886                                           "can't have 2 fate actions in"
4887                                           " the same flow");
4888         if (!action->conf)
4889                 return rte_flow_error_set(error, EINVAL,
4890                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4891                                           NULL, "action configuration not set");
4892         target_group =
4893                 ((const struct rte_flow_action_jump *)action->conf)->group;
4894         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4895                                        &grp_info, error);
4896         if (ret)
4897                 return ret;
4898         if (attributes->group == target_group &&
4899             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4900                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4901                 return rte_flow_error_set(error, EINVAL,
4902                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4903                                           "target group must be other than"
4904                                           " the current flow group");
4905         return 0;
4906 }
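/*
 * Example (illustrative only, not part of the driver): a jump action
 * must target a group other than the rule's own group unless a tunnel
 * set/match action is present:
 *
 *     struct rte_flow_action_jump jump = { .group = 1 };
 *     struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */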
4907
4908 /**
4909  * Validate the port_id action.
4910  *
4911  * @param[in] dev
4912  *   Pointer to rte_eth_dev structure.
4913  * @param[in] action_flags
4914  *   Bit-fields that holds the actions detected until now.
4915  * @param[in] action
4916  *   Port_id RTE action structure.
4917  * @param[in] attr
4918  *   Attributes of flow that includes this action.
4919  * @param[out] error
4920  *   Pointer to error structure.
4921  *
4922  * @return
4923  *   0 on success, a negative errno value otherwise and rte_errno is set.
4924  */
4925 static int
4926 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4927                                 uint64_t action_flags,
4928                                 const struct rte_flow_action *action,
4929                                 const struct rte_flow_attr *attr,
4930                                 struct rte_flow_error *error)
4931 {
4932         const struct rte_flow_action_port_id *port_id;
4933         struct mlx5_priv *act_priv;
4934         struct mlx5_priv *dev_priv;
4935         uint16_t port;
4936
4937         if (!attr->transfer)
4938                 return rte_flow_error_set(error, ENOTSUP,
4939                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4940                                           NULL,
4941                                           "port id action is valid in transfer"
4942                                           " mode only");
4943         if (!action || !action->conf)
4944                 return rte_flow_error_set(error, ENOTSUP,
4945                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4946                                           NULL,
4947                                           "port id action parameters must be"
4948                                           " specified");
4949         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4950                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4951                 return rte_flow_error_set(error, EINVAL,
4952                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4953                                           "can have only one fate action in"
4954                                           " a flow");
4955         dev_priv = mlx5_dev_to_eswitch_info(dev);
4956         if (!dev_priv)
4957                 return rte_flow_error_set(error, rte_errno,
4958                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4959                                           NULL,
4960                                           "failed to obtain E-Switch info");
4961         port_id = action->conf;
4962         port = port_id->original ? dev->data->port_id : port_id->id;
4963         act_priv = mlx5_port_to_eswitch_info(port, false);
4964         if (!act_priv)
4965                 return rte_flow_error_set
4966                                 (error, rte_errno,
4967                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4968                                  "failed to obtain E-Switch port id for port");
4969         if (act_priv->domain_id != dev_priv->domain_id)
4970                 return rte_flow_error_set
4971                                 (error, EINVAL,
4972                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4973                                  "port does not belong to"
4974                                  " E-Switch being configured");
4975         return 0;
4976 }
4977
4978 /**
4979  * Get the maximum number of modify header actions.
4980  *
4981  * @param dev
4982  *   Pointer to rte_eth_dev structure.
4983  * @param flags
4984  *   Flags bits to check if root level.
4985  *
4986  * @return
4987  *   Max number of modify header actions device can support.
4988  */
4989 static inline unsigned int
4990 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4991                               uint64_t flags)
4992 {
4993         /*
4994          * There's no way to directly query the max capacity from FW.
4995          * The maximal value on root table should be assumed to be supported.
4996          */
4997         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4998                 return MLX5_MAX_MODIFY_NUM;
4999         else
5000                 return MLX5_ROOT_TBL_MODIFY_NUM;
5001 }
5002
5003 /**
5004  * Validate the meter action.
5005  *
5006  * @param[in] dev
5007  *   Pointer to rte_eth_dev structure.
5008  * @param[in] action_flags
5009  *   Bit-fields that holds the actions detected until now.
5010  * @param[in] action
5011  *   Pointer to the meter action.
5012  * @param[in] attr
5013  *   Attributes of flow that includes this action.
5014  * @param[out] error
5015  *   Pointer to error structure.
5016  *
5017  * @return
5018  *   0 on success, a negative errno value otherwise and rte_errno is set.
5019  */
5020 static int
5021 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5022                                 uint64_t action_flags,
5023                                 const struct rte_flow_action *action,
5024                                 const struct rte_flow_attr *attr,
5025                                 bool *def_policy,
5026                                 struct rte_flow_error *error)
5027 {
5028         struct mlx5_priv *priv = dev->data->dev_private;
5029         const struct rte_flow_action_meter *am = action->conf;
5030         struct mlx5_flow_meter_info *fm;
5031         struct mlx5_flow_meter_policy *mtr_policy;
5032         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5033
5034         if (!am)
5035                 return rte_flow_error_set(error, EINVAL,
5036                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5037                                           "meter action conf is NULL");
5038
5039         if (action_flags & MLX5_FLOW_ACTION_METER)
5040                 return rte_flow_error_set(error, ENOTSUP,
5041                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5042                                           "meter chaining not supported");
5043         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5044                 return rte_flow_error_set(error, ENOTSUP,
5045                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5046                                           "meter with jump not supported");
5047         if (!priv->mtr_en)
5048                 return rte_flow_error_set(error, ENOTSUP,
5049                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5050                                           NULL,
5051                                           "meter action not supported");
5052         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5053         if (!fm)
5054                 return rte_flow_error_set(error, EINVAL,
5055                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5056                                           "Meter not found");
5057         /* ASO meter can always be shared between different domains. */
5058         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5059             !(fm->transfer == attr->transfer ||
5060               (!fm->ingress && !attr->ingress && attr->egress) ||
5061               (!fm->egress && !attr->egress && attr->ingress)))
5062                 return rte_flow_error_set(error, EINVAL,
5063                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5064                         "Flow attributes are either invalid "
5065                         "or have a domain conflict with the "
5066                         "current meter attributes");
5067         if (fm->def_policy) {
5068                 if (!((attr->transfer &&
5069                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5070                         (attr->egress &&
5071                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5072                         (attr->ingress &&
5073                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5074                         return rte_flow_error_set(error, EINVAL,
5075                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5076                                           "Flow attributes have a domain "
5077                                           "conflict with the current "
5078                                           "meter domain attributes");
5079                 *def_policy = true;
5080         } else {
5081                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5082                                                 fm->policy_id, NULL);
5083                 if (!mtr_policy)
5084                         return rte_flow_error_set(error, EINVAL,
5085                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5086                                           "Invalid policy id for meter");
5087                 if (!((attr->transfer && mtr_policy->transfer) ||
5088                         (attr->egress && mtr_policy->egress) ||
5089                         (attr->ingress && mtr_policy->ingress)))
5090                         return rte_flow_error_set(error, EINVAL,
5091                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5092                                           "Flow attributes have a domain "
5093                                           "conflict with the current "
5094                                           "meter domain attributes");
5095                 *def_policy = false;
5096         }
5097         return 0;
5098 }
5099
5100 /**
5101  * Validate the age action.
5102  *
5103  * @param[in] action_flags
5104  *   Holds the actions detected until now.
5105  * @param[in] action
5106  *   Pointer to the age action.
5107  * @param[in] dev
5108  *   Pointer to the Ethernet device structure.
5109  * @param[out] error
5110  *   Pointer to error structure.
5111  *
5112  * @return
5113  *   0 on success, a negative errno value otherwise and rte_errno is set.
5114  */
5115 static int
5116 flow_dv_validate_action_age(uint64_t action_flags,
5117                             const struct rte_flow_action *action,
5118                             struct rte_eth_dev *dev,
5119                             struct rte_flow_error *error)
5120 {
5121         struct mlx5_priv *priv = dev->data->dev_private;
5122         const struct rte_flow_action_age *age = action->conf;
5123
5124         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5125             !priv->sh->aso_age_mng))
5126                 return rte_flow_error_set(error, ENOTSUP,
5127                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5128                                           NULL,
5129                                           "age action not supported");
5130         if (!(action->conf))
5131                 return rte_flow_error_set(error, EINVAL,
5132                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5133                                           "configuration cannot be NULL");
5134         if (!(age->timeout))
5135                 return rte_flow_error_set(error, EINVAL,
5136                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5137                                           "invalid timeout value 0");
5138         if (action_flags & MLX5_FLOW_ACTION_AGE)
5139                 return rte_flow_error_set(error, EINVAL,
5140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5141                                           "duplicate age actions set");
5142         return 0;
5143 }
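/*
 * Example (illustrative only, not part of the driver): an age action
 * with a non-zero timeout in seconds, as required by the validation
 * above:
 *
 *     struct rte_flow_action_age age = { .timeout = 10 };
 *     struct rte_flow_action act = {
 *         .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age,
 *     };
 */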
5144
5145 /**
5146  * Validate the modify-header IPv4 DSCP actions.
5147  *
5148  * @param[in] action_flags
5149  *   Holds the actions detected until now.
5150  * @param[in] action
5151  *   Pointer to the modify action.
5152  * @param[in] item_flags
5153  *   Holds the items detected.
5154  * @param[out] error
5155  *   Pointer to error structure.
5156  *
5157  * @return
5158  *   0 on success, a negative errno value otherwise and rte_errno is set.
5159  */
5160 static int
5161 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5162                                          const struct rte_flow_action *action,
5163                                          const uint64_t item_flags,
5164                                          struct rte_flow_error *error)
5165 {
5166         int ret = 0;
5167
5168         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5169         if (!ret) {
5170                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5171                         return rte_flow_error_set(error, EINVAL,
5172                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5173                                                   NULL,
5174                                                   "no ipv4 item in pattern");
5175         }
5176         return ret;
5177 }
5178
5179 /**
5180  * Validate the modify-header IPv6 DSCP actions.
5181  *
5182  * @param[in] action_flags
5183  *   Holds the actions detected until now.
5184  * @param[in] action
5185  *   Pointer to the modify action.
5186  * @param[in] item_flags
5187  *   Holds the items detected.
5188  * @param[out] error
5189  *   Pointer to error structure.
5190  *
5191  * @return
5192  *   0 on success, a negative errno value otherwise and rte_errno is set.
5193  */
5194 static int
5195 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5196                                          const struct rte_flow_action *action,
5197                                          const uint64_t item_flags,
5198                                          struct rte_flow_error *error)
5199 {
5200         int ret = 0;
5201
5202         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5203         if (!ret) {
5204                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5205                         return rte_flow_error_set(error, EINVAL,
5206                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5207                                                   NULL,
5208                                                   "no ipv6 item in pattern");
5209         }
5210         return ret;
5211 }
5212
5213 /**
5214  * Match modify-header resource.
5215  *
5216  * @param list
5217  *   Pointer to the hash list.
5218  * @param entry
5219  *   Pointer to exist resource entry object.
5220  * @param key
5221  *   Key of the new entry.
5222  * @param ctx
5223  *   Pointer to new modify-header resource.
5224  *
5225  * @return
5226  *   0 on matching, non-zero otherwise.
5227  */
5228 int
5229 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5230                         struct mlx5_hlist_entry *entry,
5231                         uint64_t key __rte_unused, void *cb_ctx)
5232 {
5233         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5234         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5235         struct mlx5_flow_dv_modify_hdr_resource *resource =
5236                         container_of(entry, typeof(*resource), entry);
5237         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5238
5239         key_len += ref->actions_num * sizeof(ref->actions[0]);
5240         return ref->actions_num != resource->actions_num ||
5241                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5242 }
5243
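/**
 * Allocate a modify-header resource and create its modification action.
 *
 * @param list
 *   Pointer to the hash list.
 * @param key
 *   Key of the new entry (unused, the key fields are taken from @p cb_ctx).
 * @param cb_ctx
 *   Pointer to the context holding the new modify-header resource.
 *
 * @return
 *   Pointer to the new hash list entry on success, NULL otherwise and
 *   the rte_flow error is set in the context.
 */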
5244 struct mlx5_hlist_entry *
5245 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5246                          void *cb_ctx)
5247 {
5248         struct mlx5_dev_ctx_shared *sh = list->ctx;
5249         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5250         struct mlx5dv_dr_domain *ns;
5251         struct mlx5_flow_dv_modify_hdr_resource *entry;
5252         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5253         int ret;
5254         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5255         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5256
5257         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5258                             SOCKET_ID_ANY);
5259         if (!entry) {
5260                 rte_flow_error_set(ctx->error, ENOMEM,
5261                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5262                                    "cannot allocate resource memory");
5263                 return NULL;
5264         }
5265         rte_memcpy(&entry->ft_type,
5266                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5267                    key_len + data_len);
5268         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5269                 ns = sh->fdb_domain;
5270         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5271                 ns = sh->tx_domain;
5272         else
5273                 ns = sh->rx_domain;
5274         ret = mlx5_flow_os_create_flow_action_modify_header
5275                                         (sh->ctx, ns, entry,
5276                                          data_len, &entry->action);
5277         if (ret) {
5278                 mlx5_free(entry);
5279                 rte_flow_error_set(ctx->error, ENOMEM,
5280                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5281                                    NULL, "cannot create modification action");
5282                 return NULL;
5283         }
5284         return &entry->entry;
5285 }
5286
5287 /**
5288  * Validate the sample action.
5289  *
5290  * @param[in, out] action_flags
5291  *   Holds the actions detected until now.
5292  * @param[in] action
5293  *   Pointer to the sample action.
5294  * @param[in] dev
5295  *   Pointer to the Ethernet device structure.
5296  * @param[in] attr
5297  *   Attributes of flow that includes this action.
5298  * @param[in] item_flags
5299  *   Holds the items detected.
5300  * @param[in] rss
5301  *   Pointer to the RSS action.
5302  * @param[out] sample_rss
5303  *   Pointer to the RSS action in sample action list.
5304  * @param[out] count
5305  *   Pointer to the COUNT action in sample action list.
5306  * @param[out] fdb_mirror_limit
5307  *   Pointer to the FDB mirror limitation flag.
5308  * @param[out] error
5309  *   Pointer to error structure.
5310  *
5311  * @return
5312  *   0 on success, a negative errno value otherwise and rte_errno is set.
5313  */
5314 static int
5315 flow_dv_validate_action_sample(uint64_t *action_flags,
5316                                const struct rte_flow_action *action,
5317                                struct rte_eth_dev *dev,
5318                                const struct rte_flow_attr *attr,
5319                                uint64_t item_flags,
5320                                const struct rte_flow_action_rss *rss,
5321                                const struct rte_flow_action_rss **sample_rss,
5322                                const struct rte_flow_action_count **count,
5323                                int *fdb_mirror_limit,
5324                                struct rte_flow_error *error)
5325 {
5326         struct mlx5_priv *priv = dev->data->dev_private;
5327         struct mlx5_dev_config *dev_conf = &priv->config;
5328         const struct rte_flow_action_sample *sample = action->conf;
5329         const struct rte_flow_action *act;
5330         uint64_t sub_action_flags = 0;
5331         uint16_t queue_index = 0xFFFF;
5332         int actions_n = 0;
5333         int ret;
5334
5335         if (!sample)
5336                 return rte_flow_error_set(error, EINVAL,
5337                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5338                                           "configuration cannot be NULL");
5339         if (sample->ratio == 0)
5340                 return rte_flow_error_set(error, EINVAL,
5341                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5342                                           "ratio value starts from 1");
5343         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5344                 return rte_flow_error_set(error, ENOTSUP,
5345                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5346                                           NULL,
5347                                           "sample action not supported");
5348         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5349                 return rte_flow_error_set(error, EINVAL,
5350                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5351                                           "Multiple sample actions not "
5352                                           "supported");
5353         if (*action_flags & MLX5_FLOW_ACTION_METER)
5354                 return rte_flow_error_set(error, EINVAL,
5355                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5356                                           "wrong action order, meter should "
5357                                           "be after sample action");
5358         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5359                 return rte_flow_error_set(error, EINVAL,
5360                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5361                                           "wrong action order, jump should "
5362                                           "be after sample action");
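        /* Validate each sub-action in the sample action list. */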
5363         act = sample->actions;
5364         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5365                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5366                         return rte_flow_error_set(error, ENOTSUP,
5367                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5368                                                   act, "too many actions");
5369                 switch (act->type) {
5370                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5371                         ret = mlx5_flow_validate_action_queue(act,
5372                                                               sub_action_flags,
5373                                                               dev,
5374                                                               attr, error);
5375                         if (ret < 0)
5376                                 return ret;
5377                         queue_index = ((const struct rte_flow_action_queue *)
5378                                                         (act->conf))->index;
5379                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5380                         ++actions_n;
5381                         break;
5382                 case RTE_FLOW_ACTION_TYPE_RSS:
5383                         *sample_rss = act->conf;
5384                         ret = mlx5_flow_validate_action_rss(act,
5385                                                             sub_action_flags,
5386                                                             dev, attr,
5387                                                             item_flags,
5388                                                             error);
5389                         if (ret < 0)
5390                                 return ret;
5391                         if (rss && *sample_rss &&
5392                             ((*sample_rss)->level != rss->level ||
5393                             (*sample_rss)->types != rss->types))
5394                                 return rte_flow_error_set(error, ENOTSUP,
5395                                         RTE_FLOW_ERROR_TYPE_ACTION,
5396                                         NULL,
5397                                         "Can't use different RSS types "
5398                                         "or levels in the same flow");
5399                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5400                                 queue_index = (*sample_rss)->queue[0];
5401                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5402                         ++actions_n;
5403                         break;
5404                 case RTE_FLOW_ACTION_TYPE_MARK:
5405                         ret = flow_dv_validate_action_mark(dev, act,
5406                                                            sub_action_flags,
5407                                                            attr, error);
5408                         if (ret < 0)
5409                                 return ret;
5410                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5411                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5412                                                 MLX5_FLOW_ACTION_MARK_EXT;
5413                         else
5414                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5415                         ++actions_n;
5416                         break;
5417                 case RTE_FLOW_ACTION_TYPE_COUNT:
5418                         ret = flow_dv_validate_action_count
5419                                 (dev, is_shared_action_count(act),
5420                                  *action_flags | sub_action_flags,
5421                                  error);
5422                         if (ret < 0)
5423                                 return ret;
5424                         *count = act->conf;
5425                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5426                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5427                         ++actions_n;
5428                         break;
5429                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5430                         ret = flow_dv_validate_action_port_id(dev,
5431                                                               sub_action_flags,
5432                                                               act,
5433                                                               attr,
5434                                                               error);
5435                         if (ret)
5436                                 return ret;
5437                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5438                         ++actions_n;
5439                         break;
5440                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5441                         ret = flow_dv_validate_action_raw_encap_decap
5442                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5443                                  &actions_n, action, item_flags, error);
5444                         if (ret < 0)
5445                                 return ret;
5446                         ++actions_n;
5447                         break;
5448                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5449                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5450                         ret = flow_dv_validate_action_l2_encap(dev,
5451                                                                sub_action_flags,
5452                                                                act, attr,
5453                                                                error);
5454                         if (ret < 0)
5455                                 return ret;
5456                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5457                         ++actions_n;
5458                         break;
5459                 default:
5460                         return rte_flow_error_set(error, ENOTSUP,
5461                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5462                                                   NULL,
5463                                                   "unsupported sub-action "
5464                                                   "in sample action list");
5465                 }
5466         }
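        /*
         * Per-domain fate checks: NIC Rx sampling needs a QUEUE or RSS
         * destination, plain egress sampling is not supported, and
         * E-Switch sampling with sub-actions is mirroring only
         * (ratio 1) and needs a PORT_ID destination.
         */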
5467         if (attr->ingress && !attr->transfer) {
5468                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5469                                           MLX5_FLOW_ACTION_RSS)))
5470                         return rte_flow_error_set(error, EINVAL,
5471                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5472                                                   NULL,
5473                                                   "Ingress must have a dest "
5474                                                   "QUEUE for Sample");
5475         } else if (attr->egress && !attr->transfer) {
5476                 return rte_flow_error_set(error, ENOTSUP,
5477                                           RTE_FLOW_ERROR_TYPE_ACTION,
5478                                           NULL,
5479                                           "Sample only supports Ingress "
5480                                           "or E-Switch");
5481         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5482                 MLX5_ASSERT(attr->transfer);
5483                 if (sample->ratio > 1)
5484                         return rte_flow_error_set(error, ENOTSUP,
5485                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5486                                                   NULL,
5487                                                   "E-Switch doesn't support "
5488                                                   "any optional action "
5489                                                   "for sampling");
5490                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5491                         return rte_flow_error_set(error, ENOTSUP,
5492                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5493                                                   NULL,
5494                                                   "unsupported action QUEUE");
5495                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5496                         return rte_flow_error_set(error, ENOTSUP,
5497                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5498                                                   NULL,
5499                                                   "unsupported action RSS");
5500                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5501                         return rte_flow_error_set(error, EINVAL,
5502                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5503                                                   NULL,
5504                                                   "E-Switch must have a dest "
5505                                                   "port for mirroring");
5506                 if (!priv->config.hca_attr.reg_c_preserve &&
5507                      priv->representor_id != -1)
5508                         *fdb_mirror_limit = 1;
5509         }
5510         /* Continue validation for Xcap actions. */
5511         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5512             (queue_index == 0xFFFF ||
5513              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5514                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5515                      MLX5_FLOW_XCAP_ACTIONS)
5516                         return rte_flow_error_set(error, ENOTSUP,
5517                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5518                                                   NULL, "encap and decap "
5519                                                   "combination aren't "
5520                                                   "supported");
5521                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5522                                                         MLX5_FLOW_ACTION_ENCAP))
5523                         return rte_flow_error_set(error, ENOTSUP,
5524                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5525                                                   NULL, "encap is not supported"
5526                                                   " for ingress traffic");
5527         }
5528         return 0;
5529 }
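/*
 * Example (illustrative only, not part of the driver): mirror all
 * packets to a port in transfer mode, matching the E-Switch rules
 * validated above:
 *
 *     struct rte_flow_action_port_id mirror = { .id = 1 };
 *     struct rte_flow_action sub_acts[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &mirror },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *         .ratio = 1,
 *         .actions = sub_acts,
 *     };
 */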
5530
5531 /**
5532  * Find existing modify-header resource or create and register a new one.
5533  *
5534  * @param[in, out] dev
5535  *   Pointer to rte_eth_dev structure.
5536  * @param[in, out] resource
5537  *   Pointer to modify-header resource.
5538  * @param[in, out] dev_flow
5539  *   Pointer to the dev_flow.
5540  * @param[out] error
5541  *   Pointer to error structure.
5542  *
5543  * @return
5544  *   0 on success, otherwise a negative errno value and rte_errno is set.
5545  */
5546 static int
5547 flow_dv_modify_hdr_resource_register
5548                         (struct rte_eth_dev *dev,
5549                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5550                          struct mlx5_flow *dev_flow,
5551                          struct rte_flow_error *error)
5552 {
5553         struct mlx5_priv *priv = dev->data->dev_private;
5554         struct mlx5_dev_ctx_shared *sh = priv->sh;
5555         uint32_t key_len = sizeof(*resource) -
5556                            offsetof(typeof(*resource), ft_type) +
5557                            resource->actions_num * sizeof(resource->actions[0]);
5558         struct mlx5_hlist_entry *entry;
5559         struct mlx5_flow_cb_ctx ctx = {
5560                 .error = error,
5561                 .data = resource,
5562         };
5563         uint64_t key64;
5564
5565         resource->flags = dev_flow->dv.group ? 0 :
5566                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5567         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5568                                     resource->flags))
5569                 return rte_flow_error_set(error, EOVERFLOW,
5570                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5571                                           "too many modify header items");
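        /* Hash the key fields (ft_type up to the actions array) for lookup. */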
5572         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5573         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5574         if (!entry)
5575                 return -rte_errno;
5576         resource = container_of(entry, typeof(*resource), entry);
5577         dev_flow->handle->dvh.modify_hdr = resource;
5578         return 0;
5579 }
5580
5581 /**
5582  * Get DV flow counter by index.
5583  *
5584  * @param[in] dev
5585  *   Pointer to the Ethernet device structure.
5586  * @param[in] idx
5587  *   mlx5 flow counter index in the container.
5588  * @param[out] ppool
5589  *   mlx5 flow counter pool in the container.
5590  *
5591  * @return
5592  *   Pointer to the counter, NULL otherwise.
5593  */
5594 static struct mlx5_flow_counter *
5595 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5596                            uint32_t idx,
5597                            struct mlx5_flow_counter_pool **ppool)
5598 {
5599         struct mlx5_priv *priv = dev->data->dev_private;
5600         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5601         struct mlx5_flow_counter_pool *pool;
5602
5603         /* Decrease to original index and clear shared bit. */
5604         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5605         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5606         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5607         MLX5_ASSERT(pool);
5608         if (ppool)
5609                 *ppool = pool;
5610         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5611 }
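/*
 * Note: counter indexes are 1-based; bit MLX5_CNT_SHARED_OFFSET marks a
 * legacy shared counter, and (idx - 1) splits into the pool index
 * (multiples of MLX5_COUNTERS_PER_POOL) plus the offset within the pool,
 * see the MLX5_MAKE_CNT_IDX() usage below.
 */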
5612
5613 /**
5614  * Check whether the devx counter belongs to the pool.
5615  *
5616  * @param[in] pool
5617  *   Pointer to the counter pool.
5618  * @param[in] id
5619  *   The counter devx ID.
5620  *
5621  * @return
5622  *   True if counter belongs to the pool, false otherwise.
5623  */
5624 static bool
5625 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5626 {
5627         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5628                    MLX5_COUNTERS_PER_POOL;
5629
5630         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5631                 return true;
5632         return false;
5633 }
5634
5635 /**
5636  * Get a pool by devx counter ID.
5637  *
5638  * @param[in] cmng
5639  *   Pointer to the counter management.
5640  * @param[in] id
5641  *   The counter devx ID.
5642  *
5643  * @return
5644  *   The counter pool pointer if it exists, NULL otherwise.
5645  */
5646 static struct mlx5_flow_counter_pool *
5647 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5648 {
5649         uint32_t i;
5650         struct mlx5_flow_counter_pool *pool = NULL;
5651
5652         rte_spinlock_lock(&cmng->pool_update_sl);
5653         /* Check last used pool. */
5654         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5655             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5656                 pool = cmng->pools[cmng->last_pool_idx];
5657                 goto out;
5658         }
5659         /* ID out of range means no suitable pool in the container. */
5660         if (id > cmng->max_id || id < cmng->min_id)
5661                 goto out;
5662         /*
5663          * Search the pools from the end of the container, since counter
5664          * IDs are mostly sequentially increasing, so the last pool is
5665          * likely the needed one.
5666          */
5667         i = cmng->n_valid;
5668         while (i--) {
5669                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5670
5671                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5672                         pool = pool_tmp;
5673                         break;
5674                 }
5675         }
5676 out:
5677         rte_spinlock_unlock(&cmng->pool_update_sl);
5678         return pool;
5679 }
5680
5681 /**
5682  * Resize a counter container.
5683  *
5684  * @param[in] dev
5685  *   Pointer to the Ethernet device structure.
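 *
 * The caller is expected to hold the cmng pool_update_sl lock
 * (see flow_dv_pool_create()).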
5686  *
5687  * @return
5688  *   0 on success, otherwise negative errno value and rte_errno is set.
5689  */
5690 static int
5691 flow_dv_container_resize(struct rte_eth_dev *dev)
5692 {
5693         struct mlx5_priv *priv = dev->data->dev_private;
5694         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5695         void *old_pools = cmng->pools;
5696         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5697         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5698         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5699
5700         if (!pools) {
5701                 rte_errno = ENOMEM;
5702                 return -ENOMEM;
5703         }
5704         if (old_pools)
5705                 memcpy(pools, old_pools, cmng->n *
5706                                        sizeof(struct mlx5_flow_counter_pool *));
5707         cmng->n = resize;
5708         cmng->pools = pools;
5709         if (old_pools)
5710                 mlx5_free(old_pools);
5711         return 0;
5712 }
5713
5714 /**
5715  * Query a devx flow counter.
5716  *
5717  * @param[in] dev
5718  *   Pointer to the Ethernet device structure.
5719  * @param[in] counter
5720  *   Index to the flow counter.
5721  * @param[out] pkts
5722  *   The statistics value of packets.
5723  * @param[out] bytes
5724  *   The statistics value of bytes.
5725  *
5726  * @return
5727  *   0 on success, otherwise a negative errno value and rte_errno is set.
5728  */
5729 static inline int
5730 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5731                      uint64_t *bytes)
5732 {
5733         struct mlx5_priv *priv = dev->data->dev_private;
5734         struct mlx5_flow_counter_pool *pool = NULL;
5735         struct mlx5_flow_counter *cnt;
5736         int offset;
5737
5738         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5739         MLX5_ASSERT(pool);
5740         if (priv->sh->cmng.counter_fallback)
5741                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5742                                         0, pkts, bytes, 0, NULL, NULL, 0);
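        /*
         * Non-fallback mode: read the statistics from the latest
         * asynchronous batch query snapshot kept in pool->raw, if any.
         */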
5743         rte_spinlock_lock(&pool->sl);
5744         if (!pool->raw) {
5745                 *pkts = 0;
5746                 *bytes = 0;
5747         } else {
5748                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5749                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5750                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5751         }
5752         rte_spinlock_unlock(&pool->sl);
5753         return 0;
5754 }
5755
5756 /**
5757  * Create and initialize a new counter pool.
5758  *
5759  * @param[in] dev
5760  *   Pointer to the Ethernet device structure.
5761  * @param[out] dcs
5762  *   The devX counter handle.
5763  * @param[in] age
5764  *   Whether the pool is for counters allocated for aging.
5767  *
5768  * @return
5769  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5770  */
5771 static struct mlx5_flow_counter_pool *
5772 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5773                     uint32_t age)
5774 {
5775         struct mlx5_priv *priv = dev->data->dev_private;
5776         struct mlx5_flow_counter_pool *pool;
5777         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5778         bool fallback = priv->sh->cmng.counter_fallback;
5779         uint32_t size = sizeof(*pool);
5780
5781         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5782         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5783         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5784         if (!pool) {
5785                 rte_errno = ENOMEM;
5786                 return NULL;
5787         }
5788         pool->raw = NULL;
5789         pool->is_aged = !!age;
5790         pool->query_gen = 0;
5791         pool->min_dcs = dcs;
5792         rte_spinlock_init(&pool->sl);
5793         rte_spinlock_init(&pool->csl);
5794         TAILQ_INIT(&pool->counters[0]);
5795         TAILQ_INIT(&pool->counters[1]);
5796         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5797         rte_spinlock_lock(&cmng->pool_update_sl);
5798         pool->index = cmng->n_valid;
5799         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5800                 mlx5_free(pool);
5801                 rte_spinlock_unlock(&cmng->pool_update_sl);
5802                 return NULL;
5803         }
5804         cmng->pools[pool->index] = pool;
5805         cmng->n_valid++;
5806         if (unlikely(fallback)) {
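                /*
                 * Track the devx ID range covered by the pools so that
                 * flow_dv_find_pool_by_id() can reject out-of-range IDs.
                 */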
5807                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5808
5809                 if (base < cmng->min_id)
5810                         cmng->min_id = base;
5811                 if (base > cmng->max_id)
5812                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5813                 cmng->last_pool_idx = pool->index;
5814         }
5815         rte_spinlock_unlock(&cmng->pool_update_sl);
5816         return pool;
5817 }
5818
5819 /**
5820  * Prepare a new counter and/or a new counter pool.
5821  *
5822  * @param[in] dev
5823  *   Pointer to the Ethernet device structure.
5824  * @param[out] cnt_free
5825  *   Where to put the pointer of a new counter.
5826  * @param[in] age
5827  *   Whether the pool is for counters allocated for aging.
5828  *
5829  * @return
5830  *   The counter pool pointer and @p cnt_free is set on success,
5831  *   NULL otherwise and rte_errno is set.
5832  */
5833 static struct mlx5_flow_counter_pool *
5834 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5835                              struct mlx5_flow_counter **cnt_free,
5836                              uint32_t age)
5837 {
5838         struct mlx5_priv *priv = dev->data->dev_private;
5839         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5840         struct mlx5_flow_counter_pool *pool;
5841         struct mlx5_counters tmp_tq;
5842         struct mlx5_devx_obj *dcs = NULL;
5843         struct mlx5_flow_counter *cnt;
5844         enum mlx5_counter_type cnt_type =
5845                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5846         bool fallback = priv->sh->cmng.counter_fallback;
5847         uint32_t i;
5848
5849         if (fallback) {
5850                 /* bulk_bitmap must be 0 for single counter allocation. */
5851                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5852                 if (!dcs)
5853                         return NULL;
5854                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5855                 if (!pool) {
5856                         pool = flow_dv_pool_create(dev, dcs, age);
5857                         if (!pool) {
5858                                 mlx5_devx_cmd_destroy(dcs);
5859                                 return NULL;
5860                         }
5861                 }
5862                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5863                 cnt = MLX5_POOL_GET_CNT(pool, i);
5864                 cnt->pool = pool;
5865                 cnt->dcs_when_free = dcs;
5866                 *cnt_free = cnt;
5867                 return pool;
5868         }
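        /*
         * Bulk allocation (bulk bitmap 0x4): a single devx object backs
         * all the counters of the new pool.
         */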
5869         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5870         if (!dcs) {
5871                 rte_errno = ENODATA;
5872                 return NULL;
5873         }
5874         pool = flow_dv_pool_create(dev, dcs, age);
5875         if (!pool) {
5876                 mlx5_devx_cmd_destroy(dcs);
5877                 return NULL;
5878         }
5879         TAILQ_INIT(&tmp_tq);
5880         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5881                 cnt = MLX5_POOL_GET_CNT(pool, i);
5882                 cnt->pool = pool;
5883                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5884         }
5885         rte_spinlock_lock(&cmng->csl[cnt_type]);
5886         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5887         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5888         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5889         (*cnt_free)->pool = pool;
5890         return pool;
5891 }
5892
5893 /**
5894  * Allocate a flow counter.
5895  *
5896  * @param[in] dev
5897  *   Pointer to the Ethernet device structure.
5898  * @param[in] age
5899  *   Whether the counter was allocated for aging.
5900  *
5901  * @return
5902  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5903  */
5904 static uint32_t
5905 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5906 {
5907         struct mlx5_priv *priv = dev->data->dev_private;
5908         struct mlx5_flow_counter_pool *pool = NULL;
5909         struct mlx5_flow_counter *cnt_free = NULL;
5910         bool fallback = priv->sh->cmng.counter_fallback;
5911         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5912         enum mlx5_counter_type cnt_type =
5913                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5914         uint32_t cnt_idx;
5915
5916         if (!priv->config.devx) {
5917                 rte_errno = ENOTSUP;
5918                 return 0;
5919         }
5920         /* Get free counters from container. */
5921         rte_spinlock_lock(&cmng->csl[cnt_type]);
5922         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5923         if (cnt_free)
5924                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5925         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5926         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5927                 goto err;
5928         pool = cnt_free->pool;
5929         if (fallback)
5930                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5931         /* Create a DV counter action only on the first use. */
5932         if (!cnt_free->action) {
5933                 uint16_t offset;
5934                 struct mlx5_devx_obj *dcs;
5935                 int ret;
5936
5937                 if (!fallback) {
5938                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5939                         dcs = pool->min_dcs;
5940                 } else {
5941                         offset = 0;
5942                         dcs = cnt_free->dcs_when_free;
5943                 }
5944                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5945                                                             &cnt_free->action);
5946                 if (ret) {
5947                         rte_errno = errno;
5948                         goto err;
5949                 }
5950         }
5951         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5952                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5953         /* Update the counter reset values. */
5954         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5955                                  &cnt_free->bytes))
5956                 goto err;
5957         if (!fallback && !priv->sh->cmng.query_thread_on)
5958                 /* Start the asynchronous batch query by the host thread. */
5959                 mlx5_set_query_alarm(priv->sh);
5960         /*
5961          * When the count action isn't shared (by ID), the shared_info
5962          * field is used for the indirect action API's refcnt.
5963          * When the counter action is shared neither by ID nor by the
5964          * indirect action API, the refcnt must be 1.
5965          */
5966         cnt_free->shared_info.refcnt = 1;
5967         return cnt_idx;
5968 err:
5969         if (cnt_free) {
5970                 cnt_free->pool = pool;
5971                 if (fallback)
5972                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5973                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5974                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5975                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5976         }
5977         return 0;
5978 }
5979
5980 /**
5981  * Allocate a shared flow counter.
5982  *
5983  * @param[in] ctx
5984  *   Pointer to the shared counter configuration.
5985  * @param[out] data
5986  *   Pointer to save the allocated counter index.
5987  *
5988  * @return
5989  *   0 on success; the allocated counter index is saved in @p data.
5990  */
5992 static int32_t
5993 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5994 {
5995         struct mlx5_shared_counter_conf *conf = ctx;
5996         struct rte_eth_dev *dev = conf->dev;
5997         struct mlx5_flow_counter *cnt;
5998
5999         data->dword = flow_dv_counter_alloc(dev, 0);
6000         data->dword |= MLX5_CNT_SHARED_OFFSET;
6001         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6002         cnt->shared_info.id = conf->id;
6003         return 0;
6004 }
6005
6006 /**
6007  * Get a shared flow counter.
6008  *
6009  * @param[in] dev
6010  *   Pointer to the Ethernet device structure.
6011  * @param[in] id
6012  *   Counter identifier.
6013  *
6014  * @return
6015  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6016  */
6017 static uint32_t
6018 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6019 {
6020         struct mlx5_priv *priv = dev->data->dev_private;
6021         struct mlx5_shared_counter_conf conf = {
6022                 .dev = dev,
6023                 .id = id,
6024         };
6025         union mlx5_l3t_data data = {
6026                 .dword = 0,
6027         };
6028
6029         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6030                                flow_dv_counter_alloc_shared_cb, &conf);
6031         return data.dword;
6032 }
6033
6034 /**
6035  * Get age param from counter index.
6036  *
6037  * @param[in] dev
6038  *   Pointer to the Ethernet device structure.
6039  * @param[in] counter
6040  *   Index to the counter handler.
6041  *
6042  * @return
6043  *   The aging parameter specified for the counter index.
6044  */
6045 static struct mlx5_age_param*
6046 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6047                                 uint32_t counter)
6048 {
6049         struct mlx5_flow_counter *cnt;
6050         struct mlx5_flow_counter_pool *pool = NULL;
6051
6052         flow_dv_counter_get_by_idx(dev, counter, &pool);
6053         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6054         cnt = MLX5_POOL_GET_CNT(pool, counter);
6055         return MLX5_CNT_TO_AGE(cnt);
6056 }
6057
6058 /**
6059  * Remove a flow counter from aged counter list.
6060  *
6061  * @param[in] dev
6062  *   Pointer to the Ethernet device structure.
6063  * @param[in] counter
6064  *   Index to the counter handler.
6065  * @param[in] cnt
6066  *   Pointer to the counter handler.
6067  */
6068 static void
6069 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6070                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6071 {
6072         struct mlx5_age_info *age_info;
6073         struct mlx5_age_param *age_param;
6074         struct mlx5_priv *priv = dev->data->dev_private;
6075         uint16_t expected = AGE_CANDIDATE;
6076
6077         age_info = GET_PORT_AGE_INFO(priv);
6078         age_param = flow_dv_counter_idx_get_age(dev, counter);
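        /*
         * Try to switch the state from AGE_CANDIDATE straight to
         * AGE_FREE; if the state has already changed (e.g. the counter
         * aged out), the counter sits on the aged list and must be
         * removed under the lock.
         */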
6079         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6080                                          AGE_FREE, false, __ATOMIC_RELAXED,
6081                                          __ATOMIC_RELAXED)) {
6082                 /*
6083                  * We need the lock even on age timeout, since the
6084                  * counter may still be in process.
6085                  */
6086                 rte_spinlock_lock(&age_info->aged_sl);
6087                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6088                 rte_spinlock_unlock(&age_info->aged_sl);
6089                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6090         }
6091 }
6092
6093 /**
6094  * Release a flow counter.
6095  *
6096  * @param[in] dev
6097  *   Pointer to the Ethernet device structure.
6098  * @param[in] counter
6099  *   Index to the counter handler.
6100  */
6101 static void
6102 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6103 {
6104         struct mlx5_priv *priv = dev->data->dev_private;
6105         struct mlx5_flow_counter_pool *pool = NULL;
6106         struct mlx5_flow_counter *cnt;
6107         enum mlx5_counter_type cnt_type;
6108
6109         if (!counter)
6110                 return;
6111         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6112         MLX5_ASSERT(pool);
6113         if (pool->is_aged) {
6114                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6115         } else {
6116                 /*
6117                  * If the counter action is shared by ID, the l3t_clear_entry
6118                  * function reduces its reference counter. If after the
6119                  * reduction the action is still referenced, the function
6120                  * returns here and does not release it.
6121                  */
6122                 if (IS_LEGACY_SHARED_CNT(counter) &&
6123                     mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
6124                                          cnt->shared_info.id))
6125                         return;
6126                 /*
6127                  * If the counter action is shared by the indirect action API,
6128                  * the atomic operation decrements its reference counter.
6129                  * If the action is still referenced after the decrement,
6130                  * the function returns here and does not release it.
6131                  * When the counter action is shared neither by ID nor by the
6132                  * indirect action API, the refcnt is 1 before the decrement,
6133                  * so the condition fails and the function does not return here.
6134                  */
6135                 if (!IS_LEGACY_SHARED_CNT(counter) &&
6136                     __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6137                                        __ATOMIC_RELAXED))
6138                         return;
6139         }
6140         cnt->pool = pool;
6141         /*
6142          * Put the counter back to the list to be updated in non-fallback
6143          * mode. Two lists are used alternately: while one is being queried,
6144          * freed counters are added to the other one, selected by the pool
6145          * query_gen value. After the query finishes, that list is appended
6146          * to the global container counter list. The lists are swapped when
6147          * a query starts, so no lock is needed here: the query callback and
6148          * the release function always operate on different lists.
6149          */
6150         if (!priv->sh->cmng.counter_fallback) {
6151                 rte_spinlock_lock(&pool->csl);
6152                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6153                 rte_spinlock_unlock(&pool->csl);
6154         } else {
6155                 cnt->dcs_when_free = cnt->dcs_when_active;
6156                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6157                                            MLX5_COUNTER_TYPE_ORIGIN;
6158                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6159                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6160                                   cnt, next);
6161                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6162         }
6163 }
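/*
 * Usage sketch (hypothetical caller, not driver code): index 0 means
 * "no counter", so a teardown path may pass the stored handle
 * unconditionally; flow_dv_counter_free() returns early for 0.
 */
static inline void
sketch_flow_teardown_counter(struct rte_eth_dev *dev, uint32_t cnt_idx)
{
	flow_dv_counter_free(dev, cnt_idx);
}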
6164
6165 /**
6166  * Resize a meter id container.
6167  *
6168  * @param[in] dev
6169  *   Pointer to the Ethernet device structure.
6170  *
6171  * @return
6172  *   0 on success, otherwise negative errno value and rte_errno is set.
6173  */
6174 static int
6175 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6176 {
6177         struct mlx5_priv *priv = dev->data->dev_private;
6178         struct mlx5_aso_mtr_pools_mng *pools_mng =
6179                                 &priv->sh->mtrmng->pools_mng;
6180         void *old_pools = pools_mng->pools;
6181         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6182         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6183         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6184
6185         if (!pools) {
6186                 rte_errno = ENOMEM;
6187                 return -ENOMEM;
6188         }
6189         if (!pools_mng->n)
6190                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6191                         mlx5_free(pools);
6192                         return -ENOMEM;
6193                 }
6194         if (old_pools)
6195                 memcpy(pools, old_pools, pools_mng->n *
6196                                        sizeof(struct mlx5_aso_mtr_pool *));
6197         pools_mng->n = resize;
6198         pools_mng->pools = pools;
6199         if (old_pools)
6200                 mlx5_free(old_pools);
6201         return 0;
6202 }
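/*
 * A minimal sketch of the same grow-by-fixed-chunk pattern on a generic
 * pointer array (hypothetical helper, not in the driver); the real
 * function above additionally initializes the ASO queue on first use and
 * updates the pools manager in place.
 */
static inline void **
sketch_grow_ptr_array(void **old, uint32_t n_old, uint32_t chunk)
{
	uint32_t n_new = n_old + chunk;
	void **arr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * n_new,
				 0, SOCKET_ID_ANY);

	if (!arr)
		return NULL; /* Caller keeps the old array on failure. */
	if (old) {
		memcpy(arr, old, sizeof(void *) * n_old);
		mlx5_free(old);
	}
	return arr;
}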
6203
6204 /**
6205  * Prepare a new meter and/or a new meter pool.
6206  *
6207  * @param[in] dev
6208  *   Pointer to the Ethernet device structure.
6209  * @param[out] mtr_free
6210  *   Where to put the pointer of a new meter.
6211  *
6212  * @return
6213  *   The meter pool pointer and @p mtr_free is set on success,
6214  *   NULL otherwise and rte_errno is set.
6215  */
6216 static struct mlx5_aso_mtr_pool *
6217 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6218                              struct mlx5_aso_mtr **mtr_free)
6219 {
6220         struct mlx5_priv *priv = dev->data->dev_private;
6221         struct mlx5_aso_mtr_pools_mng *pools_mng =
6222                                 &priv->sh->mtrmng->pools_mng;
6223         struct mlx5_aso_mtr_pool *pool = NULL;
6224         struct mlx5_devx_obj *dcs = NULL;
6225         uint32_t i;
6226         uint32_t log_obj_size;
6227
6228         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6229         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6230                         priv->sh->pdn, log_obj_size);
6231         if (!dcs) {
6232                 rte_errno = ENODATA;
6233                 return NULL;
6234         }
6235         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6236         if (!pool) {
6237                 rte_errno = ENOMEM;
6238                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6239                 return NULL;
6240         }
6241         pool->devx_obj = dcs;
6242         pool->index = pools_mng->n_valid;
6243         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6244                 mlx5_free(pool);
6245                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6246                 return NULL;
6247         }
6248         pools_mng->pools[pool->index] = pool;
6249         pools_mng->n_valid++;
6250         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6251                 pool->mtrs[i].offset = i;
6252                 LIST_INSERT_HEAD(&pools_mng->meters,
6253                                  &pool->mtrs[i], next);
6254         }
6255         pool->mtrs[0].offset = 0;
6256         *mtr_free = &pool->mtrs[0];
6257         return pool;
6258 }
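/*
 * Worked example for the pool layout above (illustrative only): mtrs[0]
 * is handed back through *mtr_free to satisfy the allocation that
 * triggered the pool creation, while mtrs[1..N-1] are pushed on the
 * shared free list, so every pool creation serves exactly one pending
 * request.
 */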
6259
6260 /**
6261  * Release a flow meter into the pool.
6262  *
6263  * @param[in] dev
6264  *   Pointer to the Ethernet device structure.
6265  * @param[in] mtr_idx
6266  *   Index to ASO flow meter.
6267  */
6268 static void
6269 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6270 {
6271         struct mlx5_priv *priv = dev->data->dev_private;
6272         struct mlx5_aso_mtr_pools_mng *pools_mng =
6273                                 &priv->sh->mtrmng->pools_mng;
6274         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6275
6276         MLX5_ASSERT(aso_mtr);
6277         rte_spinlock_lock(&pools_mng->mtrsl);
6278         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6279         aso_mtr->state = ASO_METER_FREE;
6280         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6281         rte_spinlock_unlock(&pools_mng->mtrsl);
6282 }
6283
6284 /**
6285  * Allocate an ASO flow meter.
6286  *
6287  * @param[in] dev
6288  *   Pointer to the Ethernet device structure.
6289  *
6290  * @return
6291  *   Index to ASO flow meter on success, 0 otherwise and rte_errno is set.
6292  */
6293 static uint32_t
6294 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6295 {
6296         struct mlx5_priv *priv = dev->data->dev_private;
6297         struct mlx5_aso_mtr *mtr_free = NULL;
6298         struct mlx5_aso_mtr_pools_mng *pools_mng =
6299                                 &priv->sh->mtrmng->pools_mng;
6300         struct mlx5_aso_mtr_pool *pool;
6301         uint32_t mtr_idx = 0;
6302
6303         if (!priv->config.devx) {
6304                 rte_errno = ENOTSUP;
6305                 return 0;
6306         }
6307         /* Allocate the flow meter memory: take a free meter from the */
6308         /* management free list, or create a new pool below when empty. */
6309         rte_spinlock_lock(&pools_mng->mtrsl);
6310         mtr_free = LIST_FIRST(&pools_mng->meters);
6311         if (mtr_free)
6312                 LIST_REMOVE(mtr_free, next);
6313         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6314                 rte_spinlock_unlock(&pools_mng->mtrsl);
6315                 return 0;
6316         }
6317         mtr_free->state = ASO_METER_WAIT;
6318         rte_spinlock_unlock(&pools_mng->mtrsl);
6319         pool = container_of(mtr_free,
6320                             struct mlx5_aso_mtr_pool,
6321                             mtrs[mtr_free->offset]);
6322         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6323         if (!mtr_free->fm.meter_action) {
6324 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6325                 struct rte_flow_error error;
6326                 uint8_t reg_id;
6327
6328                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6329                 mtr_free->fm.meter_action =
6330                         mlx5_glue->dv_create_flow_action_aso
6331                                                 (priv->sh->rx_domain,
6332                                                  pool->devx_obj->obj,
6333                                                  mtr_free->offset,
6334                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6335                                                  reg_id - REG_C_0);
6336 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6337                 if (!mtr_free->fm.meter_action) {
6338                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6339                         return 0;
6340                 }
6341         }
6342         return mtr_idx;
6343 }
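/*
 * Usage sketch (hypothetical, not driver code): allocate an ASO meter
 * index and return it to the pool if a later setup step fails.
 */
static inline int
sketch_mtr_round_trip(struct rte_eth_dev *dev)
{
	uint32_t mtr_idx = flow_dv_mtr_alloc(dev);

	if (!mtr_idx)
		return -rte_errno; /* rte_errno set by flow_dv_mtr_alloc(). */
	/* ... program the meter via mlx5_aso_meter_by_idx() here ... */
	flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
	return 0;
}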
6344
6345 /**
6346  * Verify the @p attributes will be correctly understood by the NIC and store
6347  * them in the @p flow if everything is correct.
6348  *
6349  * @param[in] dev
6350  *   Pointer to dev struct.
6351  * @param[in] attributes
6352  *   Pointer to flow attributes
6353  * @param[in] grp_info
6354  *   Pointer to the flow group translation info.
6355  * @param[out] error
6356  *   Pointer to error structure.
6357  *
6358  * @return
6359  *   - 0 on success and non root table.
6360  *   - 1 on success and root table.
6361  *   - a negative errno value otherwise and rte_errno is set.
6362  */
6363 static int
6364 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6365                             const struct mlx5_flow_tunnel *tunnel,
6366                             const struct rte_flow_attr *attributes,
6367                             const struct flow_grp_info *grp_info,
6368                             struct rte_flow_error *error)
6369 {
6370         struct mlx5_priv *priv = dev->data->dev_private;
6371         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6372         int ret = 0;
6373
6374 #ifndef HAVE_MLX5DV_DR
6375         RTE_SET_USED(tunnel);
6376         RTE_SET_USED(grp_info);
6377         if (attributes->group)
6378                 return rte_flow_error_set(error, ENOTSUP,
6379                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6380                                           NULL,
6381                                           "groups are not supported");
6382 #else
6383         uint32_t table = 0;
6384
6385         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6386                                        grp_info, error);
6387         if (ret)
6388                 return ret;
6389         if (!table)
6390                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6391 #endif
6392         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6393             attributes->priority > lowest_priority)
6394                 return rte_flow_error_set(error, ENOTSUP,
6395                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6396                                           NULL,
6397                                           "priority out of range");
6398         if (attributes->transfer) {
6399                 if (!priv->config.dv_esw_en)
6400                         return rte_flow_error_set
6401                                 (error, ENOTSUP,
6402                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6403                                  "E-Switch dr is not supported");
6404                 if (!(priv->representor || priv->master))
6405                         return rte_flow_error_set
6406                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6407                                  NULL, "E-Switch configuration can only be"
6408                                  " done by a master or a representor device");
6409                 if (attributes->egress)
6410                         return rte_flow_error_set
6411                                 (error, ENOTSUP,
6412                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6413                                  "egress is not supported");
6414         }
6415         if (!(attributes->egress ^ attributes->ingress))
6416                 return rte_flow_error_set(error, ENOTSUP,
6417                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6418                                           "must specify exactly one of "
6419                                           "ingress or egress");
6420         return ret;
6421 }
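/*
 * Worked example of the ingress/egress check above (illustrative):
 *   ingress=1 egress=0 -> egress ^ ingress == 1, accepted
 *   ingress=0 egress=1 -> accepted (rejected earlier when transfer=1)
 *   ingress == egress  -> "must specify exactly one of ingress or egress"
 */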
6422
6423 static uint16_t
6424 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6425                           const struct rte_flow_item *end)
6426 {
6427         const struct rte_flow_item *item = *head;
6428         uint16_t l3_protocol;
6429
6430         for (; item != end; item++) {
6431                 switch (item->type) {
6432                 default:
6433                         break;
6434                 case RTE_FLOW_ITEM_TYPE_IPV4:
6435                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6436                         goto l3_ok;
6437                 case RTE_FLOW_ITEM_TYPE_IPV6:
6438                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6439                         goto l3_ok;
6440                 case RTE_FLOW_ITEM_TYPE_ETH:
6441                         if (item->mask && item->spec) {
6442                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6443                                                             type, item,
6444                                                             l3_protocol);
6445                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6446                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6447                                         goto l3_ok;
6448                         }
6449                         break;
6450                 case RTE_FLOW_ITEM_TYPE_VLAN:
6451                         if (item->mask && item->spec) {
6452                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6453                                                             inner_type, item,
6454                                                             l3_protocol);
6455                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6456                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6457                                         goto l3_ok;
6458                         }
6459                         break;
6460                 }
6461         }
6462         return 0;
6463 l3_ok:
6464         *head = item;
6465         return l3_protocol;
6466 }
6467
6468 static uint8_t
6469 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6470                           const struct rte_flow_item *end)
6471 {
6472         const struct rte_flow_item *item = *head;
6473         uint8_t l4_protocol;
6474
6475         for (; item != end; item++) {
6476                 switch (item->type) {
6477                 default:
6478                         break;
6479                 case RTE_FLOW_ITEM_TYPE_TCP:
6480                         l4_protocol = IPPROTO_TCP;
6481                         goto l4_ok;
6482                 case RTE_FLOW_ITEM_TYPE_UDP:
6483                         l4_protocol = IPPROTO_UDP;
6484                         goto l4_ok;
6485                 case RTE_FLOW_ITEM_TYPE_IPV4:
6486                         if (item->mask && item->spec) {
6487                                 const struct rte_flow_item_ipv4 *mask, *spec;
6488
6489                                 mask = (typeof(mask))item->mask;
6490                                 spec = (typeof(spec))item->spec;
6491                                 l4_protocol = mask->hdr.next_proto_id &
6492                                               spec->hdr.next_proto_id;
6493                                 if (l4_protocol == IPPROTO_TCP ||
6494                                     l4_protocol == IPPROTO_UDP)
6495                                         goto l4_ok;
6496                         }
6497                         break;
6498                 case RTE_FLOW_ITEM_TYPE_IPV6:
6499                         if (item->mask && item->spec) {
6500                                 const struct rte_flow_item_ipv6 *mask, *spec;
6501                                 mask = (typeof(mask))item->mask;
6502                                 spec = (typeof(spec))item->spec;
6503                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6504                                 if (l4_protocol == IPPROTO_TCP ||
6505                                     l4_protocol == IPPROTO_UDP)
6506                                         goto l4_ok;
6507                         }
6508                         break;
6509                 }
6510         }
6511         return 0;
6512 l4_ok:
6513         *head = item;
6514         return l4_protocol;
6515 }
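/*
 * Usage sketch for the two locators above (hypothetical helper, not
 * driver code): for an ETH / IPV4 / UDP / END pattern this yields
 * RTE_ETHER_TYPE_IPV4 and then IPPROTO_UDP, each call advancing the
 * cursor to the matched item.
 */
static inline void
sketch_locate_protocols(const struct rte_flow_item pattern[],
			uint16_t *l3_protocol, uint8_t *l4_protocol)
{
	const struct rte_flow_item *head = pattern;
	const struct rte_flow_item *end = mlx5_find_end_item(pattern);

	*l3_protocol = mlx5_flow_locate_proto_l3(&head, end);
	*l4_protocol = mlx5_flow_locate_proto_l4(&head, end);
}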
6516
6517 static int
6518 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6519                                 const struct rte_flow_item *rule_items,
6520                                 const struct rte_flow_item *integrity_item,
6521                                 struct rte_flow_error *error)
6522 {
6523         struct mlx5_priv *priv = dev->data->dev_private;
6524         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6525         const struct rte_flow_item_integrity *mask = (typeof(mask))
6526                                                      integrity_item->mask;
6527         const struct rte_flow_item_integrity *spec = (typeof(spec))
6528                                                      integrity_item->spec;
6529         uint32_t protocol;
6530
6531         if (!priv->config.hca_attr.pkt_integrity_match)
6532                 return rte_flow_error_set(error, ENOTSUP,
6533                                           RTE_FLOW_ERROR_TYPE_ITEM,
6534                                           integrity_item,
6535                                           "packet integrity match is not supported");
6536         if (!mask)
6537                 mask = &rte_flow_item_integrity_mask;
6538         if (!mlx5_validate_integrity_item(mask))
6539                 return rte_flow_error_set(error, ENOTSUP,
6540                                           RTE_FLOW_ERROR_TYPE_ITEM,
6541                                           integrity_item,
6542                                           "unsupported integrity filter");
6543         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6544         if (spec != NULL && spec->level > 1) {
6545                 if (!tunnel_item)
6546                         return rte_flow_error_set(error, ENOTSUP,
6547                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6548                                                   integrity_item,
6549                                                   "missing tunnel item");
6550                 item = tunnel_item;
6551                 end_item = mlx5_find_end_item(tunnel_item);
6552         } else {
6553                 end_item = tunnel_item ? tunnel_item :
6554                            mlx5_find_end_item(integrity_item);
6555         }
6556         if (mask->l3_ok || mask->ipv4_csum_ok) {
6557                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6558                 if (!protocol)
6559                         return rte_flow_error_set(error, EINVAL,
6560                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6561                                                   integrity_item,
6562                                                   "missing L3 protocol");
6563         }
6564         if (mask->l4_ok || mask->l4_csum_ok) {
6565                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6566                 if (!protocol)
6567                         return rte_flow_error_set(error, EINVAL,
6568                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6569                                                   integrity_item,
6570                                                   "missing L4 protocol");
6571         }
6572         return 0;
6573 }
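/*
 * Illustrative pattern for the checks above (hypothetical): an integrity
 * item with l4_ok set needs an L4 item, or an IPv4/IPv6 item that pins
 * TCP/UDP as the next protocol, within its scope, e.g.
 *   ETH / IPV4 / UDP / INTEGRITY(l4_ok=1) / END
 * With level > 1 the scope starts at the tunnel item instead, so the
 * inner headers are the ones validated.
 */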
6574
6575 /**
6576  * Internal validation function. For validating both actions and items.
6577  *
6578  * @param[in] dev
6579  *   Pointer to the rte_eth_dev structure.
6580  * @param[in] attr
6581  *   Pointer to the flow attributes.
6582  * @param[in] items
6583  *   Pointer to the list of items.
6584  * @param[in] actions
6585  *   Pointer to the list of actions.
6586  * @param[in] external
6587  *   This flow rule is created by a request external to the PMD.
6588  * @param[in] hairpin
6589  *   Number of hairpin TX actions, 0 means classic flow.
6590  * @param[out] error
6591  *   Pointer to the error structure.
6592  *
6593  * @return
6594  *   0 on success, a negative errno value otherwise and rte_errno is set.
6595  */
6596 static int
6597 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6598                  const struct rte_flow_item items[],
6599                  const struct rte_flow_action actions[],
6600                  bool external, int hairpin, struct rte_flow_error *error)
6601 {
6602         int ret;
6603         uint64_t action_flags = 0;
6604         uint64_t item_flags = 0;
6605         uint64_t last_item = 0;
6606         uint8_t next_protocol = 0xff;
6607         uint16_t ether_type = 0;
6608         int actions_n = 0;
6609         uint8_t item_ipv6_proto = 0;
6610         int fdb_mirror_limit = 0;
6611         int modify_after_mirror = 0;
6612         const struct rte_flow_item *geneve_item = NULL;
6613         const struct rte_flow_item *gre_item = NULL;
6614         const struct rte_flow_item *gtp_item = NULL;
6615         const struct rte_flow_action_raw_decap *decap;
6616         const struct rte_flow_action_raw_encap *encap;
6617         const struct rte_flow_action_rss *rss = NULL;
6618         const struct rte_flow_action_rss *sample_rss = NULL;
6619         const struct rte_flow_action_count *sample_count = NULL;
6620         const struct rte_flow_item_tcp nic_tcp_mask = {
6621                 .hdr = {
6622                         .tcp_flags = 0xFF,
6623                         .src_port = RTE_BE16(UINT16_MAX),
6624                         .dst_port = RTE_BE16(UINT16_MAX),
6625                 }
6626         };
6627         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6628                 .hdr = {
6629                         .src_addr =
6630                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6631                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6632                         .dst_addr =
6633                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6634                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6635                         .vtc_flow = RTE_BE32(0xffffffff),
6636                         .proto = 0xff,
6637                         .hop_limits = 0xff,
6638                 },
6639                 .has_frag_ext = 1,
6640         };
6641         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6642                 .hdr = {
6643                         .common = {
6644                                 .u32 =
6645                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6646                                         .type = 0xFF,
6647                                         }).u32),
6648                         },
6649                         .dummy[0] = 0xffffffff,
6650                 },
6651         };
6652         struct mlx5_priv *priv = dev->data->dev_private;
6653         struct mlx5_dev_config *dev_conf = &priv->config;
6654         uint16_t queue_index = 0xFFFF;
6655         const struct rte_flow_item_vlan *vlan_m = NULL;
6656         uint32_t rw_act_num = 0;
6657         uint64_t is_root;
6658         const struct mlx5_flow_tunnel *tunnel;
6659         enum mlx5_tof_rule_type tof_rule_type;
6660         struct flow_grp_info grp_info = {
6661                 .external = !!external,
6662                 .transfer = !!attr->transfer,
6663                 .fdb_def_rule = !!priv->fdb_def_rule,
6664                 .std_tbl_fix = true,
6665         };
6666         const struct rte_eth_hairpin_conf *conf;
6667         const struct rte_flow_item *rule_items = items;
6668         bool def_policy = false;
6669
6670         if (items == NULL)
6671                 return -1;
6672         tunnel = is_tunnel_offload_active(dev) ?
6673                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6674         if (tunnel) {
6675                 if (priv->representor)
6676                         return rte_flow_error_set
6677                                 (error, ENOTSUP,
6678                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6679                                  NULL, "decap not supported for VF representor");
6680                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6681                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6682                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6683                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6684                                         MLX5_FLOW_ACTION_DECAP;
6685                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6686                                         (dev, attr, tunnel, tof_rule_type);
6687         }
6688         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6689         if (ret < 0)
6690                 return ret;
6691         is_root = (uint64_t)ret;
6692         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6693                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6694                 int type = items->type;
6695
6696                 if (!mlx5_flow_os_item_supported(type))
6697                         return rte_flow_error_set(error, ENOTSUP,
6698                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6699                                                   NULL, "item not supported");
6700                 switch (type) {
6701                 case RTE_FLOW_ITEM_TYPE_VOID:
6702                         break;
6703                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6704                         ret = flow_dv_validate_item_port_id
6705                                         (dev, items, attr, item_flags, error);
6706                         if (ret < 0)
6707                                 return ret;
6708                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6709                         break;
6710                 case RTE_FLOW_ITEM_TYPE_ETH:
6711                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6712                                                           true, error);
6713                         if (ret < 0)
6714                                 return ret;
6715                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6716                                              MLX5_FLOW_LAYER_OUTER_L2;
6717                         if (items->mask != NULL && items->spec != NULL) {
6718                                 ether_type =
6719                                         ((const struct rte_flow_item_eth *)
6720                                          items->spec)->type;
6721                                 ether_type &=
6722                                         ((const struct rte_flow_item_eth *)
6723                                          items->mask)->type;
6724                                 ether_type = rte_be_to_cpu_16(ether_type);
6725                         } else {
6726                                 ether_type = 0;
6727                         }
6728                         break;
6729                 case RTE_FLOW_ITEM_TYPE_VLAN:
6730                         ret = flow_dv_validate_item_vlan(items, item_flags,
6731                                                          dev, error);
6732                         if (ret < 0)
6733                                 return ret;
6734                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6735                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6736                         if (items->mask != NULL && items->spec != NULL) {
6737                                 ether_type =
6738                                         ((const struct rte_flow_item_vlan *)
6739                                          items->spec)->inner_type;
6740                                 ether_type &=
6741                                         ((const struct rte_flow_item_vlan *)
6742                                          items->mask)->inner_type;
6743                                 ether_type = rte_be_to_cpu_16(ether_type);
6744                         } else {
6745                                 ether_type = 0;
6746                         }
6747                         /* Store outer VLAN mask for of_push_vlan action. */
6748                         if (!tunnel)
6749                                 vlan_m = items->mask;
6750                         break;
6751                 case RTE_FLOW_ITEM_TYPE_IPV4:
6752                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6753                                                   &item_flags, &tunnel);
6754                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6755                                                          last_item, ether_type,
6756                                                          error);
6757                         if (ret < 0)
6758                                 return ret;
6759                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6760                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6761                         if (items->mask != NULL &&
6762                             ((const struct rte_flow_item_ipv4 *)
6763                              items->mask)->hdr.next_proto_id) {
6764                                 next_protocol =
6765                                         ((const struct rte_flow_item_ipv4 *)
6766                                          (items->spec))->hdr.next_proto_id;
6767                                 next_protocol &=
6768                                         ((const struct rte_flow_item_ipv4 *)
6769                                          (items->mask))->hdr.next_proto_id;
6770                         } else {
6771                                 /* Reset for inner layer. */
6772                                 next_protocol = 0xff;
6773                         }
6774                         break;
6775                 case RTE_FLOW_ITEM_TYPE_IPV6:
6776                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6777                                                   &item_flags, &tunnel);
6778                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6779                                                            last_item,
6780                                                            ether_type,
6781                                                            &nic_ipv6_mask,
6782                                                            error);
6783                         if (ret < 0)
6784                                 return ret;
6785                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6786                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6787                         if (items->mask != NULL &&
6788                             ((const struct rte_flow_item_ipv6 *)
6789                              items->mask)->hdr.proto) {
6790                                 item_ipv6_proto =
6791                                         ((const struct rte_flow_item_ipv6 *)
6792                                          items->spec)->hdr.proto;
6793                                 next_protocol =
6794                                         ((const struct rte_flow_item_ipv6 *)
6795                                          items->spec)->hdr.proto;
6796                                 next_protocol &=
6797                                         ((const struct rte_flow_item_ipv6 *)
6798                                          items->mask)->hdr.proto;
6799                         } else {
6800                                 /* Reset for inner layer. */
6801                                 next_protocol = 0xff;
6802                         }
6803                         break;
6804                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6805                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6806                                                                   item_flags,
6807                                                                   error);
6808                         if (ret < 0)
6809                                 return ret;
6810                         last_item = tunnel ?
6811                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6812                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6813                         if (items->mask != NULL &&
6814                             ((const struct rte_flow_item_ipv6_frag_ext *)
6815                              items->mask)->hdr.next_header) {
6816                                 next_protocol =
6817                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6818                                  items->spec)->hdr.next_header;
6819                                 next_protocol &=
6820                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6821                                  items->mask)->hdr.next_header;
6822                         } else {
6823                                 /* Reset for inner layer. */
6824                                 next_protocol = 0xff;
6825                         }
6826                         break;
6827                 case RTE_FLOW_ITEM_TYPE_TCP:
6828                         ret = mlx5_flow_validate_item_tcp
6829                                                 (items, item_flags,
6830                                                  next_protocol,
6831                                                  &nic_tcp_mask,
6832                                                  error);
6833                         if (ret < 0)
6834                                 return ret;
6835                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6836                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6837                         break;
6838                 case RTE_FLOW_ITEM_TYPE_UDP:
6839                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6840                                                           next_protocol,
6841                                                           error);
6842                         if (ret < 0)
6843                                 return ret;
6844                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6845                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6846                         break;
6847                 case RTE_FLOW_ITEM_TYPE_GRE:
6848                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6849                                                           next_protocol, error);
6850                         if (ret < 0)
6851                                 return ret;
6852                         gre_item = items;
6853                         last_item = MLX5_FLOW_LAYER_GRE;
6854                         break;
6855                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6856                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6857                                                             next_protocol,
6858                                                             error);
6859                         if (ret < 0)
6860                                 return ret;
6861                         last_item = MLX5_FLOW_LAYER_NVGRE;
6862                         break;
6863                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6864                         ret = mlx5_flow_validate_item_gre_key
6865                                 (items, item_flags, gre_item, error);
6866                         if (ret < 0)
6867                                 return ret;
6868                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6869                         break;
6870                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6871                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6872                                                             error);
6873                         if (ret < 0)
6874                                 return ret;
6875                         last_item = MLX5_FLOW_LAYER_VXLAN;
6876                         break;
6877                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6878                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6879                                                                 item_flags, dev,
6880                                                                 error);
6881                         if (ret < 0)
6882                                 return ret;
6883                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6884                         break;
6885                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6886                         ret = mlx5_flow_validate_item_geneve(items,
6887                                                              item_flags, dev,
6888                                                              error);
6889                         if (ret < 0)
6890                                 return ret;
6891                         geneve_item = items;
6892                         last_item = MLX5_FLOW_LAYER_GENEVE;
6893                         break;
6894                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6895                         ret = mlx5_flow_validate_item_geneve_opt(items,
6896                                                                  last_item,
6897                                                                  geneve_item,
6898                                                                  dev,
6899                                                                  error);
6900                         if (ret < 0)
6901                                 return ret;
6902                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6903                         break;
6904                 case RTE_FLOW_ITEM_TYPE_MPLS:
6905                         ret = mlx5_flow_validate_item_mpls(dev, items,
6906                                                            item_flags,
6907                                                            last_item, error);
6908                         if (ret < 0)
6909                                 return ret;
6910                         last_item = MLX5_FLOW_LAYER_MPLS;
6911                         break;
6912
6913                 case RTE_FLOW_ITEM_TYPE_MARK:
6914                         ret = flow_dv_validate_item_mark(dev, items, attr,
6915                                                          error);
6916                         if (ret < 0)
6917                                 return ret;
6918                         last_item = MLX5_FLOW_ITEM_MARK;
6919                         break;
6920                 case RTE_FLOW_ITEM_TYPE_META:
6921                         ret = flow_dv_validate_item_meta(dev, items, attr,
6922                                                          error);
6923                         if (ret < 0)
6924                                 return ret;
6925                         last_item = MLX5_FLOW_ITEM_METADATA;
6926                         break;
6927                 case RTE_FLOW_ITEM_TYPE_ICMP:
6928                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6929                                                            next_protocol,
6930                                                            error);
6931                         if (ret < 0)
6932                                 return ret;
6933                         last_item = MLX5_FLOW_LAYER_ICMP;
6934                         break;
6935                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6936                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6937                                                             next_protocol,
6938                                                             error);
6939                         if (ret < 0)
6940                                 return ret;
6941                         item_ipv6_proto = IPPROTO_ICMPV6;
6942                         last_item = MLX5_FLOW_LAYER_ICMP6;
6943                         break;
6944                 case RTE_FLOW_ITEM_TYPE_TAG:
6945                         ret = flow_dv_validate_item_tag(dev, items,
6946                                                         attr, error);
6947                         if (ret < 0)
6948                                 return ret;
6949                         last_item = MLX5_FLOW_ITEM_TAG;
6950                         break;
6951                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6952                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6953                         break;
6954                 case RTE_FLOW_ITEM_TYPE_GTP:
6955                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
6956                                                         error);
6957                         if (ret < 0)
6958                                 return ret;
6959                         gtp_item = items;
6960                         last_item = MLX5_FLOW_LAYER_GTP;
6961                         break;
6962                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6963                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
6964                                                             gtp_item, attr,
6965                                                             error);
6966                         if (ret < 0)
6967                                 return ret;
6968                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
6969                         break;
6970                 case RTE_FLOW_ITEM_TYPE_ECPRI:
6971                         /* Capacity will be checked in the translate stage. */
6972                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
6973                                                             last_item,
6974                                                             ether_type,
6975                                                             &nic_ecpri_mask,
6976                                                             error);
6977                         if (ret < 0)
6978                                 return ret;
6979                         last_item = MLX5_FLOW_LAYER_ECPRI;
6980                         break;
6981                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
6982                         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
6983                                 return rte_flow_error_set
6984                                         (error, ENOTSUP,
6985                                          RTE_FLOW_ERROR_TYPE_ITEM,
6986                                          NULL, "multiple integrity items not supported");
6987                         ret = flow_dv_validate_item_integrity(dev, rule_items,
6988                                                               items, error);
6989                         if (ret < 0)
6990                                 return ret;
6991                         last_item = MLX5_FLOW_ITEM_INTEGRITY;
6992                         break;
6993                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
6994                         ret = flow_dv_validate_item_aso_ct(dev, items,
6995                                                            &item_flags, error);
6996                         if (ret < 0)
6997                                 return ret;
6998                         break;
6999                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7000                         /* Tunnel offload item was processed before;
7001                          * list it here as a supported type.
7002                          */
7003                         break;
7004                 default:
7005                         return rte_flow_error_set(error, ENOTSUP,
7006                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7007                                                   NULL, "item not supported");
7008                 }
7009                 item_flags |= last_item;
7010         }
7011         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7012                 int type = actions->type;
7013                 bool shared_count = false;
7014
7015                 if (!mlx5_flow_os_action_supported(type))
7016                         return rte_flow_error_set(error, ENOTSUP,
7017                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7018                                                   actions,
7019                                                   "action not supported");
7020                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7021                         return rte_flow_error_set(error, ENOTSUP,
7022                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7023                                                   actions, "too many actions");
7024                 if (action_flags &
7025                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7026                         return rte_flow_error_set(error, ENOTSUP,
7027                                 RTE_FLOW_ERROR_TYPE_ACTION,
7028                                 NULL, "meter action with policy "
7029                                 "must be the last action");
7030                 switch (type) {
7031                 case RTE_FLOW_ACTION_TYPE_VOID:
7032                         break;
7033                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7034                         ret = flow_dv_validate_action_port_id(dev,
7035                                                               action_flags,
7036                                                               actions,
7037                                                               attr,
7038                                                               error);
7039                         if (ret)
7040                                 return ret;
7041                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7042                         ++actions_n;
7043                         break;
7044                 case RTE_FLOW_ACTION_TYPE_FLAG:
7045                         ret = flow_dv_validate_action_flag(dev, action_flags,
7046                                                            attr, error);
7047                         if (ret < 0)
7048                                 return ret;
7049                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7050                                 /* Count all modify-header actions as one. */
7051                                 if (!(action_flags &
7052                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7053                                         ++actions_n;
7054                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7055                                                 MLX5_FLOW_ACTION_MARK_EXT;
7056                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7057                                         modify_after_mirror = 1;
7058
7059                         } else {
7060                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7061                                 ++actions_n;
7062                         }
7063                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7064                         break;
7065                 case RTE_FLOW_ACTION_TYPE_MARK:
7066                         ret = flow_dv_validate_action_mark(dev, actions,
7067                                                            action_flags,
7068                                                            attr, error);
7069                         if (ret < 0)
7070                                 return ret;
7071                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7072                                 /* Count all modify-header actions as one. */
7073                                 if (!(action_flags &
7074                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7075                                         ++actions_n;
7076                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7077                                                 MLX5_FLOW_ACTION_MARK_EXT;
7078                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7079                                         modify_after_mirror = 1;
7080                         } else {
7081                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7082                                 ++actions_n;
7083                         }
7084                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7085                         break;
7086                 case RTE_FLOW_ACTION_TYPE_SET_META:
7087                         ret = flow_dv_validate_action_set_meta(dev, actions,
7088                                                                action_flags,
7089                                                                attr, error);
7090                         if (ret < 0)
7091                                 return ret;
7092                         /* Count all modify-header actions as one action. */
7093                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7094                                 ++actions_n;
7095                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7096                                 modify_after_mirror = 1;
7097                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7098                         rw_act_num += MLX5_ACT_NUM_SET_META;
7099                         break;
7100                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7101                         ret = flow_dv_validate_action_set_tag(dev, actions,
7102                                                               action_flags,
7103                                                               attr, error);
7104                         if (ret < 0)
7105                                 return ret;
7106                         /* Count all modify-header actions as one action. */
7107                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7108                                 ++actions_n;
7109                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7110                                 modify_after_mirror = 1;
7111                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7112                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7113                         break;
7114                 case RTE_FLOW_ACTION_TYPE_DROP:
7115                         ret = mlx5_flow_validate_action_drop(action_flags,
7116                                                              attr, error);
7117                         if (ret < 0)
7118                                 return ret;
7119                         action_flags |= MLX5_FLOW_ACTION_DROP;
7120                         ++actions_n;
7121                         break;
7122                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7123                         ret = mlx5_flow_validate_action_queue(actions,
7124                                                               action_flags, dev,
7125                                                               attr, error);
7126                         if (ret < 0)
7127                                 return ret;
7128                         queue_index = ((const struct rte_flow_action_queue *)
7129                                                         (actions->conf))->index;
7130                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7131                         ++actions_n;
7132                         break;
7133                 case RTE_FLOW_ACTION_TYPE_RSS:
7134                         rss = actions->conf;
7135                         ret = mlx5_flow_validate_action_rss(actions,
7136                                                             action_flags, dev,
7137                                                             attr, item_flags,
7138                                                             error);
7139                         if (ret < 0)
7140                                 return ret;
7141                         if (rss && sample_rss &&
7142                             (sample_rss->level != rss->level ||
7143                             sample_rss->types != rss->types))
7144                                 return rte_flow_error_set(error, ENOTSUP,
7145                                         RTE_FLOW_ERROR_TYPE_ACTION,
7146                                         NULL,
7147                                         "Can't use the different RSS types "
7148                                         "or level in the same flow");
7149                         if (rss != NULL && rss->queue_num)
7150                                 queue_index = rss->queue[0];
7151                         action_flags |= MLX5_FLOW_ACTION_RSS;
7152                         ++actions_n;
7153                         break;
7154                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7155                         ret =
7156                         mlx5_flow_validate_action_default_miss(action_flags,
7157                                         attr, error);
7158                         if (ret < 0)
7159                                 return ret;
7160                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7161                         ++actions_n;
7162                         break;
7163                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7164                 case RTE_FLOW_ACTION_TYPE_COUNT:
7165                         shared_count = is_shared_action_count(actions);
7166                         ret = flow_dv_validate_action_count(dev, shared_count,
7167                                                             action_flags,
7168                                                             error);
7169                         if (ret < 0)
7170                                 return ret;
7171                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7172                         ++actions_n;
7173                         break;
7174                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7175                         if (flow_dv_validate_action_pop_vlan(dev,
7176                                                              action_flags,
7177                                                              actions,
7178                                                              item_flags, attr,
7179                                                              error))
7180                                 return -rte_errno;
7181                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7182                                 modify_after_mirror = 1;
7183                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7184                         ++actions_n;
7185                         break;
7186                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7187                         ret = flow_dv_validate_action_push_vlan(dev,
7188                                                                 action_flags,
7189                                                                 vlan_m,
7190                                                                 actions, attr,
7191                                                                 error);
7192                         if (ret < 0)
7193                                 return ret;
7194                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7195                                 modify_after_mirror = 1;
7196                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7197                         ++actions_n;
7198                         break;
7199                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7200                         ret = flow_dv_validate_action_set_vlan_pcp
7201                                                 (action_flags, actions, error);
7202                         if (ret < 0)
7203                                 return ret;
7204                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7205                                 modify_after_mirror = 1;
7206                         /* Count PCP with push_vlan command. */
7207                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7208                         break;
7209                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7210                         ret = flow_dv_validate_action_set_vlan_vid
7211                                                 (item_flags, action_flags,
7212                                                  actions, error);
7213                         if (ret < 0)
7214                                 return ret;
7215                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7216                                 modify_after_mirror = 1;
7217                         /* Count VID with push_vlan command. */
7218                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7219                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7220                         break;
7221                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7222                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7223                         ret = flow_dv_validate_action_l2_encap(dev,
7224                                                                action_flags,
7225                                                                actions, attr,
7226                                                                error);
7227                         if (ret < 0)
7228                                 return ret;
7229                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7230                         ++actions_n;
7231                         break;
7232                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7233                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7234                         ret = flow_dv_validate_action_decap(dev, action_flags,
7235                                                             actions, item_flags,
7236                                                             attr, error);
7237                         if (ret < 0)
7238                                 return ret;
7239                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7240                                 modify_after_mirror = 1;
7241                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7242                         ++actions_n;
7243                         break;
7244                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7245                         ret = flow_dv_validate_action_raw_encap_decap
7246                                 (dev, NULL, actions->conf, attr, &action_flags,
7247                                  &actions_n, actions, item_flags, error);
7248                         if (ret < 0)
7249                                 return ret;
7250                         break;
7251                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7252                         decap = actions->conf;
7253                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7254                                 ;
7255                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7256                                 encap = NULL;
7257                                 actions--;
7258                         } else {
7259                                 encap = actions->conf;
7260                         }
7261                         ret = flow_dv_validate_action_raw_encap_decap
7262                                            (dev,
7263                                             decap ? decap : &empty_decap, encap,
7264                                             attr, &action_flags, &actions_n,
7265                                             actions, item_flags, error);
7266                         if (ret < 0)
7267                                 return ret;
7268                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7269                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7270                                 modify_after_mirror = 1;
7271                         break;
7272                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7273                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7274                         ret = flow_dv_validate_action_modify_mac(action_flags,
7275                                                                  actions,
7276                                                                  item_flags,
7277                                                                  error);
7278                         if (ret < 0)
7279                                 return ret;
7280                         /* Count all modify-header actions as one action. */
7281                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7282                                 ++actions_n;
7283                         action_flags |= actions->type ==
7284                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7285                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7286                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7287                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7288                                 modify_after_mirror = 1;
7289                         /*
7290                          * Even though the source and destination MAC addresses
7291                          * overlap in the 4B-aligned header layout, the convert
7292                          * function handles them separately, so 4 SW actions are
7293                          * created in total: 2 actions are added for each
7294                          * address, no matter how many address bytes are set.
7295                          */
7296                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7297                         break;
7298                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7299                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7300                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7301                                                                   actions,
7302                                                                   item_flags,
7303                                                                   error);
7304                         if (ret < 0)
7305                                 return ret;
7306                         /* Count all modify-header actions as one action. */
7307                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7308                                 ++actions_n;
7309                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7310                                 modify_after_mirror = 1;
7311                         action_flags |= actions->type ==
7312                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7313                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7314                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7315                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7316                         break;
7317                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7318                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7319                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7320                                                                   actions,
7321                                                                   item_flags,
7322                                                                   error);
7323                         if (ret < 0)
7324                                 return ret;
7325                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7326                                 return rte_flow_error_set(error, ENOTSUP,
7327                                         RTE_FLOW_ERROR_TYPE_ACTION,
7328                                         actions,
7329                                         "Can't change header "
7330                                         "with ICMPv6 proto");
7331                         /* Count all modify-header actions as one action. */
7332                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7333                                 ++actions_n;
7334                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7335                                 modify_after_mirror = 1;
7336                         action_flags |= actions->type ==
7337                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7338                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7339                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7340                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7341                         break;
7342                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7343                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7344                         ret = flow_dv_validate_action_modify_tp(action_flags,
7345                                                                 actions,
7346                                                                 item_flags,
7347                                                                 error);
7348                         if (ret < 0)
7349                                 return ret;
7350                         /* Count all modify-header actions as one action. */
7351                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7352                                 ++actions_n;
7353                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7354                                 modify_after_mirror = 1;
7355                         action_flags |= actions->type ==
7356                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7357                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7358                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7359                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7360                         break;
7361                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7362                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7363                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7364                                                                  actions,
7365                                                                  item_flags,
7366                                                                  error);
7367                         if (ret < 0)
7368                                 return ret;
7369                         /* Count all modify-header actions as one action. */
7370                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7371                                 ++actions_n;
7372                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7373                                 modify_after_mirror = 1;
7374                         action_flags |= actions->type ==
7375                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7376                                                 MLX5_FLOW_ACTION_SET_TTL :
7377                                                 MLX5_FLOW_ACTION_DEC_TTL;
7378                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7379                         break;
7380                 case RTE_FLOW_ACTION_TYPE_JUMP:
7381                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7382                                                            action_flags,
7383                                                            attr, external,
7384                                                            error);
7385                         if (ret)
7386                                 return ret;
7387                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7388                             fdb_mirror_limit)
7389                                 return rte_flow_error_set(error, EINVAL,
7390                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7391                                                   NULL,
7392                                                   "sample and jump action combination is not supported");
7393                         ++actions_n;
7394                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7395                         break;
7396                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7397                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7398                         ret = flow_dv_validate_action_modify_tcp_seq
7399                                                                 (action_flags,
7400                                                                  actions,
7401                                                                  item_flags,
7402                                                                  error);
7403                         if (ret < 0)
7404                                 return ret;
7405                         /* Count all modify-header actions as one action. */
7406                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7407                                 ++actions_n;
7408                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7409                                 modify_after_mirror = 1;
7410                         action_flags |= actions->type ==
7411                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7412                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7413                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7414                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7415                         break;
7416                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7417                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7418                         ret = flow_dv_validate_action_modify_tcp_ack
7419                                                                 (action_flags,
7420                                                                  actions,
7421                                                                  item_flags,
7422                                                                  error);
7423                         if (ret < 0)
7424                                 return ret;
7425                         /* Count all modify-header actions as one action. */
7426                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7427                                 ++actions_n;
7428                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7429                                 modify_after_mirror = 1;
7430                         action_flags |= actions->type ==
7431                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7432                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7433                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7434                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7435                         break;
7436                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7437                         break;
7438                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7439                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7440                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7441                         break;
7442                 case RTE_FLOW_ACTION_TYPE_METER:
7443                         ret = mlx5_flow_validate_action_meter(dev,
7444                                                               action_flags,
7445                                                               actions, attr,
7446                                                               &def_policy,
7447                                                               error);
7448                         if (ret < 0)
7449                                 return ret;
7450                         action_flags |= MLX5_FLOW_ACTION_METER;
7451                         if (!def_policy)
7452                                 action_flags |=
7453                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7454                         ++actions_n;
7455                         /* Meter action will add one more TAG action. */
7456                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7457                         break;
7458                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7459                         if (!attr->transfer && !attr->group)
7460                                 return rte_flow_error_set(error, ENOTSUP,
7461                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7462                                                                            NULL,
7463                           "Shared ASO age action is not supported for group 0");
7464                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7465                                 return rte_flow_error_set
7466                                                   (error, EINVAL,
7467                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7468                                                    NULL,
7469                                                    "duplicate age actions set");
7470                         action_flags |= MLX5_FLOW_ACTION_AGE;
7471                         ++actions_n;
7472                         break;
7473                 case RTE_FLOW_ACTION_TYPE_AGE:
7474                         ret = flow_dv_validate_action_age(action_flags,
7475                                                           actions, dev,
7476                                                           error);
7477                         if (ret < 0)
7478                                 return ret;
7479                         /*
7480                          * Validate the regular AGE action (using a counter)
7481                          * mutual exclusion with shared counter actions.
7482                          */
7483                         if (!priv->sh->flow_hit_aso_en) {
7484                                 if (shared_count)
7485                                         return rte_flow_error_set
7486                                                 (error, EINVAL,
7487                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7488                                                 NULL,
7489                                                 "old age and shared count combination is not supported");
7490                                 if (sample_count)
7491                                         return rte_flow_error_set
7492                                                 (error, EINVAL,
7493                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7494                                                 NULL,
7495                                                 "old age action and count must be in the same sub flow");
7496                         }
7497                         action_flags |= MLX5_FLOW_ACTION_AGE;
7498                         ++actions_n;
7499                         break;
7500                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7501                         ret = flow_dv_validate_action_modify_ipv4_dscp
7502                                                          (action_flags,
7503                                                           actions,
7504                                                           item_flags,
7505                                                           error);
7506                         if (ret < 0)
7507                                 return ret;
7508                         /* Count all modify-header actions as one action. */
7509                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7510                                 ++actions_n;
7511                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7512                                 modify_after_mirror = 1;
7513                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7514                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7515                         break;
7516                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7517                         ret = flow_dv_validate_action_modify_ipv6_dscp
7518                                                                 (action_flags,
7519                                                                  actions,
7520                                                                  item_flags,
7521                                                                  error);
7522                         if (ret < 0)
7523                                 return ret;
7524                         /* Count all modify-header actions as one action. */
7525                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7526                                 ++actions_n;
7527                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7528                                 modify_after_mirror = 1;
7529                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7530                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7531                         break;
7532                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7533                         ret = flow_dv_validate_action_sample(&action_flags,
7534                                                              actions, dev,
7535                                                              attr, item_flags,
7536                                                              rss, &sample_rss,
7537                                                              &sample_count,
7538                                                              &fdb_mirror_limit,
7539                                                              error);
7540                         if (ret < 0)
7541                                 return ret;
7542                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7543                         ++actions_n;
7544                         break;
7545                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7546                         ret = flow_dv_validate_action_modify_field(dev,
7547                                                                    action_flags,
7548                                                                    actions,
7549                                                                    attr,
7550                                                                    error);
7551                         if (ret < 0)
7552                                 return ret;
7553                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7554                                 modify_after_mirror = 1;
7555                         /* Count all modify-header actions as one action. */
7556                         if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
7557                                 ++actions_n;
7558                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7559                         rw_act_num += ret;
7560                         break;
7561                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7562                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7563                                                              item_flags, attr,
7564                                                              error);
7565                         if (ret < 0)
7566                                 return ret;
7567                         action_flags |= MLX5_FLOW_ACTION_CT;
7568                         break;
7569                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7570                         /* Tunnel offload action was processed before;
7571                          * list it here as a supported type.
7572                          */
7573                         break;
7574                 default:
7575                         return rte_flow_error_set(error, ENOTSUP,
7576                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7577                                                   actions,
7578                                                   "action not supported");
7579                 }
7580         }
7581         /*
7582          * Validate actions in flow rules:
7583          * - Explicit decap action is prohibited by the tunnel offload API.
7584          * - Drop action in a tunnel steer rule is prohibited by the API.
7585          * - Application cannot use MARK action because its value can mask
7586          *   the tunnel default miss notification.
7587          * - JUMP in a tunnel match rule has no support in the current PMD
7588          *   implementation.
7589          * - TAG & META are reserved for future use.
7590          */
7591         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7592                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7593                                             MLX5_FLOW_ACTION_MARK     |
7594                                             MLX5_FLOW_ACTION_SET_TAG  |
7595                                             MLX5_FLOW_ACTION_SET_META |
7596                                             MLX5_FLOW_ACTION_DROP;
7597
7598                 if (action_flags & bad_actions_mask)
7599                         return rte_flow_error_set
7600                                         (error, EINVAL,
7601                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7602                                         "Invalid RTE action in tunnel "
7603                                         "set decap rule");
7604                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7605                         return rte_flow_error_set
7606                                         (error, EINVAL,
7607                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7608                                         "tunnel set decap rule must terminate "
7609                                         "with JUMP");
7610                 if (!attr->ingress)
7611                         return rte_flow_error_set
7612                                         (error, EINVAL,
7613                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7614                                         "tunnel flows for ingress traffic only");
7615         }
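        /*
         * For illustration only (identifiers hypothetical): a tunnel set
         * (steer) rule that passes the checks above looks, schematically,
         * like
         *
         *	pattern: ETH / IPV4 / UDP / VXLAN / END
         *	actions: <PMD tunnel_set action> / JUMP / END
         *
         * applied on the ingress direction, with DECAP, DROP, MARK, TAG and
         * META all absent from the action list.
         */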
7616         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7617                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7618                                             MLX5_FLOW_ACTION_MARK    |
7619                                             MLX5_FLOW_ACTION_SET_TAG |
7620                                             MLX5_FLOW_ACTION_SET_META;
7621
7622                 if (action_flags & bad_actions_mask)
7623                         return rte_flow_error_set
7624                                         (error, EINVAL,
7625                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7626                                         "Invalid RTE action in tunnel "
7627                                         "set match rule");
7628         }
7629         /*
7630          * Validate the drop action mutual exclusion with other actions.
7631          * Drop action is mutually-exclusive with any other action, except for
7632          * Count action.
7633          * Drop action compatibility with tunnel offload was already validated.
7634          */
7635         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7636                             MLX5_FLOW_ACTION_TUNNEL_SET));
7637         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7638             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7639                 return rte_flow_error_set(error, EINVAL,
7640                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7641                                           "Drop action is mutually-exclusive "
7642                                           "with any other action, except for "
7643                                           "Count action");
7644         /* Eswitch has a few restrictions on using items and actions. */
7645         if (attr->transfer) {
7646                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7647                     action_flags & MLX5_FLOW_ACTION_FLAG)
7648                         return rte_flow_error_set(error, ENOTSUP,
7649                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7650                                                   NULL,
7651                                                   "unsupported action FLAG");
7652                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7653                     action_flags & MLX5_FLOW_ACTION_MARK)
7654                         return rte_flow_error_set(error, ENOTSUP,
7655                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7656                                                   NULL,
7657                                                   "unsupported action MARK");
7658                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7659                         return rte_flow_error_set(error, ENOTSUP,
7660                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7661                                                   NULL,
7662                                                   "unsupported action QUEUE");
7663                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7664                         return rte_flow_error_set(error, ENOTSUP,
7665                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7666                                                   NULL,
7667                                                   "unsupported action RSS");
7668                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7669                         return rte_flow_error_set(error, EINVAL,
7670                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7671                                                   actions,
7672                                                   "no fate action is found");
7673         } else {
7674                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7675                         return rte_flow_error_set(error, EINVAL,
7676                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7677                                                   actions,
7678                                                   "no fate action is found");
7679         }
7680         /*
7681          * Continue validation for Xcap and VLAN actions.
7682          * If hairpin is working in explicit TX rule mode, there is no action
7683          * splitting and the validation of a hairpin ingress flow should be the
7684          * same as for other standard flows.
7685          */
7686         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7687                              MLX5_FLOW_VLAN_ACTIONS)) &&
7688             (queue_index == 0xFFFF ||
7689              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7690              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7691              conf->tx_explicit != 0))) {
7692                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7693                     MLX5_FLOW_XCAP_ACTIONS)
7694                         return rte_flow_error_set(error, ENOTSUP,
7695                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7696                                                   NULL, "encap and decap "
7697                                                   "combination is not supported");
7698                 if (!attr->transfer && attr->ingress) {
7699                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7700                                 return rte_flow_error_set
7701                                                 (error, ENOTSUP,
7702                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7703                                                  NULL, "encap is not supported"
7704                                                  " for ingress traffic");
7705                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7706                                 return rte_flow_error_set
7707                                                 (error, ENOTSUP,
7708                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7709                                                  NULL, "push VLAN action not "
7710                                                  "supported for ingress");
7711                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7712                                         MLX5_FLOW_VLAN_ACTIONS)
7713                                 return rte_flow_error_set
7714                                                 (error, ENOTSUP,
7715                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7716                                                  NULL, "no support for "
7717                                                  "multiple VLAN actions");
7718                 }
7719         }
7720         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7721                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7722                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7723                         attr->ingress)
7724                         return rte_flow_error_set
7725                                 (error, ENOTSUP,
7726                                 RTE_FLOW_ERROR_TYPE_ACTION,
7727                                 NULL, "fate action not supported for "
7728                                 "meter with policy");
7729                 if (attr->egress) {
7730                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7731                                 return rte_flow_error_set
7732                                         (error, ENOTSUP,
7733                                         RTE_FLOW_ERROR_TYPE_ACTION,
7734                                         NULL, "modify header action in egress "
7735                                         "cannot be done before meter action");
7736                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7737                                 return rte_flow_error_set
7738                                         (error, ENOTSUP,
7739                                         RTE_FLOW_ERROR_TYPE_ACTION,
7740                                         NULL, "encap action in egress "
7741                                         "cannot be done before meter action");
7742                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7743                                 return rte_flow_error_set
7744                                         (error, ENOTSUP,
7745                                         RTE_FLOW_ERROR_TYPE_ACTION,
7746                                         NULL, "push vlan action in egress "
7747                                         "cannot be done before meter action");
7748                 }
7749         }
7750         /*
7751          * Hairpin flow will add one more TAG action in TX implicit mode.
7752          * In TX explicit mode, there will be no hairpin flow ID.
7753          */
7754         if (hairpin > 0)
7755                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7756         /* Extra metadata enabled: one more TAG action will be added. */
7757         if (dev_conf->dv_flow_en &&
7758             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7759             mlx5_flow_ext_mreg_supported(dev))
7760                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7761         if (rw_act_num >
7762                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7763                 return rte_flow_error_set(error, ENOTSUP,
7764                                           RTE_FLOW_ERROR_TYPE_ACTION,
7765                                           NULL, "too many header modify"
7766                                           " actions to support");
7767         }
7768         /* Eswitch egress mirror and modify flow has a limitation on CX5. */
7769         if (fdb_mirror_limit && modify_after_mirror)
7770                 return rte_flow_error_set(error, EINVAL,
7771                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7772                                 "sample before modify action is not supported");
7773         return 0;
7774 }
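/*
 * For illustration only, not part of the driver: a minimal action list that
 * the drop mutual-exclusion check above accepts, since DROP may be combined
 * with COUNT and nothing else:
 *
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * Appending any other fate or modify-header action to this list makes the
 * validation fail with EINVAL.
 */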
7775
7776 /**
7777  * Internal preparation function. Allocates the DV flow size;
7778  * this size is constant.
7779  *
7780  * @param[in] dev
7781  *   Pointer to the rte_eth_dev structure.
7782  * @param[in] attr
7783  *   Pointer to the flow attributes.
7784  * @param[in] items
7785  *   Pointer to the list of items.
7786  * @param[in] actions
7787  *   Pointer to the list of actions.
7788  * @param[out] error
7789  *   Pointer to the error structure.
7790  *
7791  * @return
7792  *   Pointer to mlx5_flow object on success,
7793  *   otherwise NULL and rte_errno is set.
7794  */
7795 static struct mlx5_flow *
7796 flow_dv_prepare(struct rte_eth_dev *dev,
7797                 const struct rte_flow_attr *attr __rte_unused,
7798                 const struct rte_flow_item items[] __rte_unused,
7799                 const struct rte_flow_action actions[] __rte_unused,
7800                 struct rte_flow_error *error)
7801 {
7802         uint32_t handle_idx = 0;
7803         struct mlx5_flow *dev_flow;
7804         struct mlx5_flow_handle *dev_handle;
7805         struct mlx5_priv *priv = dev->data->dev_private;
7806         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7807
7808         MLX5_ASSERT(wks);
7809         wks->skip_matcher_reg = 0;
7810         /* Guard against overflowing the temporary flows array. */
7811         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7812                 rte_flow_error_set(error, ENOSPC,
7813                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7814                                    "no free temporary device flow");
7815                 return NULL;
7816         }
7817         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7818                                    &handle_idx);
7819         if (!dev_handle) {
7820                 rte_flow_error_set(error, ENOMEM,
7821                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7822                                    "not enough memory to create flow handle");
7823                 return NULL;
7824         }
7825         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7826         dev_flow = &wks->flows[wks->flow_idx++];
7827         memset(dev_flow, 0, sizeof(*dev_flow));
7828         dev_flow->handle = dev_handle;
7829         dev_flow->handle_idx = handle_idx;
7830         /*
7831          * In some old rdma-core releases, the length of the matching
7832          * parameter is checked first, before continuing. The length without
7833          * the misc4 param must be used by default; if the flow needs misc4
7834          * support, the length is adjusted accordingly. Each param member is
7835          * naturally aligned to a 64B boundary.
7836          */
7837         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7838                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7839         dev_flow->ingress = attr->ingress;
7840         dev_flow->dv.transfer = attr->transfer;
7841         return dev_flow;
7842 }
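/*
 * A minimal usage sketch (hypothetical caller context): the function returns
 * NULL and sets rte_errno on failure, so a caller would typically do:
 *
 *	struct mlx5_flow *dev_flow;
 *
 *	dev_flow = flow_dv_prepare(dev, attr, items, actions, error);
 *	if (!dev_flow)
 *		return -rte_errno;
 */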
7843
7844 #ifdef RTE_LIBRTE_MLX5_DEBUG
7845 /**
7846  * Sanity check for match mask and value. Similar to check_valid_spec() in
7847  * the kernel driver. If an unmasked bit is present in the value, it fails.
7848  *
7849  * @param match_mask
7850  *   pointer to match mask buffer.
7851  * @param match_value
7852  *   pointer to match value buffer.
7853  *
7854  * @return
7855  *   0 if valid, -EINVAL otherwise.
7856  */
7857 static int
7858 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7859 {
7860         uint8_t *m = match_mask;
7861         uint8_t *v = match_value;
7862         unsigned int i;
7863
7864         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7865                 if (v[i] & ~m[i]) {
7866                         DRV_LOG(ERR,
7867                                 "match_value differs from match_criteria"
7868                                 " %p[%u] != %p[%u]",
7869                                 match_value, i, match_mask, i);
7870                         return -EINVAL;
7871                 }
7872         }
7873         return 0;
7874 }
7875 #endif
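/*
 * Illustration of the check above (debug builds only), with hypothetical
 * buffers: a value bit that is clear in the mask triggers the failure path.
 *
 *	uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0xf0 };
 *	uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x0f };
 *
 *	flow_dv_check_valid_spec(mask, value); // returns -EINVAL
 */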
7876
7877 /**
7878  * Add match of ip_version.
7879  *
7880  * @param[in] group
7881  *   Flow group.
7882  * @param[in] headers_v
7883  *   Values header pointer.
7884  * @param[in] headers_m
7885  *   Masks header pointer.
7886  * @param[in] ip_version
7887  *   The IP version to set.
7888  */
7889 static inline void
7890 flow_dv_set_match_ip_version(uint32_t group,
7891                              void *headers_v,
7892                              void *headers_m,
7893                              uint8_t ip_version)
7894 {
7895         if (group == 0)
7896                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7897         else
7898                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7899                          ip_version);
7900         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7901         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7902         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7903 }
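/*
 * A worked example of the helper above, as a sketch: for IPv4 (ip_version 4)
 * in group 0 the mask is widened to 0xf, while in non-root groups both mask
 * and value are the ip_version itself; the ethertype fields are cleared in
 * both cases because ip_version supersedes them.
 *
 *	flow_dv_set_match_ip_version(0, headers_v, headers_m, 4);
 *	// headers_m ip_version == 0xf, headers_v ip_version == 4
 */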
7904
7905 /**
7906  * Add Ethernet item to matcher and to the value.
7907  *
7908  * @param[in, out] matcher
7909  *   Flow matcher.
7910  * @param[in, out] key
7911  *   Flow matcher value.
7912  * @param[in] item
7913  *   Flow pattern to translate.
7914  * @param[in] inner
7915  *   Item is inner pattern.
7916  */
7917 static void
7918 flow_dv_translate_item_eth(void *matcher, void *key,
7919                            const struct rte_flow_item *item, int inner,
7920                            uint32_t group)
7921 {
7922         const struct rte_flow_item_eth *eth_m = item->mask;
7923         const struct rte_flow_item_eth *eth_v = item->spec;
7924         const struct rte_flow_item_eth nic_mask = {
7925                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7926                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7927                 .type = RTE_BE16(0xffff),
7928                 .has_vlan = 0,
7929         };
7930         void *hdrs_m;
7931         void *hdrs_v;
7932         char *l24_v;
7933         unsigned int i;
7934
7935         if (!eth_v)
7936                 return;
7937         if (!eth_m)
7938                 eth_m = &nic_mask;
7939         if (inner) {
7940                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7941                                          inner_headers);
7942                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7943         } else {
7944                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7945                                          outer_headers);
7946                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7947         }
7948         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7949                &eth_m->dst, sizeof(eth_m->dst));
7950         /* The value must be in the range of the mask. */
7951         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7952         for (i = 0; i < sizeof(eth_m->dst); ++i)
7953                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7954         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7955                &eth_m->src, sizeof(eth_m->src));
7956         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7957         /* The value must be in the range of the mask. */
7958         for (i = 0; i < sizeof(eth_m->src); ++i)
7959                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7960         /*
7961          * HW supports match on one Ethertype, the Ethertype following the last
7962          * VLAN tag of the packet (see PRM).
7963          * Set match on ethertype only if the ETH header is not followed by VLAN.
7964          * HW is optimized for IPv4/IPv6. In such cases, avoid setting the
7965          * ethertype and use the ip_version field instead.
7966          * eCPRI over the Ether layer uses type value 0xAEFE.
7967          */
7968         if (eth_m->type == 0xFFFF) {
7969                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
7970                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7971                 switch (eth_v->type) {
7972                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7973                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7974                         return;
7975                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7976                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7977                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7978                         return;
7979                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7980                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7981                         return;
7982                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7983                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7984                         return;
7985                 default:
7986                         break;
7987                 }
7988         }
7989         if (eth_m->has_vlan) {
7990                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7991                 if (eth_v->has_vlan) {
7992                         /*
7993                          * Here, when also has_more_vlan field in VLAN item is
7994                          * not set, only single-tagged packets will be matched.
7995                          */
7996                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7997                         return;
7998                 }
7999         }
8000         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8001                  rte_be_to_cpu_16(eth_m->type));
8002         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8003         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8004 }
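/*
 * Illustrative item for the translation above (values hypothetical): a
 * fully-masked destination MAC with a fully-masked IPv4 ethertype.
 *
 *	const struct rte_flow_item_eth mask = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *		.type = RTE_BE16(0xffff),
 *	};
 *	const struct rte_flow_item_eth spec = {
 *		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *
 * With this mask the function takes the 0xFFFF type branch above and
 * programs ip_version instead of ethertype.
 */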
8005
8006 /**
8007  * Add VLAN item to matcher and to the value.
8008  *
8009  * @param[in, out] dev_flow
8010  *   Flow descriptor.
8011  * @param[in, out] matcher
8012  *   Flow matcher.
8013  * @param[in, out] key
8014  *   Flow matcher value.
8015  * @param[in] item
8016  *   Flow pattern to translate.
8017  * @param[in] inner
8018  *   Item is inner pattern.
8019  */
8020 static void
8021 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8022                             void *matcher, void *key,
8023                             const struct rte_flow_item *item,
8024                             int inner, uint32_t group)
8025 {
8026         const struct rte_flow_item_vlan *vlan_m = item->mask;
8027         const struct rte_flow_item_vlan *vlan_v = item->spec;
8028         void *hdrs_m;
8029         void *hdrs_v;
8030         uint16_t tci_m;
8031         uint16_t tci_v;
8032
8033         if (inner) {
8034                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8035                                          inner_headers);
8036                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8037         } else {
8038                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8039                                          outer_headers);
8040                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8041                 /*
8042                  * This is a workaround; masks are not supported
8043                  * and were pre-validated.
8044                  */
8045                 if (vlan_v)
8046                         dev_flow->handle->vf_vlan.tag =
8047                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8048         }
8049         /*
8050          * When VLAN item exists in flow, mark packet as tagged,
8051          * even if TCI is not specified.
8052          */
8053         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8054                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8055                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8056         }
8057         if (!vlan_v)
8058                 return;
8059         if (!vlan_m)
8060                 vlan_m = &rte_flow_item_vlan_mask;
8061         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8062         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8063         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8064         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8065         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8066         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8067         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8068         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8069         /*
8070          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8071          * ethertype, and use ip_version field instead.
8072          */
8073         if (vlan_m->inner_type == 0xFFFF) {
8074                 switch (vlan_v->inner_type) {
8075                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8076                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8077                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8078                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8079                         return;
8080                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8081                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8082                         return;
8083                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8084                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8085                         return;
8086                 default:
8087                         break;
8088                 }
8089         }
8090         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8091                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8092                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8093                 /* Only one vlan_tag bit can be set. */
8094                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8095                 return;
8096         }
8097         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8098                  rte_be_to_cpu_16(vlan_m->inner_type));
8099         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8100                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8101 }
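/*
 * Sketch of the TCI decomposition above, assuming a hypothetical host-order
 * TCI of 0x2005 (PCP 1, DEI 0, VID 5):
 *
 *	first_vid  = tci & 0x0fff;	// 5
 *	first_cfi  = (tci >> 12) & 1;	// 0
 *	first_prio = tci >> 13;		// 1
 *
 * The MLX5_SET() macro truncates each value to its field width, which is
 * why the plain shifts in the code are sufficient.
 */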
8102
8103 /**
8104  * Add IPV4 item to matcher and to the value.
8105  *
8106  * @param[in, out] matcher
8107  *   Flow matcher.
8108  * @param[in, out] key
8109  *   Flow matcher value.
8110  * @param[in] item
8111  *   Flow pattern to translate.
8112  * @param[in] inner
8113  *   Item is inner pattern.
8114  * @param[in] group
8115  *   The group to insert the rule.
8116  */
8117 static void
8118 flow_dv_translate_item_ipv4(void *matcher, void *key,
8119                             const struct rte_flow_item *item,
8120                             int inner, uint32_t group)
8121 {
8122         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8123         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8124         const struct rte_flow_item_ipv4 nic_mask = {
8125                 .hdr = {
8126                         .src_addr = RTE_BE32(0xffffffff),
8127                         .dst_addr = RTE_BE32(0xffffffff),
8128                         .type_of_service = 0xff,
8129                         .next_proto_id = 0xff,
8130                         .time_to_live = 0xff,
8131                 },
8132         };
8133         void *headers_m;
8134         void *headers_v;
8135         char *l24_m;
8136         char *l24_v;
8137         uint8_t tos;
8138
8139         if (inner) {
8140                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8141                                          inner_headers);
8142                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8143         } else {
8144                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8145                                          outer_headers);
8146                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8147         }
8148         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8149         if (!ipv4_v)
8150                 return;
8151         if (!ipv4_m)
8152                 ipv4_m = &nic_mask;
8153         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8154                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8155         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8156                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8157         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8158         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8159         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8160                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8161         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8162                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8163         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8164         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8165         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8166         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8167                  ipv4_m->hdr.type_of_service);
8168         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8169         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8170                  ipv4_m->hdr.type_of_service >> 2);
8171         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8172         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8173                  ipv4_m->hdr.next_proto_id);
8174         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8175                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8176         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8177                  ipv4_m->hdr.time_to_live);
8178         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8179                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8180         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8181                  !!(ipv4_m->hdr.fragment_offset));
8182         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8183                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8184 }
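/*
 * The type_of_service byte is split as above: the low 2 bits land in ip_ecn
 * and the upper 6 bits in ip_dscp. E.g. a hypothetical TOS of 0xb8 (DSCP 46,
 * EF PHB) under a full 0xff mask programs ip_dscp = 0x2e and ip_ecn = 0.
 */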
8185
8186 /**
8187  * Add IPV6 item to matcher and to the value.
8188  *
8189  * @param[in, out] matcher
8190  *   Flow matcher.
8191  * @param[in, out] key
8192  *   Flow matcher value.
8193  * @param[in] item
8194  *   Flow pattern to translate.
8195  * @param[in] inner
8196  *   Item is inner pattern.
8197  * @param[in] group
8198  *   The group to insert the rule.
8199  */
8200 static void
8201 flow_dv_translate_item_ipv6(void *matcher, void *key,
8202                             const struct rte_flow_item *item,
8203                             int inner, uint32_t group)
8204 {
8205         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8206         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8207         const struct rte_flow_item_ipv6 nic_mask = {
8208                 .hdr = {
8209                         .src_addr =
8210                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8211                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8212                         .dst_addr =
8213                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8214                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8215                         .vtc_flow = RTE_BE32(0xffffffff),
8216                         .proto = 0xff,
8217                         .hop_limits = 0xff,
8218                 },
8219         };
8220         void *headers_m;
8221         void *headers_v;
8222         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8223         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8224         char *l24_m;
8225         char *l24_v;
8226         uint32_t vtc_m;
8227         uint32_t vtc_v;
8228         int i;
8229         int size;
8230
8231         if (inner) {
8232                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8233                                          inner_headers);
8234                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8235         } else {
8236                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8237                                          outer_headers);
8238                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8239         }
8240         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8241         if (!ipv6_v)
8242                 return;
8243         if (!ipv6_m)
8244                 ipv6_m = &nic_mask;
8245         size = sizeof(ipv6_m->hdr.dst_addr);
8246         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8247                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8248         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8249                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8250         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8251         for (i = 0; i < size; ++i)
8252                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8253         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8254                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8255         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8256                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8257         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8258         for (i = 0; i < size; ++i)
8259                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8260         /*
          * TOS: vtc_flow packs version(4b), traffic class(8b) and flow
          * label(20b); MLX5_SET() truncation keeps the 2 ECN bits from
          * vtc >> 20 and the 6 DSCP bits from vtc >> 22.
          */
8261         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8262         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8263         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8264         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8265         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8266         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8267         /* Label. */
8268         if (inner) {
8269                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8270                          vtc_m);
8271                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8272                          vtc_v);
8273         } else {
8274                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8275                          vtc_m);
8276                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8277                          vtc_v);
8278         }
8279         /* Protocol. */
8280         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8281                  ipv6_m->hdr.proto);
8282         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8283                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8284         /* Hop limit. */
8285         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8286                  ipv6_m->hdr.hop_limits);
8287         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8288                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8289         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8290                  !!(ipv6_m->has_frag_ext));
8291         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8292                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8293 }
8294
8295 /**
8296  * Add IPV6 fragment extension item to matcher and to the value.
8297  *
8298  * @param[in, out] matcher
8299  *   Flow matcher.
8300  * @param[in, out] key
8301  *   Flow matcher value.
8302  * @param[in] item
8303  *   Flow pattern to translate.
8304  * @param[in] inner
8305  *   Item is inner pattern.
8306  */
8307 static void
8308 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8309                                      const struct rte_flow_item *item,
8310                                      int inner)
8311 {
8312         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8313         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8314         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8315                 .hdr = {
8316                         .next_header = 0xff,
8317                         .frag_data = RTE_BE16(0xffff),
8318                 },
8319         };
8320         void *headers_m;
8321         void *headers_v;
8322
8323         if (inner) {
8324                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8325                                          inner_headers);
8326                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8327         } else {
8328                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8329                                          outer_headers);
8330                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8331         }
8332         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8333         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8334         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8335         if (!ipv6_frag_ext_v)
8336                 return;
8337         if (!ipv6_frag_ext_m)
8338                 ipv6_frag_ext_m = &nic_mask;
8339         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8340                  ipv6_frag_ext_m->hdr.next_header);
8341         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8342                  ipv6_frag_ext_v->hdr.next_header &
8343                  ipv6_frag_ext_m->hdr.next_header);
8344 }
8345
8346 /**
8347  * Add TCP item to matcher and to the value.
8348  *
8349  * @param[in, out] matcher
8350  *   Flow matcher.
8351  * @param[in, out] key
8352  *   Flow matcher value.
8353  * @param[in] item
8354  *   Flow pattern to translate.
8355  * @param[in] inner
8356  *   Item is inner pattern.
8357  */
8358 static void
8359 flow_dv_translate_item_tcp(void *matcher, void *key,
8360                            const struct rte_flow_item *item,
8361                            int inner)
8362 {
8363         const struct rte_flow_item_tcp *tcp_m = item->mask;
8364         const struct rte_flow_item_tcp *tcp_v = item->spec;
8365         void *headers_m;
8366         void *headers_v;
8367
8368         if (inner) {
8369                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8370                                          inner_headers);
8371                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8372         } else {
8373                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8374                                          outer_headers);
8375                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8376         }
8377         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8378         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8379         if (!tcp_v)
8380                 return;
8381         if (!tcp_m)
8382                 tcp_m = &rte_flow_item_tcp_mask;
8383         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8384                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8385         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8386                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8387         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8388                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8389         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8390                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8391         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8392                  tcp_m->hdr.tcp_flags);
8393         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8394                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8395 }
8396
8397 /**
8398  * Add UDP item to matcher and to the value.
8399  *
8400  * @param[in, out] matcher
8401  *   Flow matcher.
8402  * @param[in, out] key
8403  *   Flow matcher value.
8404  * @param[in] item
8405  *   Flow pattern to translate.
8406  * @param[in] inner
8407  *   Item is inner pattern.
8408  */
8409 static void
8410 flow_dv_translate_item_udp(void *matcher, void *key,
8411                            const struct rte_flow_item *item,
8412                            int inner)
8413 {
8414         const struct rte_flow_item_udp *udp_m = item->mask;
8415         const struct rte_flow_item_udp *udp_v = item->spec;
8416         void *headers_m;
8417         void *headers_v;
8418
8419         if (inner) {
8420                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8421                                          inner_headers);
8422                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8423         } else {
8424                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8425                                          outer_headers);
8426                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8427         }
8428         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8429         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8430         if (!udp_v)
8431                 return;
8432         if (!udp_m)
8433                 udp_m = &rte_flow_item_udp_mask;
8434         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8435                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8436         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8437                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8438         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8439                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8440         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8441                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8442 }
8443
8444 /**
8445  * Add GRE optional Key item to matcher and to the value.
8446  *
8447  * @param[in, out] matcher
8448  *   Flow matcher.
8449  * @param[in, out] key
8450  *   Flow matcher value.
8451  * @param[in] item
8452  *   Flow pattern to translate.
8455  */
8456 static void
8457 flow_dv_translate_item_gre_key(void *matcher, void *key,
8458                                    const struct rte_flow_item *item)
8459 {
8460         const rte_be32_t *key_m = item->mask;
8461         const rte_be32_t *key_v = item->spec;
8462         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8463         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8464         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8465
8466         /* GRE K bit must be on and should already be validated */
8467         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8468         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8469         if (!key_v)
8470                 return;
8471         if (!key_m)
8472                 key_m = &gre_key_default_mask;
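        /* HW splits the 32-bit key: 24 MSBs in gre_key_h, 8 LSBs in gre_key_l. */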
8473         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8474                  rte_be_to_cpu_32(*key_m) >> 8);
8475         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8476                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8477         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8478                  rte_be_to_cpu_32(*key_m) & 0xFF);
8479         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8480                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8481 }
8482
8483 /**
8484  * Add GRE item to matcher and to the value.
8485  *
8486  * @param[in, out] matcher
8487  *   Flow matcher.
8488  * @param[in, out] key
8489  *   Flow matcher value.
8490  * @param[in] item
8491  *   Flow pattern to translate.
8492  * @param[in] inner
8493  *   Item is inner pattern.
8494  */
8495 static void
8496 flow_dv_translate_item_gre(void *matcher, void *key,
8497                            const struct rte_flow_item *item,
8498                            int inner)
8499 {
8500         const struct rte_flow_item_gre *gre_m = item->mask;
8501         const struct rte_flow_item_gre *gre_v = item->spec;
8502         void *headers_m;
8503         void *headers_v;
8504         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8505         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
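        /*
         * Host-order view of c_rsvd0_ver: version in bits 0-2, S bit 12,
         * K bit 13 and C bit 15 (little-endian bitfield layout).
         */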
8506         struct {
8507                 union {
8508                         __extension__
8509                         struct {
8510                                 uint16_t version:3;
8511                                 uint16_t rsvd0:9;
8512                                 uint16_t s_present:1;
8513                                 uint16_t k_present:1;
8514                                 uint16_t rsvd_bit1:1;
8515                                 uint16_t c_present:1;
8516                         };
8517                         uint16_t value;
8518                 };
8519         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8520
8521         if (inner) {
8522                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8523                                          inner_headers);
8524                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8525         } else {
8526                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8527                                          outer_headers);
8528                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8529         }
8530         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8531         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8532         if (!gre_v)
8533                 return;
8534         if (!gre_m)
8535                 gre_m = &rte_flow_item_gre_mask;
8536         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8537                  rte_be_to_cpu_16(gre_m->protocol));
8538         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8539                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8540         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8541         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8542         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8543                  gre_crks_rsvd0_ver_m.c_present);
8544         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8545                  gre_crks_rsvd0_ver_v.c_present &
8546                  gre_crks_rsvd0_ver_m.c_present);
8547         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8548                  gre_crks_rsvd0_ver_m.k_present);
8549         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8550                  gre_crks_rsvd0_ver_v.k_present &
8551                  gre_crks_rsvd0_ver_m.k_present);
8552         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8553                  gre_crks_rsvd0_ver_m.s_present);
8554         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8555                  gre_crks_rsvd0_ver_v.s_present &
8556                  gre_crks_rsvd0_ver_m.s_present);
8557 }
8558
8559 /**
8560  * Add NVGRE item to matcher and to the value.
8561  *
8562  * @param[in, out] matcher
8563  *   Flow matcher.
8564  * @param[in, out] key
8565  *   Flow matcher value.
8566  * @param[in] item
8567  *   Flow pattern to translate.
8568  * @param[in] inner
8569  *   Item is inner pattern.
8570  */
8571 static void
8572 flow_dv_translate_item_nvgre(void *matcher, void *key,
8573                              const struct rte_flow_item *item,
8574                              int inner)
8575 {
8576         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8577         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8578         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8579         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8580         const char *tni_flow_id_m;
8581         const char *tni_flow_id_v;
8582         char *gre_key_m;
8583         char *gre_key_v;
8584         int size;
8585         int i;
8586
8587         /*
          * For NVGRE, GRE header fields must be set with defined values:
          * c_rsvd0_ver 0x2000 turns on the K (key present) bit and the
          * 0xB000 mask covers the C, K and S bits.
          */
8588         const struct rte_flow_item_gre gre_spec = {
8589                 .c_rsvd0_ver = RTE_BE16(0x2000),
8590                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8591         };
8592         const struct rte_flow_item_gre gre_mask = {
8593                 .c_rsvd0_ver = RTE_BE16(0xB000),
8594                 .protocol = RTE_BE16(UINT16_MAX),
8595         };
8596         const struct rte_flow_item gre_item = {
8597                 .spec = &gre_spec,
8598                 .mask = &gre_mask,
8599                 .last = NULL,
8600         };
8601         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8602         if (!nvgre_v)
8603                 return;
8604         if (!nvgre_m)
8605                 nvgre_m = &rte_flow_item_nvgre_mask;
8606         tni_flow_id_m = (const char *)nvgre_m->tni;
8607         tni_flow_id_v = (const char *)nvgre_v->tni;
8608         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
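        /* The 24-bit TNI plus the 8-bit flow ID overlay the 32-bit GRE key. */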
8609         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8610         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8611         memcpy(gre_key_m, tni_flow_id_m, size);
8612         for (i = 0; i < size; ++i)
8613                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8614 }
8615
8616 /**
8617  * Add VXLAN item to matcher and to the value.
8618  *
8619  * @param[in, out] matcher
8620  *   Flow matcher.
8621  * @param[in, out] key
8622  *   Flow matcher value.
8623  * @param[in] item
8624  *   Flow pattern to translate.
8625  * @param[in] inner
8626  *   Item is inner pattern.
8627  */
8628 static void
8629 flow_dv_translate_item_vxlan(void *matcher, void *key,
8630                              const struct rte_flow_item *item,
8631                              int inner)
8632 {
8633         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8634         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8635         void *headers_m;
8636         void *headers_v;
8637         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8638         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8639         char *vni_m;
8640         char *vni_v;
8641         uint16_t dport;
8642         int size;
8643         int i;
8644
8645         if (inner) {
8646                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8647                                          inner_headers);
8648                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8649         } else {
8650                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8651                                          outer_headers);
8652                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8653         }
8654         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8655                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
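        /* Set the UDP port match only if a UDP item did not already do it. */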
8656         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8657                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8658                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8659         }
8660         if (!vxlan_v)
8661                 return;
8662         if (!vxlan_m)
8663                 vxlan_m = &rte_flow_item_vxlan_mask;
8664         size = sizeof(vxlan_m->vni);
8665         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8666         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8667         memcpy(vni_m, vxlan_m->vni, size);
8668         for (i = 0; i < size; ++i)
8669                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8670 }
8671
8672 /**
8673  * Add VXLAN-GPE item to matcher and to the value.
8674  *
8675  * @param[in, out] matcher
8676  *   Flow matcher.
8677  * @param[in, out] key
8678  *   Flow matcher value.
8679  * @param[in] item
8680  *   Flow pattern to translate.
8681  * @param[in] inner
8682  *   Item is inner pattern.
8683  */
8685 static void
8686 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8687                                  const struct rte_flow_item *item, int inner)
8688 {
8689         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8690         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8691         void *headers_m;
8692         void *headers_v;
8693         void *misc_m =
8694                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8695         void *misc_v =
8696                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8697         char *vni_m;
8698         char *vni_v;
8699         uint16_t dport;
8700         int size;
8701         int i;
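        /* Default GPE flags 0x0c: I (VNI valid) and P (next protocol) set. */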
8702         uint8_t flags_m = 0xff;
8703         uint8_t flags_v = 0xc;
8704
8705         if (inner) {
8706                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8707                                          inner_headers);
8708                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8709         } else {
8710                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8711                                          outer_headers);
8712                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8713         }
8714         /* This translator only handles VXLAN-GPE items. */
8715         dport = MLX5_UDP_PORT_VXLAN_GPE;
8716         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8717                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8718                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8719         }
8720         if (!vxlan_v)
8721                 return;
8722         if (!vxlan_m)
8723                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8724         size = sizeof(vxlan_m->vni);
8725         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8726         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8727         memcpy(vni_m, vxlan_m->vni, size);
8728         for (i = 0; i < size; ++i)
8729                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8730         if (vxlan_m->flags) {
8731                 flags_m = vxlan_m->flags;
8732                 flags_v = vxlan_v->flags;
8733         }
8734         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8735         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8736         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8737                  vxlan_m->protocol);
8738         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8739                  vxlan_v->protocol);
8740 }
8741
8742 /**
8743  * Add Geneve item to matcher and to the value.
8744  *
8745  * @param[in, out] matcher
8746  *   Flow matcher.
8747  * @param[in, out] key
8748  *   Flow matcher value.
8749  * @param[in] item
8750  *   Flow pattern to translate.
8751  * @param[in] inner
8752  *   Item is inner pattern.
8753  */
8755 static void
8756 flow_dv_translate_item_geneve(void *matcher, void *key,
8757                               const struct rte_flow_item *item, int inner)
8758 {
8759         const struct rte_flow_item_geneve *geneve_m = item->mask;
8760         const struct rte_flow_item_geneve *geneve_v = item->spec;
8761         void *headers_m;
8762         void *headers_v;
8763         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8764         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8765         uint16_t dport;
8766         uint16_t gbhdr_m;
8767         uint16_t gbhdr_v;
8768         char *vni_m;
8769         char *vni_v;
8770         size_t size, i;
8771
8772         if (inner) {
8773                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8774                                          inner_headers);
8775                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8776         } else {
8777                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8778                                          outer_headers);
8779                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8780         }
8781         dport = MLX5_UDP_PORT_GENEVE;
8782         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8783                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8784                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8785         }
8786         if (!geneve_v)
8787                 return;
8788         if (!geneve_m)
8789                 geneve_m = &rte_flow_item_geneve_mask;
8790         size = sizeof(geneve_m->vni);
8791         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8792         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8793         memcpy(vni_m, geneve_m->vni, size);
8794         for (i = 0; i < size; ++i)
8795                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8796         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8797                  rte_be_to_cpu_16(geneve_m->protocol));
8798         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8799                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8800         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8801         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8802         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8803                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8804         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8805                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8806         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8807                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8808         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8809                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8810                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8811 }
8812
8813 /**
8814  * Create Geneve TLV option resource.
8815  *
8816  * @param[in, out] dev
8817  *   Pointer to rte_eth_dev structure.
8818  * @param[in] item
8819  *   Flow pattern holding the GENEVE TLV option to register.
8820  * @param[out] error
8821  *   Pointer to error structure.
8824  *
8825  * @return
8826  *   0 on success, a negative errno value otherwise and rte_errno is set.
8827  */
8829 int
8830 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8831                                              const struct rte_flow_item *item,
8832                                              struct rte_flow_error *error)
8833 {
8834         struct mlx5_priv *priv = dev->data->dev_private;
8835         struct mlx5_dev_ctx_shared *sh = priv->sh;
8836         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8837                         sh->geneve_tlv_option_resource;
8838         struct mlx5_devx_obj *obj;
8839         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8840         int ret = 0;
8841
8842         if (!geneve_opt_v)
8843                 return -1;
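        /*
         * A single GENEVE TLV option object is kept per device context;
         * it is reused when class/type/length match, refused otherwise.
         */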
8844         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8845         if (geneve_opt_resource != NULL) {
8846                 if (geneve_opt_resource->option_class ==
8847                         geneve_opt_v->option_class &&
8848                         geneve_opt_resource->option_type ==
8849                         geneve_opt_v->option_type &&
8850                         geneve_opt_resource->length ==
8851                         geneve_opt_v->option_len) {
8852                         /* We already have GENEVE TLV option obj allocated. */
8853                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8854                                            __ATOMIC_RELAXED);
8855                 } else {
8856                         ret = rte_flow_error_set(error, ENOMEM,
8857                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8858                                 "Only one GENEVE TLV option supported");
8859                         goto exit;
8860                 }
8861         } else {
8862                 /* Create a GENEVE TLV object and resource. */
8863                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8864                                 geneve_opt_v->option_class,
8865                                 geneve_opt_v->option_type,
8866                                 geneve_opt_v->option_len);
8867                 if (!obj) {
8868                         ret = rte_flow_error_set(error, ENODATA,
8869                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8870                                 "Failed to create GENEVE TLV Devx object");
8871                         goto exit;
8872                 }
8873                 sh->geneve_tlv_option_resource =
8874                                 mlx5_malloc(MLX5_MEM_ZERO,
8875                                                 sizeof(*geneve_opt_resource),
8876                                                 0, SOCKET_ID_ANY);
8877                 if (!sh->geneve_tlv_option_resource) {
8878                         claim_zero(mlx5_devx_cmd_destroy(obj));
8879                         ret = rte_flow_error_set(error, ENOMEM,
8880                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8881                                 "GENEVE TLV object memory allocation failed");
8882                         goto exit;
8883                 }
8884                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8885                 geneve_opt_resource->obj = obj;
8886                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8887                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8888                 geneve_opt_resource->length = geneve_opt_v->option_len;
8889                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8890                                 __ATOMIC_RELAXED);
8891         }
8892 exit:
8893         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8894         return ret;
8895 }
8896
8897 /**
8898  * Add Geneve TLV option item to matcher.
8899  *
8900  * @param[in, out] dev
8901  *   Pointer to rte_eth_dev structure.
8902  * @param[in, out] matcher
8903  *   Flow matcher.
8904  * @param[in, out] key
8905  *   Flow matcher value.
8906  * @param[in] item
8907  *   Flow pattern to translate.
8908  * @param[out] error
8909  *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
8910  */
8911 static int
8912 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8913                                   void *key, const struct rte_flow_item *item,
8914                                   struct rte_flow_error *error)
8915 {
8916         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8917         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8918         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8919         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8920         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8921                         misc_parameters_3);
8922         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8923         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8924         int ret = 0;
8925
8926         if (!geneve_opt_v)
8927                 return -1;
8928         if (!geneve_opt_m)
8929                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8930         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8931                                                            error);
8932         if (ret) {
8933                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8934                 return ret;
8935         }
8936         /*
8937          * Set the option length in GENEVE header if not requested.
8938          * The GENEVE TLV option length is expressed by the option length field
8939          * in the GENEVE header.
8940          * If the option length was not requested but the GENEVE TLV option item
8941          * is present we set the option length field implicitly:
         * the item option_len counts data words only, so one extra
         * word is added for the 4-byte option header itself.
8942          */
8943         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8944                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8945                          MLX5_GENEVE_OPTLEN_MASK);
8946                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8947                          geneve_opt_v->option_len + 1);
8948         }
8949         /* Set the data. */
8950         if (geneve_opt_v->data) {
8951                 memcpy(&opt_data_key, geneve_opt_v->data,
8952                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8953                                 sizeof(opt_data_key)));
8954                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8955                                 sizeof(opt_data_key));
8956                 memcpy(&opt_data_mask, geneve_opt_m->data,
8957                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8958                                 sizeof(opt_data_mask)));
8959                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8960                                 sizeof(opt_data_mask));
8961                 MLX5_SET(fte_match_set_misc3, misc3_m,
8962                                 geneve_tlv_option_0_data,
8963                                 rte_be_to_cpu_32(opt_data_mask));
8964                 MLX5_SET(fte_match_set_misc3, misc3_v,
8965                                 geneve_tlv_option_0_data,
8966                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8967         }
8968         return ret;
8969 }
8970
8971 /**
8972  * Add MPLS item to matcher and to the value.
8973  *
8974  * @param[in, out] matcher
8975  *   Flow matcher.
8976  * @param[in, out] key
8977  *   Flow matcher value.
8978  * @param[in] item
8979  *   Flow pattern to translate.
8980  * @param[in] prev_layer
8981  *   The protocol layer indicated in previous item.
8982  * @param[in] inner
8983  *   Item is inner pattern.
8984  */
8985 static void
8986 flow_dv_translate_item_mpls(void *matcher, void *key,
8987                             const struct rte_flow_item *item,
8988                             uint64_t prev_layer,
8989                             int inner)
8990 {
8991         const uint32_t *in_mpls_m = item->mask;
8992         const uint32_t *in_mpls_v = item->spec;
8993         uint32_t *out_mpls_m = NULL;
8994         uint32_t *out_mpls_v = NULL;
8995         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8996         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8997         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
8998                                      misc_parameters_2);
8999         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9000         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9001         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9002
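        /*
         * First match the encapsulation the MPLS stack follows:
         * MPLS-over-UDP, MPLS-over-GRE or MPLS-in-IP.
         */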
9003         switch (prev_layer) {
9004         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9005                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9006                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9007                          MLX5_UDP_PORT_MPLS);
9008                 break;
9009         case MLX5_FLOW_LAYER_GRE:
9010                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9011                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9012                          RTE_ETHER_TYPE_MPLS);
9013                 break;
9014         default:
9015                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
9016                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
9017                          IPPROTO_MPLS);
9018                 break;
9019         }
9020         if (!in_mpls_v)
9021                 return;
9022         if (!in_mpls_m)
9023                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9024         switch (prev_layer) {
9025         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9026                 out_mpls_m =
9027                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9028                                                  outer_first_mpls_over_udp);
9029                 out_mpls_v =
9030                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9031                                                  outer_first_mpls_over_udp);
9032                 break;
9033         case MLX5_FLOW_LAYER_GRE:
9034                 out_mpls_m =
9035                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9036                                                  outer_first_mpls_over_gre);
9037                 out_mpls_v =
9038                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9039                                                  outer_first_mpls_over_gre);
9040                 break;
9041         default:
9042                 /* Inner MPLS not over GRE is not supported. */
9043                 if (!inner) {
9044                         out_mpls_m =
9045                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9046                                                          misc2_m,
9047                                                          outer_first_mpls);
9048                         out_mpls_v =
9049                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9050                                                          misc2_v,
9051                                                          outer_first_mpls);
9052                 }
9053                 break;
9054         }
9055         if (out_mpls_m && out_mpls_v) {
9056                 *out_mpls_m = *in_mpls_m;
9057                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9058         }
9059 }
9060
9061 /**
9062  * Add metadata register item to matcher.
9063  *
9064  * @param[in, out] matcher
9065  *   Flow matcher.
9066  * @param[in, out] key
9067  *   Flow matcher value.
9068  * @param[in] reg_type
9069  *   Type of device metadata register.
9070  * @param[in] data
9071  *   Register value to match.
9072  * @param[in] mask
9073  *   Register mask.
9074  */
9075 static void
9076 flow_dv_match_meta_reg(void *matcher, void *key,
9077                        enum modify_reg reg_type,
9078                        uint32_t data, uint32_t mask)
9079 {
9080         void *misc2_m =
9081                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9082         void *misc2_v =
9083                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9084         uint32_t temp;
9085
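        /* Bits outside the mask cannot match, keep the value within it. */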
9086         data &= mask;
9087         switch (reg_type) {
9088         case REG_A:
9089                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9090                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9091                 break;
9092         case REG_B:
9093                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9094                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9095                 break;
9096         case REG_C_0:
9097                 /*
9098                  * The metadata register C0 field might be divided into
9099                  * source vport index and META item value, we should set
9100                  * this field according to specified mask, not as whole one.
9101                  */
9102                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9103                 temp |= mask;
9104                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9105                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9106                 temp &= ~mask;
9107                 temp |= data;
9108                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9109                 break;
9110         case REG_C_1:
9111                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9112                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9113                 break;
9114         case REG_C_2:
9115                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9116                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9117                 break;
9118         case REG_C_3:
9119                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9120                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9121                 break;
9122         case REG_C_4:
9123                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9124                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9125                 break;
9126         case REG_C_5:
9127                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9128                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9129                 break;
9130         case REG_C_6:
9131                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9132                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9133                 break;
9134         case REG_C_7:
9135                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9136                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9137                 break;
9138         default:
9139                 MLX5_ASSERT(false);
9140                 break;
9141         }
9142 }
9143
9144 /**
9145  * Add MARK item to matcher.
9146  *
9147  * @param[in] dev
9148  *   The device to configure through.
9149  * @param[in, out] matcher
9150  *   Flow matcher.
9151  * @param[in, out] key
9152  *   Flow matcher value.
9153  * @param[in] item
9154  *   Flow pattern to translate.
9155  */
9156 static void
9157 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9158                             void *matcher, void *key,
9159                             const struct rte_flow_item *item)
9160 {
9161         struct mlx5_priv *priv = dev->data->dev_private;
9162         const struct rte_flow_item_mark *mark;
9163         uint32_t value;
9164         uint32_t mask;
9165
9166         mark = item->mask ? (const void *)item->mask :
9167                             &rte_flow_item_mark_mask;
9168         mask = mark->id & priv->sh->dv_mark_mask;
9169         mark = (const void *)item->spec;
9170         MLX5_ASSERT(mark);
9171         value = mark->id & priv->sh->dv_mark_mask & mask;
9172         if (mask) {
9173                 enum modify_reg reg;
9174
9175                 /* Get the metadata register index for the mark. */
9176                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9177                 MLX5_ASSERT(reg > 0);
9178                 if (reg == REG_C_0) {
9180                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9181                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9182
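                        /*
                         * Only the dv_regc0_mask part of REG_C_0 is
                         * available for MARK, align value and mask to it.
                         */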
9183                         mask &= msk_c0;
9184                         mask <<= shl_c0;
9185                         value <<= shl_c0;
9186                 }
9187                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9188         }
9189 }
9190
9191 /**
9192  * Add META item to matcher.
9193  *
9194  * @param[in] dev
9195  *   The device to configure through.
9196  * @param[in, out] matcher
9197  *   Flow matcher.
9198  * @param[in, out] key
9199  *   Flow matcher value.
9200  * @param[in] attr
9201  *   Attributes of flow that includes this item.
9202  * @param[in] item
9203  *   Flow pattern to translate.
9204  */
9205 static void
9206 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9207                             void *matcher, void *key,
9208                             const struct rte_flow_attr *attr,
9209                             const struct rte_flow_item *item)
9210 {
9211         const struct rte_flow_item_meta *meta_m;
9212         const struct rte_flow_item_meta *meta_v;
9213
9214         meta_m = (const void *)item->mask;
9215         if (!meta_m)
9216                 meta_m = &rte_flow_item_meta_mask;
9217         meta_v = (const void *)item->spec;
9218         if (meta_v) {
9219                 int reg;
9220                 uint32_t value = meta_v->data;
9221                 uint32_t mask = meta_m->data;
9222
9223                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9224                 if (reg < 0)
9225                         return;
9226                 MLX5_ASSERT(reg != REG_NON);
9227                 /*
9228                  * In datapath code there is no endianness
9229                  * In the datapath code there are no endianness
9230                  * conversions for performance reasons, all
9231                  */
9232                 value = rte_cpu_to_be_32(value);
9233                 mask = rte_cpu_to_be_32(mask);
9234                 if (reg == REG_C_0) {
9235                         struct mlx5_priv *priv = dev->data->dev_private;
9236                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9237                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9238 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
9239                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
9240
9241                         value >>= shr_c0;
9242                         mask >>= shr_c0;
9243 #endif
9244                         value <<= shl_c0;
9245                         mask <<= shl_c0;
9246                         MLX5_ASSERT(msk_c0);
9247                         MLX5_ASSERT(!(~msk_c0 & mask));
9248                 }
9249                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9250         }
9251 }
9252
9253 /**
9254  * Add vport metadata Reg C0 item to matcher.
9255  *
9256  * @param[in, out] matcher
9257  *   Flow matcher.
9258  * @param[in, out] key
9259  *   Flow matcher value.
9260  * @param[in] value
9261  *   Register value to match.
 * @param[in] mask
 *   Register mask.
9262  */
9263 static void
9264 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9265                                   uint32_t value, uint32_t mask)
9266 {
9267         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9268 }
9269
9270 /**
9271  * Add tag item to matcher.
9272  *
9273  * @param[in] dev
9274  *   The device to configure through.
9275  * @param[in, out] matcher
9276  *   Flow matcher.
9277  * @param[in, out] key
9278  *   Flow matcher value.
9279  * @param[in] item
9280  *   Flow pattern to translate.
9281  */
9282 static void
9283 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9284                                 void *matcher, void *key,
9285                                 const struct rte_flow_item *item)
9286 {
9287         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9288         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9289         uint32_t mask, value;
9290
9291         MLX5_ASSERT(tag_v);
9292         value = tag_v->data;
9293         mask = tag_m ? tag_m->data : UINT32_MAX;
9294         if (tag_v->id == REG_C_0) {
9295                 struct mlx5_priv *priv = dev->data->dev_private;
9296                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9297                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9298
9299                 mask &= msk_c0;
9300                 mask <<= shl_c0;
9301                 value <<= shl_c0;
9302         }
9303         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9304 }
9305
9306 /**
9307  * Add TAG item to matcher.
9308  *
9309  * @param[in] dev
9310  *   The device to configure through.
9311  * @param[in, out] matcher
9312  *   Flow matcher.
9313  * @param[in, out] key
9314  *   Flow matcher value.
9315  * @param[in] item
9316  *   Flow pattern to translate.
9317  */
9318 static void
9319 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9320                            void *matcher, void *key,
9321                            const struct rte_flow_item *item)
9322 {
9323         const struct rte_flow_item_tag *tag_v = item->spec;
9324         const struct rte_flow_item_tag *tag_m = item->mask;
9325         enum modify_reg reg;
9326
9327         MLX5_ASSERT(tag_v);
9328         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9329         /* Get the metadata register index for the tag. */
9330         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9331         MLX5_ASSERT(reg > 0);
9332         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9333 }
9334
9335 /**
9336  * Add source vport match to the specified matcher.
9337  *
9338  * @param[in, out] matcher
9339  *   Flow matcher.
9340  * @param[in, out] key
9341  *   Flow matcher value.
9342  * @param[in] port
9343  *   Source vport value to match.
9344  * @param[in] mask
9345  *   Mask to apply on the vport value.
9346  */
9347 static void
9348 flow_dv_translate_item_source_vport(void *matcher, void *key,
9349                                     int16_t port, uint16_t mask)
9350 {
9351         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9352         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9353
9354         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9355         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9356 }
9357
9358 /**
9359  * Translate port-id item to eswitch match on port-id.
9360  *
9361  * @param[in] dev
9362  *   The device to configure through.
9363  * @param[in, out] matcher
9364  *   Flow matcher.
9365  * @param[in, out] key
9366  *   Flow matcher value.
9367  * @param[in] item
9368  *   Flow pattern to translate.
9369  * @param[in] attr
9370  *   Flow attributes.
9371  *
9372  * @return
9373  *   0 on success, a negative errno value otherwise.
9374  */
9375 static int
9376 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9377                                void *key, const struct rte_flow_item *item,
9378                                const struct rte_flow_attr *attr)
9379 {
9380         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9381         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9382         struct mlx5_priv *priv;
9383         uint16_t mask, id;
9384
9385         mask = pid_m ? pid_m->id : 0xffff;
9386         id = pid_v ? pid_v->id : dev->data->port_id;
9387         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9388         if (!priv)
9389                 return -rte_errno;
9390         /*
9391          * Translate to vport field or to metadata, depending on mode.
9392          * Kernel can use either misc.source_port or half of C0 metadata
9393          * register.
9394          */
9395         if (priv->vport_meta_mask) {
9396                 /*
9397                  * Provide the hint for SW steering library
9398                  * to insert the flow into ingress domain and
9399                  * save the extra vport match.
9400                  */
9401                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9402                     priv->pf_bond < 0 && attr->transfer)
9403                         flow_dv_translate_item_source_vport
9404                                 (matcher, key, priv->vport_id, mask);
9405                 /*
9406                  * We should always set the vport metadata register,
9407                  * otherwise the SW steering library can drop
9408                  * the rule if wire vport metadata value is not zero,
9409                  * it depends on kernel configuration.
9410                  */
9411                 flow_dv_translate_item_meta_vport(matcher, key,
9412                                                   priv->vport_meta_tag,
9413                                                   priv->vport_meta_mask);
9414         } else {
9415                 flow_dv_translate_item_source_vport(matcher, key,
9416                                                     priv->vport_id, mask);
9417         }
9418         return 0;
9419 }
9420
9421 /**
9422  * Add ICMP6 item to matcher and to the value.
9423  *
9424  * @param[in, out] matcher
9425  *   Flow matcher.
9426  * @param[in, out] key
9427  *   Flow matcher value.
9428  * @param[in] item
9429  *   Flow pattern to translate.
9430  * @param[in] inner
9431  *   Item is inner pattern.
9432  */
9433 static void
9434 flow_dv_translate_item_icmp6(void *matcher, void *key,
9435                               const struct rte_flow_item *item,
9436                               int inner)
9437 {
9438         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9439         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9440         void *headers_m;
9441         void *headers_v;
9442         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9443                                      misc_parameters_3);
9444         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9445         if (inner) {
9446                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9447                                          inner_headers);
9448                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9449         } else {
9450                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9451                                          outer_headers);
9452                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9453         }
9454         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9455         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9456         if (!icmp6_v)
9457                 return;
9458         if (!icmp6_m)
9459                 icmp6_m = &rte_flow_item_icmp6_mask;
9460         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9461         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9462                  icmp6_v->type & icmp6_m->type);
9463         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9464         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9465                  icmp6_v->code & icmp6_m->code);
9466 }
9467
9468 /**
9469  * Add ICMP item to matcher and to the value.
9470  *
9471  * @param[in, out] matcher
9472  *   Flow matcher.
9473  * @param[in, out] key
9474  *   Flow matcher value.
9475  * @param[in] item
9476  *   Flow pattern to translate.
9477  * @param[in] inner
9478  *   Item is inner pattern.
9479  */
9480 static void
9481 flow_dv_translate_item_icmp(void *matcher, void *key,
9482                             const struct rte_flow_item *item,
9483                             int inner)
9484 {
9485         const struct rte_flow_item_icmp *icmp_m = item->mask;
9486         const struct rte_flow_item_icmp *icmp_v = item->spec;
9487         uint32_t icmp_header_data_m = 0;
9488         uint32_t icmp_header_data_v = 0;
9489         void *headers_m;
9490         void *headers_v;
9491         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9492                                      misc_parameters_3);
9493         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9494         if (inner) {
9495                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9496                                          inner_headers);
9497                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9498         } else {
9499                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9500                                          outer_headers);
9501                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9502         }
9503         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9504         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9505         if (!icmp_v)
9506                 return;
9507         if (!icmp_m)
9508                 icmp_m = &rte_flow_item_icmp_mask;
9509         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9510                  icmp_m->hdr.icmp_type);
9511         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9512                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9513         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9514                  icmp_m->hdr.icmp_code);
9515         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9516                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9517         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9518         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9519         if (icmp_header_data_m) {
9520                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9521                 icmp_header_data_v |=
9522                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9523                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9524                          icmp_header_data_m);
9525                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9526                          icmp_header_data_v & icmp_header_data_m);
9527         }
9528 }
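
/*
 * Illustrative sketch (not part of the driver): the translator above
 * folds the ICMP identifier and sequence number into one 32-bit
 * header-data word, identifier in the high 16 bits and sequence number
 * in the low 16 bits. This helper mirrors that packing for reference.
 */
static __rte_unused uint32_t
example_icmp_header_data(rte_be16_t ident, rte_be16_t seq_nb)
{
        return ((uint32_t)rte_be_to_cpu_16(ident) << 16) |
               rte_be_to_cpu_16(seq_nb);
}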
9529
9530 /**
9531  * Add GTP item to matcher and to the value.
9532  *
9533  * @param[in, out] matcher
9534  *   Flow matcher.
9535  * @param[in, out] key
9536  *   Flow matcher value.
9537  * @param[in] item
9538  *   Flow pattern to translate.
9539  * @param[in] inner
9540  *   Item is inner pattern.
9541  */
9542 static void
9543 flow_dv_translate_item_gtp(void *matcher, void *key,
9544                            const struct rte_flow_item *item, int inner)
9545 {
9546         const struct rte_flow_item_gtp *gtp_m = item->mask;
9547         const struct rte_flow_item_gtp *gtp_v = item->spec;
9548         void *headers_m;
9549         void *headers_v;
9550         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9551                                      misc_parameters_3);
9552         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9553         uint16_t dport = RTE_GTPU_UDP_PORT;
9554
9555         if (inner) {
9556                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9557                                          inner_headers);
9558                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9559         } else {
9560                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9561                                          outer_headers);
9562                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9563         }
9564         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9565                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9566                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9567         }
9568         if (!gtp_v)
9569                 return;
9570         if (!gtp_m)
9571                 gtp_m = &rte_flow_item_gtp_mask;
9572         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9573                  gtp_m->v_pt_rsv_flags);
9574         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9575                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9576         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9577         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9578                  gtp_v->msg_type & gtp_m->msg_type);
9579         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9580                  rte_be_to_cpu_32(gtp_m->teid));
9581         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9582                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9583 }
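
/*
 * Illustrative sketch (not part of the driver): an application GTP item
 * for the translator above, matching G-PDU packets (message type 0xff)
 * with an exact TEID. The TEID value is an arbitrary example.
 */
static __rte_unused void
example_build_gtp_item(struct rte_flow_item *item,
                       struct rte_flow_item_gtp *spec,
                       struct rte_flow_item_gtp *mask)
{
        spec->msg_type = 0xff;                  /* G-PDU */
        spec->teid = RTE_BE32(0x1234);          /* example TEID */
        mask->msg_type = 0xff;
        mask->teid = RTE_BE32(UINT32_MAX);
        item->type = RTE_FLOW_ITEM_TYPE_GTP;
        item->spec = spec;
        item->mask = mask;
}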
9584
9585 /**
9586  * Add GTP PSC item to matcher and to the value.
9587  *
9588  * @param[in, out] matcher
9589  *   Flow matcher.
9590  * @param[in, out] key
9591  *   Flow matcher value.
9592  * @param[in] item
9593  *   Flow pattern to translate.
9594  */
9595 static int
9596 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9597                                const struct rte_flow_item *item)
9598 {
9599         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9600         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9601         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9602                         misc_parameters_3);
9603         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9604         union {
9605                 uint32_t w32;
9606                 struct {
9607                         uint16_t seq_num;
9608                         uint8_t npdu_num;
9609                         uint8_t next_ext_header_type;
9610                 };
9611         } dw_2;
9612         uint8_t gtp_flags;
9613
9614         /* Always set E-flag match to one, regardless of GTP item settings. */
9615         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9616         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9617         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9618         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9619         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9620         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9621         /* Set next extension header type. */
9622         dw_2.seq_num = 0;
9623         dw_2.npdu_num = 0;
9624         dw_2.next_ext_header_type = 0xff;
9625         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9626                  rte_cpu_to_be_32(dw_2.w32));
9627         dw_2.seq_num = 0;
9628         dw_2.npdu_num = 0;
9629         dw_2.next_ext_header_type = 0x85;
9630         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9631                  rte_cpu_to_be_32(dw_2.w32));
9632         if (gtp_psc_v) {
9633                 union {
9634                         uint32_t w32;
9635                         struct {
9636                                 uint8_t len;
9637                                 uint8_t type_flags;
9638                                 uint8_t qfi;
9639                                 uint8_t reserved;
9640                         };
9641                 } dw_0;
9642
9643                 /* Set extension header PDU type and QoS. */
9644                 if (!gtp_psc_m)
9645                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9646                 dw_0.w32 = 0;
9647                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9648                 dw_0.qfi = gtp_psc_m->qfi;
9649                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9650                          rte_cpu_to_be_32(dw_0.w32));
9651                 dw_0.w32 = 0;
9652                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9653                                                         gtp_psc_m->pdu_type);
9654                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9655                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9656                          rte_cpu_to_be_32(dw_0.w32));
9657         }
9658         return 0;
9659 }
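
/*
 * Illustrative sketch (not part of the driver): a GTP PSC item placed
 * after the GTP item, as consumed by the translator above. PDU type 1
 * (UL PDU session information) and QFI 9 are example values; the masks
 * cover the 4-bit PDU type and 6-bit QFI fields.
 */
static __rte_unused void
example_build_gtp_psc_item(struct rte_flow_item *item,
                           struct rte_flow_item_gtp_psc *spec,
                           struct rte_flow_item_gtp_psc *mask)
{
        spec->pdu_type = 1;     /* example: UL PDU session information */
        spec->qfi = 9;          /* example QoS flow identifier */
        mask->pdu_type = 0x0f;
        mask->qfi = 0x3f;
        item->type = RTE_FLOW_ITEM_TYPE_GTP_PSC;
        item->spec = spec;
        item->mask = mask;
}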
9660
9661 /**
9662  * Add eCPRI item to matcher and to the value.
9663  *
9664  * @param[in] dev
9665  *   The device to configure through.
9666  * @param[in, out] matcher
9667  *   Flow matcher.
9668  * @param[in, out] key
9669  *   Flow matcher value.
9670  * @param[in] item
9671  *   Flow pattern to translate.
9674  */
9675 static void
9676 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9677                              void *key, const struct rte_flow_item *item)
9678 {
9679         struct mlx5_priv *priv = dev->data->dev_private;
9680         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9681         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9682         struct rte_ecpri_common_hdr common;
9683         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9684                                      misc_parameters_4);
9685         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9686         uint32_t *samples;
9687         void *dw_m;
9688         void *dw_v;
9689
9690         if (!ecpri_v)
9691                 return;
9692         if (!ecpri_m)
9693                 ecpri_m = &rte_flow_item_ecpri_mask;
9694         /*
9695          * At most four DW samples are supported in a single matching now.
9696          * Two of them are used for eCPRI matching:
9697          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
9698          * 2. Message ID: one or two bytes, mask 0xffff0000 or 0xff000000,
9699          *    if any.
9700          */
9701         if (!ecpri_m->hdr.common.u32)
9702                 return;
9703         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9704         /* Need to take the whole DW as the mask to fill the entry. */
9705         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9706                             prog_sample_field_value_0);
9707         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9708                             prog_sample_field_value_0);
9709         /* Already big endian (network order) in the header. */
9710         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9711         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9712         /* Sample#0, used for matching type, offset 0. */
9713         MLX5_SET(fte_match_set_misc4, misc4_m,
9714                  prog_sample_field_id_0, samples[0]);
9715         /* It makes no sense to set the sample ID in the mask field. */
9716         MLX5_SET(fte_match_set_misc4, misc4_v,
9717                  prog_sample_field_id_0, samples[0]);
9718         /*
9719          * Check if the message body part needs to be matched.
9720          * Wildcard rules matching only the type field must be supported.
9721          */
9722         if (ecpri_m->hdr.dummy[0]) {
9723                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9724                 switch (common.type) {
9725                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9726                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9727                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9728                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9729                                             prog_sample_field_value_1);
9730                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9731                                             prog_sample_field_value_1);
9732                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9733                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9734                                             ecpri_m->hdr.dummy[0];
9735                         /* Sample#1, to match message body, offset 4. */
9736                         MLX5_SET(fte_match_set_misc4, misc4_m,
9737                                  prog_sample_field_id_1, samples[1]);
9738                         MLX5_SET(fte_match_set_misc4, misc4_v,
9739                                  prog_sample_field_id_1, samples[1]);
9740                         break;
9741                 default:
9742                         /* Others, do not match any sample ID. */
9743                         break;
9744                 }
9745         }
9746 }
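
/*
 * Illustrative sketch (not part of the driver): an application eCPRI
 * item matched via the flex parser samples above. Sample #0 covers the
 * common header (type byte, mask 0x00ff0000 in network order), sample
 * #1 the first message DW (here the IQ data PC_ID). Values are
 * examples.
 */
static __rte_unused void
example_build_ecpri_item(struct rte_flow_item *item,
                         struct rte_flow_item_ecpri *spec,
                         struct rte_flow_item_ecpri *mask)
{
        spec->hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
        spec->hdr.type0.pc_id = rte_cpu_to_be_16(0x1234);
        mask->hdr.common.u32 = rte_cpu_to_be_32(0x00ff0000);
        mask->hdr.type0.pc_id = RTE_BE16(0xffff);
        item->type = RTE_FLOW_ITEM_TYPE_ECPRI;
        item->spec = spec;
        item->mask = mask;
}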
9747
9748 /**
9749  * Add connection tracking status item to matcher and to the value.
9750  *
9751  * @param[in] dev
9752  *   The device to configure through.
9753  * @param[in, out] matcher
9754  *   Flow matcher.
9755  * @param[in, out] key
9756  *   Flow matcher value.
9757  * @param[in] item
9758  *   Flow pattern to translate.
9759  */
9760 static void
9761 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
9762                               void *matcher, void *key,
9763                               const struct rte_flow_item *item)
9764 {
9765         uint32_t reg_value = 0;
9766         int reg_id;
9767         /* The 8 LSB are 0b11/0000/11; the middle 4 bits are reserved. */
9768         uint32_t reg_mask = 0;
9769         const struct rte_flow_item_conntrack *spec = item->spec;
9770         const struct rte_flow_item_conntrack *mask = item->mask;
9771         uint32_t flags;
9772         struct rte_flow_error error;
9773
9774         if (!mask)
9775                 mask = &rte_flow_item_conntrack_mask;
9776         if (!spec || !mask->flags)
9777                 return;
9778         flags = spec->flags & mask->flags;
9779         /* The conflict should be checked in the validation. */
9780         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
9781                 reg_value |= MLX5_CT_SYNDROME_VALID;
9782         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9783                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
9784         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
9785                 reg_value |= MLX5_CT_SYNDROME_INVALID;
9786         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
9787                 reg_value |= MLX5_CT_SYNDROME_TRAP;
9788         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9789                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
9790         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
9791                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
9792                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
9793                 reg_mask |= 0xc0;
9794         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9795                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
9796         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9797                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
9798         /* The REG_C_x value could be saved during startup. */
9799         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
9800         if (reg_id == REG_NON)
9801                 return;
9802         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
9803                                reg_value, reg_mask);
9804 }
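
/*
 * Illustrative sketch (not part of the driver): the application-side
 * conntrack item that the translator above maps onto a REG_C_x
 * register. Matching the "valid" state while the mask also covers
 * invalid/disabled is what produces the 0xc0 register mask above.
 */
static __rte_unused void
example_build_conntrack_item(struct rte_flow_item *item,
                             struct rte_flow_item_conntrack *spec,
                             struct rte_flow_item_conntrack *mask)
{
        spec->flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID;
        mask->flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
                      RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
                      RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED;
        item->type = RTE_FLOW_ITEM_TYPE_CONNTRACK;
        item->spec = spec;
        item->mask = mask;
}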
9805
9806 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9807
9808 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9809         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9810                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9811
9812 /**
9813  * Calculate flow matcher enable bitmap.
9814  *
9815  * @param match_criteria
9816  *   Pointer to flow matcher criteria.
9817  *
9818  * @return
9819  *   Bitmap of enabled fields.
9820  */
9821 static uint8_t
9822 flow_dv_matcher_enable(uint32_t *match_criteria)
9823 {
9824         uint8_t match_criteria_enable;
9825
9826         match_criteria_enable =
9827                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9828                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9829         match_criteria_enable |=
9830                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9831                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9832         match_criteria_enable |=
9833                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9834                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9835         match_criteria_enable |=
9836                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9837                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9838         match_criteria_enable |=
9839                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9840                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9841         match_criteria_enable |=
9842                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9843                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9844         return match_criteria_enable;
9845 }
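
/*
 * Illustrative sketch (not part of the driver): a matcher that touches
 * only the outer headers (e.g. the IP protocol) plus misc3 (e.g. ICMP
 * type/code) would get the enable bitmap below from the function above.
 */
static __rte_unused uint8_t
example_outer_misc3_criteria(void)
{
        return (1u << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
               (1u << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT);
}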
9846
9847 struct mlx5_hlist_entry *
9848 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9849 {
9850         struct mlx5_dev_ctx_shared *sh = list->ctx;
9851         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9852         struct rte_eth_dev *dev = ctx->dev;
9853         struct mlx5_flow_tbl_data_entry *tbl_data;
9854         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9855         struct rte_flow_error *error = ctx->error;
9856         union mlx5_flow_tbl_key key = { .v64 = key64 };
9857         struct mlx5_flow_tbl_resource *tbl;
9858         void *domain;
9859         uint32_t idx = 0;
9860         int ret;
9861
9862         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9863         if (!tbl_data) {
9864                 rte_flow_error_set(error, ENOMEM,
9865                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9866                                    NULL,
9867                                    "cannot allocate flow table data entry");
9868                 return NULL;
9869         }
9870         tbl_data->idx = idx;
9871         tbl_data->tunnel = tt_prm->tunnel;
9872         tbl_data->group_id = tt_prm->group_id;
9873         tbl_data->external = !!tt_prm->external;
9874         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9875         tbl_data->is_egress = !!key.is_egress;
9876         tbl_data->is_transfer = !!key.is_fdb;
9877         tbl_data->dummy = !!key.dummy;
9878         tbl_data->level = key.level;
9879         tbl_data->id = key.id;
9880         tbl = &tbl_data->tbl;
9881         if (key.dummy)
9882                 return &tbl_data->entry;
9883         if (key.is_fdb)
9884                 domain = sh->fdb_domain;
9885         else if (key.is_egress)
9886                 domain = sh->tx_domain;
9887         else
9888                 domain = sh->rx_domain;
9889         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
9890         if (ret) {
9891                 rte_flow_error_set(error, ENOMEM,
9892                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9893                                    NULL, "cannot create flow table object");
9894                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9895                 return NULL;
9896         }
9897         if (key.level != 0) {
9898                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9899                                         (tbl->obj, &tbl_data->jump.action);
9900                 if (ret) {
9901                         rte_flow_error_set(error, ENOMEM,
9902                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9903                                            NULL,
9904                                            "cannot create flow jump action");
9905                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9906                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9907                         return NULL;
9908                 }
9909         }
9910         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
9911               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
9912               key.level, key.id);
9913         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9914                              flow_dv_matcher_create_cb,
9915                              flow_dv_matcher_match_cb,
9916                              flow_dv_matcher_remove_cb);
9917         return &tbl_data->entry;
9918 }
9919
9920 int
9921 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9922                      struct mlx5_hlist_entry *entry, uint64_t key64,
9923                      void *cb_ctx __rte_unused)
9924 {
9925         struct mlx5_flow_tbl_data_entry *tbl_data =
9926                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9927         union mlx5_flow_tbl_key key = { .v64 = key64 };
9928
9929         return tbl_data->level != key.level ||
9930                tbl_data->id != key.id ||
9931                tbl_data->dummy != key.dummy ||
9932                tbl_data->is_transfer != !!key.is_fdb ||
9933                tbl_data->is_egress != !!key.is_egress;
9934 }
9935
9936 /**
9937  * Get a flow table.
9938  *
9939  * @param[in, out] dev
9940  *   Pointer to rte_eth_dev structure.
9941  * @param[in] table_level
9942  *   Table level to use.
9943  * @param[in] egress
9944  *   Direction of the table.
9945  * @param[in] transfer
9946  *   E-Switch or NIC flow.
9947  * @param[in] dummy
9948  *   Dummy entry for dv API.
9949  * @param[in] table_id
9950  *   Table id to use.
9951  * @param[out] error
9952  *   pointer to error structure.
9953  *
9954  * @return
9955  *   Returns the table resource based on the index, NULL in case of failure.
9956  */
9957 struct mlx5_flow_tbl_resource *
9958 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
9959                          uint32_t table_level, uint8_t egress,
9960                          uint8_t transfer,
9961                          bool external,
9962                          const struct mlx5_flow_tunnel *tunnel,
9963                          uint32_t group_id, uint8_t dummy,
9964                          uint32_t table_id,
9965                          struct rte_flow_error *error)
9966 {
9967         struct mlx5_priv *priv = dev->data->dev_private;
9968         union mlx5_flow_tbl_key table_key = {
9969                 {
9970                         .level = table_level,
9971                         .id = table_id,
9972                         .reserved = 0,
9973                         .dummy = !!dummy,
9974                         .is_fdb = !!transfer,
9975                         .is_egress = !!egress,
9976                 }
9977         };
9978         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
9979                 .tunnel = tunnel,
9980                 .group_id = group_id,
9981                 .external = external,
9982         };
9983         struct mlx5_flow_cb_ctx ctx = {
9984                 .dev = dev,
9985                 .error = error,
9986                 .data = &tt_prm,
9987         };
9988         struct mlx5_hlist_entry *entry;
9989         struct mlx5_flow_tbl_data_entry *tbl_data;
9990
9991         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
9992         if (!entry) {
9993                 rte_flow_error_set(error, ENOMEM,
9994                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9995                                    "cannot get table");
9996                 return NULL;
9997         }
9998         DRV_LOG(DEBUG, "table_level %u table_id %u "
9999                 "tunnel %u group %u registered.",
10000                 table_level, table_id,
10001                 tunnel ? tunnel->tunnel_id : 0, group_id);
10002         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10003         return &tbl_data->tbl;
10004 }
10005
10006 void
10007 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
10008                       struct mlx5_hlist_entry *entry)
10009 {
10010         struct mlx5_dev_ctx_shared *sh = list->ctx;
10011         struct mlx5_flow_tbl_data_entry *tbl_data =
10012                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10013
10014         MLX5_ASSERT(entry && sh);
10015         if (tbl_data->jump.action)
10016                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10017         if (tbl_data->tbl.obj)
10018                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10019         if (tbl_data->tunnel_offload && tbl_data->external) {
10020                 struct mlx5_hlist_entry *he;
10021                 struct mlx5_hlist *tunnel_grp_hash;
10022                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10023                 union tunnel_tbl_key tunnel_key = {
10024                         .tunnel_id = tbl_data->tunnel ?
10025                                         tbl_data->tunnel->tunnel_id : 0,
10026                         .group = tbl_data->group_id
10027                 };
10028                 uint32_t table_level = tbl_data->level;
10029
10030                 tunnel_grp_hash = tbl_data->tunnel ?
10031                                         tbl_data->tunnel->groups :
10032                                         thub->groups;
10033                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
10034                 if (he)
10035                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10036                 DRV_LOG(DEBUG,
10037                         "table_level %u id %u tunnel %u group %u released.",
10038                         table_level,
10039                         tbl_data->id,
10040                         tbl_data->tunnel ?
10041                         tbl_data->tunnel->tunnel_id : 0,
10042                         tbl_data->group_id);
10043         }
10044         mlx5_cache_list_destroy(&tbl_data->matchers);
10045         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10046 }
10047
10048 /**
10049  * Release a flow table.
10050  *
10051  * @param[in] sh
10052  *   Pointer to device shared structure.
10053  * @param[in] tbl
10054  *   Table resource to be released.
10055  *
10056  * @return
10057  *   Returns 0 if the table was released, 1 otherwise.
10058  */
10059 static int
10060 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10061                              struct mlx5_flow_tbl_resource *tbl)
10062 {
10063         struct mlx5_flow_tbl_data_entry *tbl_data =
10064                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10065
10066         if (!tbl)
10067                 return 0;
10068         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10069 }
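
/*
 * Illustrative sketch (not part of the driver): a typical get/release
 * pairing for a NIC-Rx table using the two functions above. Level 1
 * and table id 0 are example values; the entry is reference counted,
 * so the release destroys it only on the last reference.
 */
static __rte_unused int
example_tbl_get_release(struct rte_eth_dev *dev,
                        struct rte_flow_error *error)
{
        struct mlx5_flow_tbl_resource *tbl;

        tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
                                       NULL, 0, 0, 0, error);
        if (!tbl)
                return -rte_errno;
        /* ... use tbl->obj while the reference is held ... */
        return flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
}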
10070
10071 int
10072 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
10073                          struct mlx5_cache_entry *entry, void *cb_ctx)
10074 {
10075         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10076         struct mlx5_flow_dv_matcher *ref = ctx->data;
10077         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10078                                                         entry);
10079
10080         return cur->crc != ref->crc ||
10081                cur->priority != ref->priority ||
10082                memcmp((const void *)cur->mask.buf,
10083                       (const void *)ref->mask.buf, ref->mask.size);
10084 }
10085
10086 struct mlx5_cache_entry *
10087 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
10088                           struct mlx5_cache_entry *entry __rte_unused,
10089                           void *cb_ctx)
10090 {
10091         struct mlx5_dev_ctx_shared *sh = list->ctx;
10092         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10093         struct mlx5_flow_dv_matcher *ref = ctx->data;
10094         struct mlx5_flow_dv_matcher *cache;
10095         struct mlx5dv_flow_matcher_attr dv_attr = {
10096                 .type = IBV_FLOW_ATTR_NORMAL,
10097                 .match_mask = (void *)&ref->mask,
10098         };
10099         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10100                                                             typeof(*tbl), tbl);
10101         int ret;
10102
10103         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
10104         if (!cache) {
10105                 rte_flow_error_set(ctx->error, ENOMEM,
10106                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10107                                    "cannot create matcher");
10108                 return NULL;
10109         }
10110         *cache = *ref;
10111         dv_attr.match_criteria_enable =
10112                 flow_dv_matcher_enable(cache->mask.buf);
10113         dv_attr.priority = ref->priority;
10114         if (tbl->is_egress)
10115                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10116         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10117                                                &cache->matcher_object);
10118         if (ret) {
10119                 mlx5_free(cache);
10120                 rte_flow_error_set(ctx->error, ENOMEM,
10121                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10122                                    "cannot create matcher");
10123                 return NULL;
10124         }
10125         return &cache->entry;
10126 }
10127
10128 /**
10129  * Register the flow matcher.
10130  *
10131  * @param[in, out] dev
10132  *   Pointer to rte_eth_dev structure.
10133  * @param[in, out] matcher
10134  *   Pointer to flow matcher.
10135  * @param[in, out] key
10136  *   Pointer to flow table key.
10137  * @param[in, out] dev_flow
10138  *   Pointer to the dev_flow.
10139  * @param[out] error
10140  *   pointer to error structure.
10141  *
10142  * @return
10143  *   0 on success, otherwise -errno and errno is set.
10144  */
10145 static int
10146 flow_dv_matcher_register(struct rte_eth_dev *dev,
10147                          struct mlx5_flow_dv_matcher *ref,
10148                          union mlx5_flow_tbl_key *key,
10149                          struct mlx5_flow *dev_flow,
10150                          const struct mlx5_flow_tunnel *tunnel,
10151                          uint32_t group_id,
10152                          struct rte_flow_error *error)
10153 {
10154         struct mlx5_cache_entry *entry;
10155         struct mlx5_flow_dv_matcher *cache;
10156         struct mlx5_flow_tbl_resource *tbl;
10157         struct mlx5_flow_tbl_data_entry *tbl_data;
10158         struct mlx5_flow_cb_ctx ctx = {
10159                 .error = error,
10160                 .data = ref,
10161         };
10162
10163         /*
10164          * The tunnel offload API requires this registration for cases when
10165          * a tunnel match rule is inserted before the tunnel set rule.
10166          */
10167         tbl = flow_dv_tbl_resource_get(dev, key->level,
10168                                        key->is_egress, key->is_fdb,
10169                                        dev_flow->external, tunnel,
10170                                        group_id, 0, key->id, error);
10171         if (!tbl)
10172                 return -rte_errno;      /* No need to refill the error info */
10173         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10174         ref->tbl = tbl;
10175         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
10176         if (!entry) {
10177                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10178                 return rte_flow_error_set(error, ENOMEM,
10179                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10180                                           "cannot allocate ref memory");
10181         }
10182         cache = container_of(entry, typeof(*cache), entry);
10183         dev_flow->handle->dvh.matcher = cache;
10184         return 0;
10185 }
10186
10187 struct mlx5_hlist_entry *
10188 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
10189 {
10190         struct mlx5_dev_ctx_shared *sh = list->ctx;
10191         struct rte_flow_error *error = ctx;
10192         struct mlx5_flow_dv_tag_resource *entry;
10193         uint32_t idx = 0;
10194         int ret;
10195
10196         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10197         if (!entry) {
10198                 rte_flow_error_set(error, ENOMEM,
10199                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10200                                    "cannot allocate resource memory");
10201                 return NULL;
10202         }
10203         entry->idx = idx;
10204         entry->tag_id = key;
10205         ret = mlx5_flow_os_create_flow_action_tag(key,
10206                                                   &entry->action);
10207         if (ret) {
10208                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10209                 rte_flow_error_set(error, ENOMEM,
10210                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10211                                    NULL, "cannot create action");
10212                 return NULL;
10213         }
10214         return &entry->entry;
10215 }
10216
10217 int
10218 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
10219                      struct mlx5_hlist_entry *entry, uint64_t key,
10220                      void *cb_ctx __rte_unused)
10221 {
10222         struct mlx5_flow_dv_tag_resource *tag =
10223                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10224
10225         return key != tag->tag_id;
10226 }
10227
10228 /**
10229  * Find existing tag resource or create and register a new one.
10230  *
10231  * @param[in, out] dev
10232  *   Pointer to rte_eth_dev structure.
10233  * @param[in, out] tag_be24
10234  *   Tag value in big endian, then right-shifted by 8.
10235  * @param[in, out] dev_flow
10236  *   Pointer to the dev_flow.
10237  * @param[out] error
10238  *   pointer to error structure.
10239  *
10240  * @return
10241  *   0 on success, otherwise -errno and errno is set.
10242  */
10243 static int
10244 flow_dv_tag_resource_register
10245                         (struct rte_eth_dev *dev,
10246                          uint32_t tag_be24,
10247                          struct mlx5_flow *dev_flow,
10248                          struct rte_flow_error *error)
10249 {
10250         struct mlx5_priv *priv = dev->data->dev_private;
10251         struct mlx5_flow_dv_tag_resource *cache_resource;
10252         struct mlx5_hlist_entry *entry;
10253
10254         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
10255         if (entry) {
10256                 cache_resource = container_of
10257                         (entry, struct mlx5_flow_dv_tag_resource, entry);
10258                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
10259                 dev_flow->dv.tag_resource = cache_resource;
10260                 return 0;
10261         }
10262         return -rte_errno;
10263 }
10264
10265 void
10266 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
10267                       struct mlx5_hlist_entry *entry)
10268 {
10269         struct mlx5_dev_ctx_shared *sh = list->ctx;
10270         struct mlx5_flow_dv_tag_resource *tag =
10271                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10272
10273         MLX5_ASSERT(tag && sh && tag->action);
10274         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10275         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10276         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10277 }
10278
10279 /**
10280  * Release the tag.
10281  *
10282  * @param dev
10283  *   Pointer to Ethernet device.
10284  * @param tag_idx
10285  *   Tag index.
10286  *
10287  * @return
10288  *   1 while a reference on it exists, 0 when freed.
10289  */
10290 static int
10291 flow_dv_tag_release(struct rte_eth_dev *dev,
10292                     uint32_t tag_idx)
10293 {
10294         struct mlx5_priv *priv = dev->data->dev_private;
10295         struct mlx5_flow_dv_tag_resource *tag;
10296
10297         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10298         if (!tag)
10299                 return 0;
10300         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10301                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10302         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10303 }
10304
10305 /**
10306  * Translate port ID action to vport.
10307  *
10308  * @param[in] dev
10309  *   Pointer to rte_eth_dev structure.
10310  * @param[in] action
10311  *   Pointer to the port ID action.
10312  * @param[out] dst_port_id
10313  *   The target port ID.
10314  * @param[out] error
10315  *   Pointer to the error structure.
10316  *
10317  * @return
10318  *   0 on success, a negative errno value otherwise and rte_errno is set.
10319  */
10320 static int
10321 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10322                                  const struct rte_flow_action *action,
10323                                  uint32_t *dst_port_id,
10324                                  struct rte_flow_error *error)
10325 {
10326         uint32_t port;
10327         struct mlx5_priv *priv;
10328         const struct rte_flow_action_port_id *conf =
10329                         (const struct rte_flow_action_port_id *)action->conf;
10330
10331         port = conf->original ? dev->data->port_id : conf->id;
10332         priv = mlx5_port_to_eswitch_info(port, false);
10333         if (!priv)
10334                 return rte_flow_error_set(error, -rte_errno,
10335                                           RTE_FLOW_ERROR_TYPE_ACTION,
10336                                           NULL,
10337                                           "No eswitch info was found for port");
10338 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
10339         /*
10340          * This parameter is transferred to
10341          * mlx5dv_dr_action_create_dest_ib_port().
10342          */
10343         *dst_port_id = priv->dev_port;
10344 #else
10345         /*
10346          * Legacy mode, no LAG configuration is supported.
10347          * This parameter is transferred to
10348          * mlx5dv_dr_action_create_dest_vport().
10349          */
10350         *dst_port_id = priv->vport_id;
10351 #endif
10352         return 0;
10353 }
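
/*
 * Illustrative sketch (not part of the driver): the application action
 * resolved by the translator above. With .original = 0 the configured
 * id selects the destination; peer_port_id is an example value.
 */
static __rte_unused void
example_build_port_id_action(struct rte_flow_action *action,
                             struct rte_flow_action_port_id *conf,
                             uint32_t peer_port_id)
{
        conf->original = 0;     /* use the explicit id below */
        conf->id = peer_port_id;
        action->type = RTE_FLOW_ACTION_TYPE_PORT_ID;
        action->conf = conf;
}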
10354
10355 /**
10356  * Create a counter with aging configuration.
10357  *
10358  * @param[in] dev
10359  *   Pointer to rte_eth_dev structure.
10360  * @param[in] dev_flow
10361  *   Pointer to the mlx5_flow.
10362  * @param[out] count
10363  *   Pointer to the counter action configuration.
10364  * @param[in] age
10365  *   Pointer to the aging action configuration.
10366  *
10367  * @return
10368  *   Index to flow counter on success, 0 otherwise.
10369  */
10370 static uint32_t
10371 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10372                                 struct mlx5_flow *dev_flow,
10373                                 const struct rte_flow_action_count *count,
10374                                 const struct rte_flow_action_age *age)
10375 {
10376         uint32_t counter;
10377         struct mlx5_age_param *age_param;
10378
10379         if (count && count->shared)
10380                 counter = flow_dv_counter_get_shared(dev, count->id);
10381         else
10382                 counter = flow_dv_counter_alloc(dev, !!age);
10383         if (!counter || age == NULL)
10384                 return counter;
10385         age_param = flow_dv_counter_idx_get_age(dev, counter);
10386         age_param->context = age->context ? age->context :
10387                 (void *)(uintptr_t)(dev_flow->flow_idx);
10388         age_param->timeout = age->timeout;
10389         age_param->port_id = dev->data->port_id;
10390         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10391         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10392         return counter;
10393 }
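
/*
 * Illustrative sketch (not part of the driver): pairing COUNT with AGE
 * on the application side, as consumed by the function above, then
 * polling for aged-out flows. The 10-second timeout and the context
 * array size are example values.
 */
static __rte_unused int
example_count_age_actions(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_action_count count_conf = { .id = 0 };
        struct rte_flow_action_age age_conf = {
                .timeout = 10,   /* seconds before the flow ages out */
                .context = NULL, /* NULL: the PMD reports the flow index */
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
                { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        void *contexts[64];

        RTE_SET_USED(actions);  /* attach to a flow via rte_flow_create() */
        return rte_flow_get_aged_flows(port_id, contexts,
                                       RTE_DIM(contexts), error);
}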
10394
10395 /**
10396  * Add Tx queue matcher.
10397  *
10398  * @param[in] dev
10399  *   Pointer to the dev struct.
10400  * @param[in, out] matcher
10401  *   Flow matcher.
10402  * @param[in, out] key
10403  *   Flow matcher value.
10404  * @param[in] item
10405  *   Flow pattern to translate.
10408  */
10409 static void
10410 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10411                                 void *matcher, void *key,
10412                                 const struct rte_flow_item *item)
10413 {
10414         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10415         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10416         void *misc_m =
10417                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10418         void *misc_v =
10419                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10420         struct mlx5_txq_ctrl *txq;
10421         uint32_t queue;
10422
10423
10424         queue_m = (const void *)item->mask;
10425         if (!queue_m)
10426                 return;
10427         queue_v = (const void *)item->spec;
10428         if (!queue_v)
10429                 return;
10430         txq = mlx5_txq_get(dev, queue_v->queue);
10431         if (!txq)
10432                 return;
10433         queue = txq->obj->sq->id;
10434         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10435         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10436                  queue & queue_m->queue);
10437         mlx5_txq_release(dev, queue_v->queue);
10438 }
10439
10440 /**
10441  * Set the hash fields according to the @p flow information.
10442  *
10443  * @param[in] dev_flow
10444  *   Pointer to the mlx5_flow.
10445  * @param[in] rss_desc
10446  *   Pointer to the mlx5_flow_rss_desc.
10447  */
10448 static void
10449 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10450                        struct mlx5_flow_rss_desc *rss_desc)
10451 {
10452         uint64_t items = dev_flow->handle->layers;
10453         int rss_inner = 0;
10454         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10455
10456         dev_flow->hash_fields = 0;
10457 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10458         if (rss_desc->level >= 2) {
10459                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10460                 rss_inner = 1;
10461         }
10462 #endif
10463         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10464             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10465                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10466                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10467                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10468                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10469                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10470                         else
10471                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10472                 }
10473         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10474                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10475                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10476                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10477                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10478                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10479                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10480                         else
10481                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10482                 }
10483         }
10484         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10485             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10486                 if (rss_types & ETH_RSS_UDP) {
10487                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10488                                 dev_flow->hash_fields |=
10489                                                 IBV_RX_HASH_SRC_PORT_UDP;
10490                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10491                                 dev_flow->hash_fields |=
10492                                                 IBV_RX_HASH_DST_PORT_UDP;
10493                         else
10494                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10495                 }
10496         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10497                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10498                 if (rss_types & ETH_RSS_TCP) {
10499                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10500                                 dev_flow->hash_fields |=
10501                                                 IBV_RX_HASH_SRC_PORT_TCP;
10502                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10503                                 dev_flow->hash_fields |=
10504                                                 IBV_RX_HASH_DST_PORT_TCP;
10505                         else
10506                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10507                 }
10508         }
10509 }
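
/*
 * Illustrative sketch (not part of the driver): an application RSS
 * action that the function above turns into Verbs hash fields. Level 2
 * selects the inner headers (IBV_RX_HASH_INNER) and L4_DST_ONLY narrows
 * UDP hashing to the destination port. All values are examples.
 */
static __rte_unused void
example_build_rss_action(struct rte_flow_action_rss *rss,
                         const uint16_t *queues, uint32_t queue_num)
{
        rss->level = 2;         /* hash on the innermost headers */
        rss->types = ETH_RSS_UDP | ETH_RSS_L4_DST_ONLY;
        rss->queue = queues;
        rss->queue_num = queue_num;
}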
10510
10511 /**
10512  * Prepare an Rx Hash queue.
10513  *
10514  * @param dev
10515  *   Pointer to Ethernet device.
10516  * @param[in] dev_flow
10517  *   Pointer to the mlx5_flow.
10518  * @param[in] rss_desc
10519  *   Pointer to the mlx5_flow_rss_desc.
10520  * @param[out] hrxq_idx
10521  *   Hash Rx queue index.
10522  *
10523  * @return
10524  *   The initialised Verbs/DevX object, NULL otherwise and rte_errno is set.
10525  */
10526 static struct mlx5_hrxq *
10527 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10528                      struct mlx5_flow *dev_flow,
10529                      struct mlx5_flow_rss_desc *rss_desc,
10530                      uint32_t *hrxq_idx)
10531 {
10532         struct mlx5_priv *priv = dev->data->dev_private;
10533         struct mlx5_flow_handle *dh = dev_flow->handle;
10534         struct mlx5_hrxq *hrxq;
10535
10536         MLX5_ASSERT(rss_desc->queue_num);
10537         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10538         rss_desc->hash_fields = dev_flow->hash_fields;
10539         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10540         rss_desc->shared_rss = 0;
10541         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10542         if (!*hrxq_idx)
10543                 return NULL;
10544         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10545                               *hrxq_idx);
10546         return hrxq;
10547 }
10548
10549 /**
10550  * Release sample sub action resource.
10551  *
10552  * @param[in, out] dev
10553  *   Pointer to rte_eth_dev structure.
10554  * @param[in] act_res
10555  *   Pointer to sample sub action resource.
10556  */
10557 static void
10558 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10559                                    struct mlx5_flow_sub_actions_idx *act_res)
10560 {
10561         if (act_res->rix_hrxq) {
10562                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10563                 act_res->rix_hrxq = 0;
10564         }
10565         if (act_res->rix_encap_decap) {
10566                 flow_dv_encap_decap_resource_release(dev,
10567                                                      act_res->rix_encap_decap);
10568                 act_res->rix_encap_decap = 0;
10569         }
10570         if (act_res->rix_port_id_action) {
10571                 flow_dv_port_id_action_resource_release(dev,
10572                                                 act_res->rix_port_id_action);
10573                 act_res->rix_port_id_action = 0;
10574         }
10575         if (act_res->rix_tag) {
10576                 flow_dv_tag_release(dev, act_res->rix_tag);
10577                 act_res->rix_tag = 0;
10578         }
10579         if (act_res->rix_jump) {
10580                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10581                 act_res->rix_jump = 0;
10582         }
10583 }
10584
10585 int
10586 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10587                         struct mlx5_cache_entry *entry, void *cb_ctx)
10588 {
10589         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10590         struct rte_eth_dev *dev = ctx->dev;
10591         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10592         struct mlx5_flow_dv_sample_resource *cache_resource =
10593                         container_of(entry, typeof(*cache_resource), entry);
10594
10595         if (resource->ratio == cache_resource->ratio &&
10596             resource->ft_type == cache_resource->ft_type &&
10597             resource->ft_id == cache_resource->ft_id &&
10598             resource->set_action == cache_resource->set_action &&
10599             !memcmp((void *)&resource->sample_act,
10600                     (void *)&cache_resource->sample_act,
10601                     sizeof(struct mlx5_flow_sub_actions_list))) {
10602                 /*
10603                  * A matching sample action already exists; release the
10604                  * references prepared for the sub-actions.
10605                  */
10606                 flow_dv_sample_sub_actions_release(dev,
10607                                                 &resource->sample_idx);
10608                 return 0;
10609         }
10610         return 1;
10611 }
10612
10613 struct mlx5_cache_entry *
10614 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
10615                          struct mlx5_cache_entry *entry __rte_unused,
10616                          void *cb_ctx)
10617 {
10618         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10619         struct rte_eth_dev *dev = ctx->dev;
10620         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10621         void **sample_dv_actions = resource->sub_actions;
10622         struct mlx5_flow_dv_sample_resource *cache_resource;
10623         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10624         struct mlx5_priv *priv = dev->data->dev_private;
10625         struct mlx5_dev_ctx_shared *sh = priv->sh;
10626         struct mlx5_flow_tbl_resource *tbl;
10627         uint32_t idx = 0;
10628         const uint32_t next_ft_step = 1;
10629         uint32_t next_ft_id = resource->ft_id + next_ft_step;
10630         uint8_t is_egress = 0;
10631         uint8_t is_transfer = 0;
10632         struct rte_flow_error *error = ctx->error;
10633
10634         /* Register new sample resource. */
10635         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10636         if (!cache_resource) {
10637                 rte_flow_error_set(error, ENOMEM,
10638                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10639                                           NULL,
10640                                           "cannot allocate resource memory");
10641                 return NULL;
10642         }
10643         *cache_resource = *resource;
10644         /* Create normal path table level */
10645         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10646                 is_transfer = 1;
10647         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10648                 is_egress = 1;
10649         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10650                                         is_egress, is_transfer,
10651                                         true, NULL, 0, 0, 0, error);
10652         if (!tbl) {
10653                 rte_flow_error_set(error, ENOMEM,
10654                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10655                                           NULL,
10656                                           "fail to create normal path table "
10657                                           "for sample");
10658                 goto error;
10659         }
10660         cache_resource->normal_path_tbl = tbl;
10661         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10662                 if (!sh->default_miss_action) {
10663                         rte_flow_error_set(error, ENOMEM,
10664                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10665                                                 NULL,
10666                                                 "default miss action was not "
10667                                                 "created");
10668                         goto error;
10669                 }
10670                 sample_dv_actions[resource->sample_act.actions_num++] =
10671                                                 sh->default_miss_action;
10672         }
10673         /* Create a DR sample action */
10674         sampler_attr.sample_ratio = cache_resource->ratio;
10675         sampler_attr.default_next_table = tbl->obj;
10676         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
10677         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
10678                                                         &sample_dv_actions[0];
10679         sampler_attr.action = cache_resource->set_action;
10680         if (mlx5_os_flow_dr_create_flow_action_sampler
10681                         (&sampler_attr, &cache_resource->verbs_action)) {
10682                 rte_flow_error_set(error, ENOMEM,
10683                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10684                                         NULL, "cannot create sample action");
10685                 goto error;
10686         }
10687         cache_resource->idx = idx;
10688         cache_resource->dev = dev;
10689         return &cache_resource->entry;
10690 error:
10691         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
10692                 flow_dv_sample_sub_actions_release(dev,
10693                                                    &cache_resource->sample_idx);
10694         if (cache_resource->normal_path_tbl)
10695                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10696                                 cache_resource->normal_path_tbl);
10697         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10698         return NULL;
10699
10700 }
10701
10702 /**
10703  * Find existing sample resource or create and register a new one.
10704  *
10705  * @param[in, out] dev
10706  *   Pointer to rte_eth_dev structure.
10707  * @param[in] resource
10708  *   Pointer to sample resource.
10709  * @param[in, out] dev_flow
10710  *   Pointer to the dev_flow.
10711  * @param[out] error
10712  *   pointer to error structure.
10713  *
10714  * @return
10715  *   0 on success, otherwise -errno and errno is set.
10716  */
10717 static int
10718 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10719                          struct mlx5_flow_dv_sample_resource *resource,
10720                          struct mlx5_flow *dev_flow,
10721                          struct rte_flow_error *error)
10722 {
10723         struct mlx5_flow_dv_sample_resource *cache_resource;
10724         struct mlx5_cache_entry *entry;
10725         struct mlx5_priv *priv = dev->data->dev_private;
10726         struct mlx5_flow_cb_ctx ctx = {
10727                 .dev = dev,
10728                 .error = error,
10729                 .data = resource,
10730         };
10731
10732         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10733         if (!entry)
10734                 return -rte_errno;
10735         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10736         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10737         dev_flow->dv.sample_res = cache_resource;
10738         return 0;
10739 }
10740
10741 int
10742 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10743                             struct mlx5_cache_entry *entry, void *cb_ctx)
10744 {
10745         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10746         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10747         struct rte_eth_dev *dev = ctx->dev;
10748         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10749                         container_of(entry, typeof(*cache_resource), entry);
10750         uint32_t idx = 0;
10751
10752         if (resource->num_of_dest == cache_resource->num_of_dest &&
10753             resource->ft_type == cache_resource->ft_type &&
10754             !memcmp((void *)cache_resource->sample_act,
10755                     (void *)resource->sample_act,
10756                    (resource->num_of_dest *
10757                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10758                 /*
10759                  * A matching entry already exists; release the
10760                  * references taken for the newly prepared sub-actions.
10761                  */
10762                 for (idx = 0; idx < resource->num_of_dest; idx++)
10763                         flow_dv_sample_sub_actions_release(dev,
10764                                         &resource->sample_idx[idx]);
10765                 return 0;
10766         }
10767         return 1;
10768 }
10769
10770 struct mlx5_cache_entry *
10771 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10772                          struct mlx5_cache_entry *entry __rte_unused,
10773                          void *cb_ctx)
10774 {
10775         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10776         struct rte_eth_dev *dev = ctx->dev;
10777         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10778         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10779         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10780         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10781         struct mlx5_priv *priv = dev->data->dev_private;
10782         struct mlx5_dev_ctx_shared *sh = priv->sh;
10783         struct mlx5_flow_sub_actions_list *sample_act;
10784         struct mlx5dv_dr_domain *domain;
10785         uint32_t idx = 0, res_idx = 0;
10786         struct rte_flow_error *error = ctx->error;
10787         uint64_t action_flags;
10788         int ret;
10789
10790         /* Register new destination array resource. */
10791         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10792                                             &res_idx);
10793         if (!cache_resource) {
10794                 rte_flow_error_set(error, ENOMEM,
10795                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10796                                           NULL,
10797                                           "cannot allocate resource memory");
10798                 return NULL;
10799         }
10800         *cache_resource = *resource;
10801         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10802                 domain = sh->fdb_domain;
10803         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10804                 domain = sh->rx_domain;
10805         else
10806                 domain = sh->tx_domain;
10807         for (idx = 0; idx < resource->num_of_dest; idx++) {
10808                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10809                                  mlx5_malloc(MLX5_MEM_ZERO,
10810                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10811                                  0, SOCKET_ID_ANY);
10812                 if (!dest_attr[idx]) {
10813                         rte_flow_error_set(error, ENOMEM,
10814                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10815                                            NULL,
10816                                            "cannot allocate resource memory");
10817                         goto error;
10818                 }
10819                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10820                 sample_act = &resource->sample_act[idx];
10821                 action_flags = sample_act->action_flags;
10822                 switch (action_flags) {
10823                 case MLX5_FLOW_ACTION_QUEUE:
10824                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10825                         break;
10826                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10827                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10828                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10829                         dest_attr[idx]->dest_reformat->reformat =
10830                                         sample_act->dr_encap_action;
10831                         dest_attr[idx]->dest_reformat->dest =
10832                                         sample_act->dr_port_id_action;
10833                         break;
10834                 case MLX5_FLOW_ACTION_PORT_ID:
10835                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10836                         break;
10837                 case MLX5_FLOW_ACTION_JUMP:
10838                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10839                         break;
10840                 default:
10841                         rte_flow_error_set(error, EINVAL,
10842                                            RTE_FLOW_ERROR_TYPE_ACTION,
10843                                            NULL,
10844                                            "unsupported actions type");
10845                         goto error;
10846                 }
10847         }
10848         /* Create a dest array action. */
10849         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10850                                                 (domain,
10851                                                  cache_resource->num_of_dest,
10852                                                  dest_attr,
10853                                                  &cache_resource->action);
10854         if (ret) {
10855                 rte_flow_error_set(error, ENOMEM,
10856                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10857                                    NULL,
10858                                    "cannot create destination array action");
10859                 goto error;
10860         }
10861         cache_resource->idx = res_idx;
10862         cache_resource->dev = dev;
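        /*
         * The attribute array was only needed to create the DR action;
         * free the temporary per-destination copies right away.
         */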
10863         for (idx = 0; idx < resource->num_of_dest; idx++)
10864                 mlx5_free(dest_attr[idx]);
10865         return &cache_resource->entry;
10866 error:
10867         for (idx = 0; idx < resource->num_of_dest; idx++) {
10868                 flow_dv_sample_sub_actions_release(dev,
10869                                 &cache_resource->sample_idx[idx]);
10870                 if (dest_attr[idx])
10871                         mlx5_free(dest_attr[idx]);
10872         }
10874         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10875         return NULL;
10876 }
10877
10878 /**
10879  * Find existing destination array resource or create and register a new one.
10880  *
10881  * @param[in, out] dev
10882  *   Pointer to rte_eth_dev structure.
10883  * @param[in] resource
10884  *   Pointer to destination array resource.
10885  * @param[in, out] dev_flow
10886  *   Pointer to the dev_flow.
10887  * @param[out] error
10888  *   Pointer to the error structure.
10889  *
10890  * @return
10891  *   0 on success, otherwise a negative errno value and rte_errno is set.
10892  */
10893 static int
10894 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10895                          struct mlx5_flow_dv_dest_array_resource *resource,
10896                          struct mlx5_flow *dev_flow,
10897                          struct rte_flow_error *error)
10898 {
10899         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10900         struct mlx5_priv *priv = dev->data->dev_private;
10901         struct mlx5_cache_entry *entry;
10902         struct mlx5_flow_cb_ctx ctx = {
10903                 .dev = dev,
10904                 .error = error,
10905                 .data = resource,
10906         };
10907
10908         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10909         if (!entry)
10910                 return -rte_errno;
10911         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10912         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10913         dev_flow->dv.dest_array_res = cache_resource;
10914         return 0;
10915 }
10916
10917 /**
10918  * Convert Sample action to DV specification.
10919  *
10920  * @param[in] dev
10921  *   Pointer to rte_eth_dev structure.
10922  * @param[in] action
10923  *   Pointer to sample action structure.
10924  * @param[in, out] dev_flow
10925  *   Pointer to the mlx5_flow.
10926  * @param[in] attr
10927  *   Pointer to the flow attributes.
10928  * @param[in, out] num_of_dest
10929  *   Pointer to the number of destinations.
10930  * @param[in, out] sample_actions
10931  *   Pointer to sample actions list.
10932  * @param[in, out] res
10933  *   Pointer to sample resource.
10934  * @param[out] error
10935  *   Pointer to the error structure.
10936  *
10937  * @return
10938  *   0 on success, a negative errno value otherwise and rte_errno is set.
10939  */
10940 static int
10941 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10942                                 const struct rte_flow_action_sample *action,
10943                                 struct mlx5_flow *dev_flow,
10944                                 const struct rte_flow_attr *attr,
10945                                 uint32_t *num_of_dest,
10946                                 void **sample_actions,
10947                                 struct mlx5_flow_dv_sample_resource *res,
10948                                 struct rte_flow_error *error)
10949 {
10950         struct mlx5_priv *priv = dev->data->dev_private;
10951         const struct rte_flow_action *sub_actions;
10952         struct mlx5_flow_sub_actions_list *sample_act;
10953         struct mlx5_flow_sub_actions_idx *sample_idx;
10954         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10955         struct rte_flow *flow = dev_flow->flow;
10956         struct mlx5_flow_rss_desc *rss_desc;
10957         uint64_t action_flags = 0;
10958
10959         MLX5_ASSERT(wks);
10960         rss_desc = &wks->rss_desc;
10961         sample_act = &res->sample_act;
10962         sample_idx = &res->sample_idx;
10963         res->ratio = action->ratio;
10964         sub_actions = action->actions;
10965         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
10966                 int type = sub_actions->type;
10967                 uint32_t pre_rix = 0;
10968                 void *pre_r;
10969                 switch (type) {
10970                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10971                 {
10972                         const struct rte_flow_action_queue *queue;
10973                         struct mlx5_hrxq *hrxq;
10974                         uint32_t hrxq_idx;
10975
10976                         queue = sub_actions->conf;
10977                         rss_desc->queue_num = 1;
10978                         rss_desc->queue[0] = queue->index;
10979                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10980                                                     rss_desc, &hrxq_idx);
10981                         if (!hrxq)
10982                                 return rte_flow_error_set
10983                                         (error, rte_errno,
10984                                          RTE_FLOW_ERROR_TYPE_ACTION,
10985                                          NULL,
10986                                          "cannot create fate queue");
10987                         sample_act->dr_queue_action = hrxq->action;
10988                         sample_idx->rix_hrxq = hrxq_idx;
10989                         sample_actions[sample_act->actions_num++] =
10990                                                 hrxq->action;
10991                         (*num_of_dest)++;
10992                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
10993                         if (action_flags & MLX5_FLOW_ACTION_MARK)
10994                                 dev_flow->handle->rix_hrxq = hrxq_idx;
10995                         dev_flow->handle->fate_action =
10996                                         MLX5_FLOW_FATE_QUEUE;
10997                         break;
10998                 }
10999                 case RTE_FLOW_ACTION_TYPE_RSS:
11000                 {
11001                         struct mlx5_hrxq *hrxq;
11002                         uint32_t hrxq_idx;
11003                         const struct rte_flow_action_rss *rss;
11004                         const uint8_t *rss_key;
11005
11006                         rss = sub_actions->conf;
11007                         memcpy(rss_desc->queue, rss->queue,
11008                                rss->queue_num * sizeof(uint16_t));
11009                         rss_desc->queue_num = rss->queue_num;
11010                         /* NULL RSS key indicates default RSS key. */
11011                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11012                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11013                         /*
11014                          * rss->level and rss->types should be set in advance
11015                          * when expanding items for RSS.
11016                          */
11017                         flow_dv_hashfields_set(dev_flow, rss_desc);
11018                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11019                                                     rss_desc, &hrxq_idx);
11020                         if (!hrxq)
11021                                 return rte_flow_error_set
11022                                         (error, rte_errno,
11023                                          RTE_FLOW_ERROR_TYPE_ACTION,
11024                                          NULL,
11025                                          "cannot create fate queue");
11026                         sample_act->dr_queue_action = hrxq->action;
11027                         sample_idx->rix_hrxq = hrxq_idx;
11028                         sample_actions[sample_act->actions_num++] =
11029                                                 hrxq->action;
11030                         (*num_of_dest)++;
11031                         action_flags |= MLX5_FLOW_ACTION_RSS;
11032                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11033                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11034                         dev_flow->handle->fate_action =
11035                                         MLX5_FLOW_FATE_QUEUE;
11036                         break;
11037                 }
11038                 case RTE_FLOW_ACTION_TYPE_MARK:
11039                 {
11040                         uint32_t tag_be = mlx5_flow_mark_set
11041                                 (((const struct rte_flow_action_mark *)
11042                                 (sub_actions->conf))->id);
11043
11044                         dev_flow->handle->mark = 1;
11045                         pre_rix = dev_flow->handle->dvh.rix_tag;
11046                         /* Save the mark resource before sample */
11047                         pre_r = dev_flow->dv.tag_resource;
11048                         if (flow_dv_tag_resource_register(dev, tag_be,
11049                                                   dev_flow, error))
11050                                 return -rte_errno;
11051                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11052                         sample_act->dr_tag_action =
11053                                 dev_flow->dv.tag_resource->action;
11054                         sample_idx->rix_tag =
11055                                 dev_flow->handle->dvh.rix_tag;
11056                         sample_actions[sample_act->actions_num++] =
11057                                                 sample_act->dr_tag_action;
11058                         /* Recover the mark resource after sample */
11059                         dev_flow->dv.tag_resource = pre_r;
11060                         dev_flow->handle->dvh.rix_tag = pre_rix;
11061                         action_flags |= MLX5_FLOW_ACTION_MARK;
11062                         break;
11063                 }
11064                 case RTE_FLOW_ACTION_TYPE_COUNT:
11065                 {
11066                         if (!flow->counter) {
11067                                 flow->counter =
11068                                         flow_dv_translate_create_counter(dev,
11069                                                 dev_flow, sub_actions->conf,
11070                                                 0);
11071                                 if (!flow->counter)
11072                                         return rte_flow_error_set
11073                                                 (error, rte_errno,
11074                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11075                                                 NULL,
11076                                                 "cannot create counter"
11077                                                 " object.");
11078                         }
11079                         sample_act->dr_cnt_action =
11080                                   (flow_dv_counter_get_by_idx(dev,
11081                                   flow->counter, NULL))->action;
11082                         sample_actions[sample_act->actions_num++] =
11083                                                 sample_act->dr_cnt_action;
11084                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11085                         break;
11086                 }
11087                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11088                 {
11089                         struct mlx5_flow_dv_port_id_action_resource
11090                                         port_id_resource;
11091                         uint32_t port_id = 0;
11092
11093                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11094                         /* Save the port id resource before sample */
11095                         pre_rix = dev_flow->handle->rix_port_id_action;
11096                         pre_r = dev_flow->dv.port_id_action;
11097                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11098                                                              &port_id, error))
11099                                 return -rte_errno;
11100                         port_id_resource.port_id = port_id;
11101                         if (flow_dv_port_id_action_resource_register
11102                             (dev, &port_id_resource, dev_flow, error))
11103                                 return -rte_errno;
11104                         sample_act->dr_port_id_action =
11105                                 dev_flow->dv.port_id_action->action;
11106                         sample_idx->rix_port_id_action =
11107                                 dev_flow->handle->rix_port_id_action;
11108                         sample_actions[sample_act->actions_num++] =
11109                                                 sample_act->dr_port_id_action;
11110                         /* Recover the port id resource after sample */
11111                         dev_flow->dv.port_id_action = pre_r;
11112                         dev_flow->handle->rix_port_id_action = pre_rix;
11113                         (*num_of_dest)++;
11114                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11115                         break;
11116                 }
11117                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11118                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11119                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11120                         /* Save the encap resource before sample */
11121                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11122                         pre_r = dev_flow->dv.encap_decap;
11123                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11124                                                            dev_flow,
11125                                                            attr->transfer,
11126                                                            error))
11127                                 return -rte_errno;
11128                         sample_act->dr_encap_action =
11129                                 dev_flow->dv.encap_decap->action;
11130                         sample_idx->rix_encap_decap =
11131                                 dev_flow->handle->dvh.rix_encap_decap;
11132                         sample_actions[sample_act->actions_num++] =
11133                                                 sample_act->dr_encap_action;
11134                         /* Recover the encap resource after sample */
11135                         dev_flow->dv.encap_decap = pre_r;
11136                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11137                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11138                         break;
11139                 default:
11140                         return rte_flow_error_set(error, EINVAL,
11141                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11142                                 NULL,
11143                                 "unsupported action in sample actions list");
11144                 }
11145         }
11146         sample_act->action_flags = action_flags;
11147         res->ft_id = dev_flow->dv.group;
11148         if (attr->transfer) {
11149                 union {
11150                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11151                         uint64_t set_action;
11152                 } action_ctx = { .set_action = 0 };
11153
11154                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11155                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11156                          MLX5_MODIFICATION_TYPE_SET);
11157                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11158                          MLX5_MODI_META_REG_C_0);
11159                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11160                          priv->vport_meta_tag);
11161                 res->set_action = action_ctx.set_action;
11162         } else if (attr->ingress) {
11163                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11164         } else {
11165                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11166         }
11167         return 0;
11168 }
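/*
 * Application-side sketch of the sample action translated above; the port
 * and queue numbers are arbitrary placeholders. One packet in 1000 is
 * duplicated to queue 0, while the normal path continues to queue 1:
 */
#if 0
        struct rte_flow_action_queue sample_queue = { .index = 0 };
        struct rte_flow_action_queue fate_queue = { .index = 1 };
        struct rte_flow_action sample_sub_actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &sample_queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_action_sample sample_conf = {
                .ratio = 1000, /* Sample 1/1000 of the matching traffic. */
                .actions = sample_sub_actions,
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample_conf },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &fate_queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
#endif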
11169
11170 /**
11171  * Create the sample action, or a destination array for mirroring.
11172  *
11173  * @param[in] dev
11174  *   Pointer to rte_eth_dev structure.
11175  * @param[in, out] dev_flow
11176  *   Pointer to the mlx5_flow.
11177  * @param[in] num_of_dest
11178  *   The number of destinations.
11179  * @param[in, out] res
11180  *   Pointer to sample resource.
11181  * @param[in, out] mdest_res
11182  *   Pointer to destination array resource.
11183  * @param[in] sample_actions
11184  *   Pointer to sample path actions list.
11185  * @param[in] action_flags
11186  *   Holds the actions detected until now.
11187  * @param[out] error
11188  *   Pointer to the error structure.
11189  *
11190  * @return
11191  *   0 on success, a negative errno value otherwise and rte_errno is set.
11192  */
11193 static int
11194 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11195                              struct mlx5_flow *dev_flow,
11196                              uint32_t num_of_dest,
11197                              struct mlx5_flow_dv_sample_resource *res,
11198                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11199                              void **sample_actions,
11200                              uint64_t action_flags,
11201                              struct rte_flow_error *error)
11202 {
11203         /* Update the normal path action resource into the last array index. */
11204         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11205         struct mlx5_flow_sub_actions_list *sample_act =
11206                                         &mdest_res->sample_act[dest_index];
11207         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11208         struct mlx5_flow_rss_desc *rss_desc;
11209         uint32_t normal_idx = 0;
11210         struct mlx5_hrxq *hrxq;
11211         uint32_t hrxq_idx;
11212
11213         MLX5_ASSERT(wks);
11214         rss_desc = &wks->rss_desc;
11215         if (num_of_dest > 1) {
11216                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11217                         /* Handle QP action for mirroring */
11218                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11219                                                     rss_desc, &hrxq_idx);
11220                         if (!hrxq)
11221                                 return rte_flow_error_set
11222                                      (error, rte_errno,
11223                                       RTE_FLOW_ERROR_TYPE_ACTION,
11224                                       NULL,
11225                                       "cannot create rx queue");
11226                         normal_idx++;
11227                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11228                         sample_act->dr_queue_action = hrxq->action;
11229                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11230                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11231                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11232                 }
11233                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11234                         normal_idx++;
11235                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11236                                 dev_flow->handle->dvh.rix_encap_decap;
11237                         sample_act->dr_encap_action =
11238                                 dev_flow->dv.encap_decap->action;
11239                         dev_flow->handle->dvh.rix_encap_decap = 0;
11240                 }
11241                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11242                         normal_idx++;
11243                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11244                                 dev_flow->handle->rix_port_id_action;
11245                         sample_act->dr_port_id_action =
11246                                 dev_flow->dv.port_id_action->action;
11247                         dev_flow->handle->rix_port_id_action = 0;
11248                 }
11249                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11250                         normal_idx++;
11251                         mdest_res->sample_idx[dest_index].rix_jump =
11252                                 dev_flow->handle->rix_jump;
11253                         sample_act->dr_jump_action =
11254                                 dev_flow->dv.jump->action;
11255                         dev_flow->handle->rix_jump = 0;
11256                 }
11257                 sample_act->actions_num = normal_idx;
11258                 /* Update the sample action resource into the first array index. */
11259                 mdest_res->ft_type = res->ft_type;
11260                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11261                                 sizeof(struct mlx5_flow_sub_actions_idx));
11262                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11263                                 sizeof(struct mlx5_flow_sub_actions_list));
11264                 mdest_res->num_of_dest = num_of_dest;
11265                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11266                                                          dev_flow, error))
11267                         return rte_flow_error_set(error, EINVAL,
11268                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11269                                                   NULL, "can't create mirror "
11270                                                   "action");
11271         } else {
11272                 res->sub_actions = sample_actions;
11273                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11274                         return rte_flow_error_set(error, EINVAL,
11275                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11276                                                   NULL,
11277                                                   "can't create sample action");
11278         }
11279         return 0;
11280 }
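/*
 * Note: a sample ratio of 1 sends every packet down the sample path, which
 * is how full mirroring is expressed; once more than one destination is
 * involved the helper above registers a destination array action instead
 * of a plain sampler object.
 */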
11281
11282 /**
11283  * Remove an ASO age action from age actions list.
11284  *
11285  * @param[in] dev
11286  *   Pointer to the Ethernet device structure.
11287  * @param[in] age
11288  *   Pointer to the aso age action handler.
11289  */
11290 static void
11291 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11292                                 struct mlx5_aso_age_action *age)
11293 {
11294         struct mlx5_age_info *age_info;
11295         struct mlx5_age_param *age_param = &age->age_params;
11296         struct mlx5_priv *priv = dev->data->dev_private;
11297         uint16_t expected = AGE_CANDIDATE;
11298
11299         age_info = GET_PORT_AGE_INFO(priv);
11300         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11301                                          AGE_FREE, false, __ATOMIC_RELAXED,
11302                                          __ATOMIC_RELAXED)) {
11303                 /*
11304                  * We need the lock even if the age timeout expired,
11305                  * since the age action may still be in process.
11306                  */
11307                 rte_spinlock_lock(&age_info->aged_sl);
11308                 LIST_REMOVE(age, next);
11309                 rte_spinlock_unlock(&age_info->aged_sl);
11310                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11311         }
11312 }
11313
11314 /**
11315  * Release an ASO age action.
11316  *
11317  * @param[in] dev
11318  *   Pointer to the Ethernet device structure.
11319  * @param[in] age_idx
11320  *   Index of ASO age action to release.
11324  *
11325  * @return
11326  *   0 when age action was removed, otherwise the number of references.
11327  */
11328 static int
11329 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11330 {
11331         struct mlx5_priv *priv = dev->data->dev_private;
11332         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11333         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11334         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11335
11336         if (!ret) {
11337                 flow_dv_aso_age_remove_from_age(dev, age);
11338                 rte_spinlock_lock(&mng->free_sl);
11339                 LIST_INSERT_HEAD(&mng->free, age, next);
11340                 rte_spinlock_unlock(&mng->free_sl);
11341         }
11342         return ret;
11343 }
11344
11345 /**
11346  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11347  *
11348  * @param[in] dev
11349  *   Pointer to the Ethernet device structure.
11350  *
11351  * @return
11352  *   0 on success, otherwise negative errno value and rte_errno is set.
11353  */
11354 static int
11355 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11356 {
11357         struct mlx5_priv *priv = dev->data->dev_private;
11358         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11359         void *old_pools = mng->pools;
11360         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11361         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11362         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11363
11364         if (!pools) {
11365                 rte_errno = ENOMEM;
11366                 return -ENOMEM;
11367         }
11368         if (old_pools) {
11369                 memcpy(pools, old_pools,
11370                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11371                 mlx5_free(old_pools);
11372         } else {
11373                 /* First ASO flow hit allocation - starting ASO data-path. */
11374                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11375
11376                 if (ret) {
11377                         mlx5_free(pools);
11378                         return ret;
11379                 }
11380         }
11381         mng->n = resize;
11382         mng->pools = pools;
11383         return 0;
11384 }
11385
11386 /**
11387  * Create and initialize a new ASO aging pool.
11388  *
11389  * @param[in] dev
11390  *   Pointer to the Ethernet device structure.
11391  * @param[out] age_free
11392  *   Where to put the pointer of a new age action.
11393  *
11394  * @return
11395  *   The age actions pool pointer and @p age_free is set on success,
11396  *   NULL otherwise and rte_errno is set.
11397  */
11398 static struct mlx5_aso_age_pool *
11399 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11400                         struct mlx5_aso_age_action **age_free)
11401 {
11402         struct mlx5_priv *priv = dev->data->dev_private;
11403         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11404         struct mlx5_aso_age_pool *pool = NULL;
11405         struct mlx5_devx_obj *obj = NULL;
11406         uint32_t i;
11407
11408         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11409                                                     priv->sh->pdn);
11410         if (!obj) {
11411                 rte_errno = ENODATA;
11412                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11413                 return NULL;
11414         }
11415         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11416         if (!pool) {
11417                 claim_zero(mlx5_devx_cmd_destroy(obj));
11418                 rte_errno = ENOMEM;
11419                 return NULL;
11420         }
11421         pool->flow_hit_aso_obj = obj;
11422         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11423         rte_spinlock_lock(&mng->resize_sl);
11424         pool->index = mng->next;
11425         /* Resize pools array if there is no room for the new pool in it. */
11426         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11427                 claim_zero(mlx5_devx_cmd_destroy(obj));
11428                 mlx5_free(pool);
11429                 rte_spinlock_unlock(&mng->resize_sl);
11430                 return NULL;
11431         }
11432         mng->pools[pool->index] = pool;
11433         mng->next++;
11434         rte_spinlock_unlock(&mng->resize_sl);
11435         /* Assign the first action in the new pool, the rest go to the free list. */
11436         *age_free = &pool->actions[0];
11437         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11438                 pool->actions[i].offset = i;
11439                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11440         }
11441         return pool;
11442 }
11443
11444 /**
11445  * Allocate an ASO aging bit.
11446  *
11447  * @param[in] dev
11448  *   Pointer to the Ethernet device structure.
11449  * @param[out] error
11450  *   Pointer to the error structure.
11451  *
11452  * @return
11453  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11454  */
11455 static uint32_t
11456 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11457 {
11458         struct mlx5_priv *priv = dev->data->dev_private;
11459         const struct mlx5_aso_age_pool *pool;
11460         struct mlx5_aso_age_action *age_free = NULL;
11461         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11462
11463         MLX5_ASSERT(mng);
11464         /* Try to get the next free age action bit. */
11465         rte_spinlock_lock(&mng->free_sl);
11466         age_free = LIST_FIRST(&mng->free);
11467         if (age_free) {
11468                 LIST_REMOVE(age_free, next);
11469         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11470                 rte_spinlock_unlock(&mng->free_sl);
11471                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11472                                    NULL, "failed to create ASO age pool");
11473                 return 0; /* 0 is an error. */
11474         }
11475         rte_spinlock_unlock(&mng->free_sl);
11476         pool = container_of
11477           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11478                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11479                                                                        actions);
11480         if (!age_free->dr_action) {
11481                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11482                                                  error);
11483
11484                 if (reg_c < 0) {
11485                         rte_flow_error_set(error, rte_errno,
11486                                            RTE_FLOW_ERROR_TYPE_ACTION,
11487                                            NULL, "failed to get reg_c "
11488                                            "for ASO flow hit");
11489                         return 0; /* 0 is an error. */
11490                 }
11491 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11492                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11493                                 (priv->sh->rx_domain,
11494                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11495                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11496                                  (reg_c - REG_C_0));
11497 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11498                 if (!age_free->dr_action) {
11499                         rte_errno = errno;
11500                         rte_spinlock_lock(&mng->free_sl);
11501                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11502                         rte_spinlock_unlock(&mng->free_sl);
11503                         rte_flow_error_set(error, rte_errno,
11504                                            RTE_FLOW_ERROR_TYPE_ACTION,
11505                                            NULL, "failed to create ASO "
11506                                            "flow hit action");
11507                         return 0; /* 0 is an error. */
11508                 }
11509         }
11510         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
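        /* Compose the index: pool index in the low 16 bits, (offset + 1)
         * above them, so a valid index can never be 0 (0 means failure).
         */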
11511         return pool->index | ((age_free->offset + 1) << 16);
11512 }
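/*
 * Illustrative decode of the index composed above, a sketch assuming the
 * same 16-bit split (the actual lookup is flow_aso_age_get_by_idx()):
 *
 *      uint16_t pool_idx = age_idx & UINT16_MAX;
 *      uint16_t offset = (age_idx >> 16) - 1;
 *      struct mlx5_aso_age_action *age =
 *              &mng->pools[pool_idx]->actions[offset];
 */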
11513
11514 /**
11515  * Initialize flow ASO age parameters.
11516  *
11517  * @param[in] dev
11518  *   Pointer to rte_eth_dev structure.
11519  * @param[in] age_idx
11520  *   Index of ASO age action.
11521  * @param[in] context
11522  *   Pointer to flow counter age context.
11523  * @param[in] timeout
11524  *   Aging timeout in seconds.
11525  *
11526  */
11527 static void
11528 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11529                             uint32_t age_idx,
11530                             void *context,
11531                             uint32_t timeout)
11532 {
11533         struct mlx5_aso_age_action *aso_age;
11534
11535         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11536         MLX5_ASSERT(aso_age);
11537         aso_age->age_params.context = context;
11538         aso_age->age_params.timeout = timeout;
11539         aso_age->age_params.port_id = dev->data->port_id;
11540         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11541                          __ATOMIC_RELAXED);
11542         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11543                          __ATOMIC_RELAXED);
11544 }
11545
11546 static void
11547 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11548                                const struct rte_flow_item_integrity *value,
11549                                void *headers_m, void *headers_v)
11550 {
11551         if (mask->l4_ok) {
11552                 /* The application l4_ok filter aggregates all hardware l4
11553                  * filters, so hw l4_checksum_ok must be set implicitly here.
11554                  */
11555                 struct rte_flow_item_integrity local_item;
11556
11557                 local_item.l4_csum_ok = 1;
11558                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11559                          local_item.l4_csum_ok);
11560                 if (value->l4_ok) {
11561                         /* Application l4_ok = 1 sets both hw flags,
11562                          * l4_ok and l4_checksum_ok, to 1.
11563                          */
11564                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11565                                  l4_checksum_ok, local_item.l4_csum_ok);
11566                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11567                                  mask->l4_ok);
11568                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11569                                  value->l4_ok);
11570                 } else {
11571                         /* application l4_ok = 0 matches on hw flag
11572                          * l4_checksum_ok = 0 only.
11573                          */
11574                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11575                                  l4_checksum_ok, 0);
11576                 }
11577         } else if (mask->l4_csum_ok) {
11578                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11579                          mask->l4_csum_ok);
11580                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
11581                          value->l4_csum_ok);
11582         }
11583 }
11584
11585 static void
11586 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
11587                                const struct rte_flow_item_integrity *value,
11588                                void *headers_m, void *headers_v,
11589                                bool is_ipv4)
11590 {
11591         if (mask->l3_ok) {
11592                 /* The application l3_ok filter aggregates all hardware l3
11593                  * filters, so hw ipv4_checksum_ok must be set implicitly here.
11594                  */
11595                 struct rte_flow_item_integrity local_item;
11596
11597                 local_item.ipv4_csum_ok = !!is_ipv4;
11598                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11599                          local_item.ipv4_csum_ok);
11600                 if (value->l3_ok) {
11601                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11602                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
11603                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
11604                                  mask->l3_ok);
11605                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
11606                                  value->l3_ok);
11607                 } else {
11608                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11609                                  ipv4_checksum_ok, 0);
11610                 }
11611         } else if (mask->ipv4_csum_ok) {
11612                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11613                          mask->ipv4_csum_ok);
11614                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11615                          value->ipv4_csum_ok);
11616         }
11617 }
11618
11619 static void
11620 flow_dv_translate_item_integrity(void *matcher, void *key,
11621                                  const struct rte_flow_item *head_item,
11622                                  const struct rte_flow_item *integrity_item)
11623 {
11624         const struct rte_flow_item_integrity *mask = integrity_item->mask;
11625         const struct rte_flow_item_integrity *value = integrity_item->spec;
11626         const struct rte_flow_item *tunnel_item, *end_item, *item;
11627         void *headers_m;
11628         void *headers_v;
11629         uint32_t l3_protocol;
11630
11631         if (!value)
11632                 return;
11633         if (!mask)
11634                 mask = &rte_flow_item_integrity_mask;
11635         if (value->level > 1) {
11636                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11637                                          inner_headers);
11638                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
11639         } else {
11640                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11641                                          outer_headers);
11642                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
11643         }
11644         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
11645         if (value->level > 1) {
11646                 /* The tunnel item was verified during item validation. */
11647                 item = tunnel_item;
11648                 end_item = mlx5_find_end_item(tunnel_item);
11649         } else {
11650                 item = head_item;
11651                 end_item = tunnel_item ? tunnel_item :
11652                            mlx5_find_end_item(integrity_item);
11653         }
11654         l3_protocol = mask->l3_ok ?
11655                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
11656         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
11657                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
11658         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
11659 }
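/*
 * Application-side sketch of the integrity item translated above: match
 * packets whose outer L3 and L4 parts passed the hardware checks (level
 * 0 or 1 selects the outer headers, level > 1 the inner ones):
 */
#if 0
        struct rte_flow_item_integrity integrity_spec = {
                .level = 0,
                .l3_ok = 1,
                .l4_ok = 1,
        };
        struct rte_flow_item_integrity integrity_mask = {
                .l3_ok = 1,
                .l4_ok = 1,
        };
        struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                {
                        .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
                        .spec = &integrity_spec,
                        .mask = &integrity_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
#endif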
11660
11661 /**
11662  * Prepares DV flow counter with aging configuration.
11663  * Gets it by index when exists, creates a new one when doesn't.
11664  *
11665  * @param[in] dev
11666  *   Pointer to rte_eth_dev structure.
11667  * @param[in] dev_flow
11668  *   Pointer to the mlx5_flow.
11669  * @param[in, out] flow
11670  *   Pointer to the sub flow.
11671  * @param[in] count
11672  *   Pointer to the counter action configuration.
11673  * @param[in] age
11674  *   Pointer to the aging action configuration.
11675  * @param[out] error
11676  *   Pointer to the error structure.
11677  *
11678  * @return
11679  *   Pointer to the counter, NULL otherwise.
11680  */
11681 static struct mlx5_flow_counter *
11682 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11683                         struct mlx5_flow *dev_flow,
11684                         struct rte_flow *flow,
11685                         const struct rte_flow_action_count *count,
11686                         const struct rte_flow_action_age *age,
11687                         struct rte_flow_error *error)
11688 {
11689         if (!flow->counter) {
11690                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11691                                                                  count, age);
11692                 if (!flow->counter) {
11693                         rte_flow_error_set(error, rte_errno,
11694                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11695                                            "cannot create counter object.");
11696                         return NULL;
11697                 }
11698         }
11699         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11700 }
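/*
 * Application-side sketch of the COUNT + AGE combination this helper
 * serves (queue number and timeout are arbitrary placeholders); both
 * actions share the single counter prepared for the flow:
 */
#if 0
        struct rte_flow_action_count count_conf = { 0 };
        struct rte_flow_action_age age_conf = {
                .timeout = 10, /* Seconds without traffic before aging out. */
                .context = NULL, /* Reported by rte_flow_get_aged_flows(). */
        };
        struct rte_flow_action_queue queue_conf = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
                { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
#endif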
11701
11702 /**
11703  * Release an ASO CT action by its own device.
11704  *
11705  * @param[in] dev
11706  *   Pointer to the Ethernet device structure.
11707  * @param[in] idx
11708  *   Index of ASO CT action to release.
11709  *
11710  * @return
11711  *   0 when CT action was removed, otherwise the number of references.
11712  */
11713 static inline int
11714 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
11715 {
11716         struct mlx5_priv *priv = dev->data->dev_private;
11717         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11718         uint32_t ret;
11719         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
11720         enum mlx5_aso_ct_state state =
11721                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
11722
11723         /* Cannot release when CT is in the ASO SQ. */
11724         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
11725                 return -1;
11726         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
11727         if (!ret) {
11728                 if (ct->dr_action_orig) {
11729 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11730                         claim_zero(mlx5_glue->destroy_flow_action
11731                                         (ct->dr_action_orig));
11732 #endif
11733                         ct->dr_action_orig = NULL;
11734                 }
11735                 if (ct->dr_action_rply) {
11736 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11737                         claim_zero(mlx5_glue->destroy_flow_action
11738                                         (ct->dr_action_rply));
11739 #endif
11740                         ct->dr_action_rply = NULL;
11741                 }
11742                 /* Clear the state to free; not needed for the first allocation. */
11743                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
11744                 rte_spinlock_lock(&mng->ct_sl);
11745                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
11746                 rte_spinlock_unlock(&mng->ct_sl);
11747         }
11748         return (int)ret;
11749 }
11750
11751 static inline int
11752 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
11753 {
11754         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
11755         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
11756         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
11758
11759         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
11760         if (dev->data->dev_started != 1)
11761                 return -1;
11762         return flow_dv_aso_ct_dev_release(owndev, idx);
11763 }
11764
11765 /**
11766  * Resize the ASO CT pools array by 64 pools.
11767  *
11768  * @param[in] dev
11769  *   Pointer to the Ethernet device structure.
11770  *
11771  * @return
11772  *   0 on success, otherwise negative errno value and rte_errno is set.
11773  */
11774 static int
11775 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
11776 {
11777         struct mlx5_priv *priv = dev->data->dev_private;
11778         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11779         void *old_pools = mng->pools;
11780         /* Magic number for now, should become a macro. */
11781         uint32_t resize = mng->n + 64;
11782         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
11783         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11784
11785         if (!pools) {
11786                 rte_errno = ENOMEM;
11787                 return -rte_errno;
11788         }
11789         rte_rwlock_write_lock(&mng->resize_rwl);
11790         /* The ASO SQ/QP was already initialized during startup. */
11791         if (old_pools) {
11792                 /* Realloc could be an alternative choice. */
11793                 rte_memcpy(pools, old_pools,
11794                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
11795                 mlx5_free(old_pools);
11796         }
11797         mng->n = resize;
11798         mng->pools = pools;
11799         rte_rwlock_write_unlock(&mng->resize_rwl);
11800         return 0;
11801 }
11802
11803 /**
11804  * Create and initialize a new ASO CT pool.
11805  *
11806  * @param[in] dev
11807  *   Pointer to the Ethernet device structure.
11808  * @param[out] ct_free
11809  *   Where to put the pointer of a new CT action.
11810  *
11811  * @return
11812  *   The CT actions pool pointer and @p ct_free is set on success,
11813  *   NULL otherwise and rte_errno is set.
11814  */
11815 static struct mlx5_aso_ct_pool *
11816 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
11817                        struct mlx5_aso_ct_action **ct_free)
11818 {
11819         struct mlx5_priv *priv = dev->data->dev_private;
11820         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11821         struct mlx5_aso_ct_pool *pool = NULL;
11822         struct mlx5_devx_obj *obj = NULL;
11823         uint32_t i;
11824         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
11825
11826         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
11827                                                 priv->sh->pdn, log_obj_size);
11828         if (!obj) {
11829                 rte_errno = ENODATA;
11830                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
11831                 return NULL;
11832         }
11833         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11834         if (!pool) {
11835                 rte_errno = ENOMEM;
11836                 claim_zero(mlx5_devx_cmd_destroy(obj));
11837                 return NULL;
11838         }
11839         pool->devx_obj = obj;
11840         pool->index = mng->next;
11841         /* Resize pools array if there is no room for the new pool in it. */
11842         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
11843                 claim_zero(mlx5_devx_cmd_destroy(obj));
11844                 mlx5_free(pool);
11845                 return NULL;
11846         }
11847         mng->pools[pool->index] = pool;
11848         mng->next++;
11849         /* Assign the first action in the new pool, the rest go to the free list. */
11850         *ct_free = &pool->actions[0];
11851         /* The lock is held by the caller, so the list operation is safe here. */
11852         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
11853                 /* refcnt is 0 when allocating the memory. */
11854                 pool->actions[i].offset = i;
11855                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
11856         }
11857         return pool;
11858 }
11859
11860 /**
11861  * Allocate an ASO CT action from the free list.
11862  *
11863  * @param[in] dev
11864  *   Pointer to the Ethernet device structure.
11865  * @param[out] error
11866  *   Pointer to the error structure.
11867  *
11868  * @return
11869  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
11870  */
11871 static uint32_t
11872 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11873 {
11874         struct mlx5_priv *priv = dev->data->dev_private;
11875         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11876         struct mlx5_aso_ct_action *ct = NULL;
11877         struct mlx5_aso_ct_pool *pool;
11878         int reg_c;
11879         uint32_t ct_idx;
11880
11881         MLX5_ASSERT(mng);
11882         if (!priv->config.devx) {
11883                 rte_errno = ENOTSUP;
11884                 return 0;
11885         }
11886         /* Get a free CT action, if no, a new pool will be created. */
11887         rte_spinlock_lock(&mng->ct_sl);
11888         ct = LIST_FIRST(&mng->free_cts);
11889         if (ct) {
11890                 LIST_REMOVE(ct, next);
11891         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
11892                 rte_spinlock_unlock(&mng->ct_sl);
11893                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11894                                    NULL, "failed to create ASO CT pool");
11895                 return 0;
11896         }
11897         rte_spinlock_unlock(&mng->ct_sl);
11898         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
11899         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
11900         /* 0: inactive, 1: created, 2+: used by flows. */
11901         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
11902         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
        if (reg_c < 0) {
                flow_dv_aso_ct_dev_release(dev, ct_idx);
                return 0;
        }
11903         if (!ct->dr_action_orig) {
11904 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11905                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
11906                         (priv->sh->rx_domain, pool->devx_obj->obj,
11907                          ct->offset,
11908                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
11909                          reg_c - REG_C_0);
11910 #else
11911                 RTE_SET_USED(reg_c);
11912 #endif
11913                 if (!ct->dr_action_orig) {
11914                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11915                         rte_flow_error_set(error, rte_errno,
11916                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11917                                            "failed to create ASO CT action");
11918                         return 0;
11919                 }
11920         }
11921         if (!ct->dr_action_rply) {
11922 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11923                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
11924                         (priv->sh->rx_domain, pool->devx_obj->obj,
11925                          ct->offset,
11926                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
11927                          reg_c - REG_C_0);
11928 #endif
11929                 if (!ct->dr_action_rply) {
11930                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11931                         rte_flow_error_set(error, rte_errno,
11932                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11933                                            "failed to create ASO CT action");
11934                         return 0;
11935                 }
11936         }
11937         return ct_idx;
11938 }
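
/*
 * Illustrative sketch of the index scheme used above, assuming
 * MLX5_MAKE_CT_IDX() packs a device-level index as
 * pool_index * MLX5_ASO_CT_ACTIONS_PER_POOL + offset + 1, keeping 0
 * reserved as "no CT". The reverse mapping expected from
 * flow_aso_ct_get_by_dev_idx() would then be:
 *
 *	uint32_t pool_idx = (idx - 1) / MLX5_ASO_CT_ACTIONS_PER_POOL;
 *	uint32_t off = (idx - 1) % MLX5_ASO_CT_ACTIONS_PER_POOL;
 *	struct mlx5_aso_ct_pool *pool = mng->pools[pool_idx];
 *	struct mlx5_aso_ct_action *ct = &pool->actions[off];
 */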
11939
11940 /**
11941  * Create a conntrack object with context and actions by the ASO mechanism.
11942  *
11943  * @param[in] dev
11944  *   Pointer to rte_eth_dev structure.
11945  * @param[in] pro
11946  *   Pointer to conntrack information profile.
11947  * @param[out] error
11948  *   Pointer to the error structure.
11949  *
11950  * @return
11951  *   Index to conntrack object on success, 0 otherwise.
11952  */
11953 static uint32_t
11954 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
11955                                    const struct rte_flow_action_conntrack *pro,
11956                                    struct rte_flow_error *error)
11957 {
11958         struct mlx5_priv *priv = dev->data->dev_private;
11959         struct mlx5_dev_ctx_shared *sh = priv->sh;
11960         struct mlx5_aso_ct_action *ct;
11961         uint32_t idx;
11962
11963         if (!sh->ct_aso_en)
11964                 return rte_flow_error_set(error, ENOTSUP,
11965                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11966                                           "Connection tracking is not supported");
11967         idx = flow_dv_aso_ct_alloc(dev, error);
11968         if (!idx)
11969                 return rte_flow_error_set(error, rte_errno,
11970                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11971                                           "Failed to allocate CT object");
11972         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
11973         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
11974                 return rte_flow_error_set(error, EBUSY,
11975                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11976                                           "Failed to update CT");
11977         ct->is_original = !!pro->is_original_dir;
11978         ct->peer = pro->peer_port;
11979         return idx;
11980 }
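
/*
 * A minimal application-side sketch of how this path is reached through
 * the generic indirect action API (the profile values below are
 * illustrative only, not a recommendation):
 *
 *	struct rte_flow_action_conntrack profile = {
 *		.peer_port = 1,
 *		.is_original_dir = 1,
 *		.enable = 1,
 *		.state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 *	};
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
 *		.conf = &profile,
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &conf, &action, &err);
 */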
11981
11982 /**
11983  * Fill the flow with the DV spec, lock free
11984  * (the mutex should be acquired by the caller).
11985  *
11986  * @param[in] dev
11987  *   Pointer to rte_eth_dev structure.
11988  * @param[in, out] dev_flow
11989  *   Pointer to the sub flow.
11990  * @param[in] attr
11991  *   Pointer to the flow attributes.
11992  * @param[in] items
11993  *   Pointer to the list of items.
11994  * @param[in] actions
11995  *   Pointer to the list of actions.
11996  * @param[out] error
11997  *   Pointer to the error structure.
11998  *
11999  * @return
12000  *   0 on success, a negative errno value otherwise and rte_errno is set.
12001  */
12002 static int
12003 flow_dv_translate(struct rte_eth_dev *dev,
12004                   struct mlx5_flow *dev_flow,
12005                   const struct rte_flow_attr *attr,
12006                   const struct rte_flow_item items[],
12007                   const struct rte_flow_action actions[],
12008                   struct rte_flow_error *error)
12009 {
12010         struct mlx5_priv *priv = dev->data->dev_private;
12011         struct mlx5_dev_config *dev_conf = &priv->config;
12012         struct rte_flow *flow = dev_flow->flow;
12013         struct mlx5_flow_handle *handle = dev_flow->handle;
12014         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12015         struct mlx5_flow_rss_desc *rss_desc;
12016         uint64_t item_flags = 0;
12017         uint64_t last_item = 0;
12018         uint64_t action_flags = 0;
12019         struct mlx5_flow_dv_matcher matcher = {
12020                 .mask = {
12021                         .size = sizeof(matcher.mask.buf) -
12022                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
12023                 },
12024         };
12025         int actions_n = 0;
12026         bool actions_end = false;
12027         union {
12028                 struct mlx5_flow_dv_modify_hdr_resource res;
12029                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12030                             sizeof(struct mlx5_modification_cmd) *
12031                             (MLX5_MAX_MODIFY_NUM + 1)];
12032         } mhdr_dummy;
12033         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12034         const struct rte_flow_action_count *count = NULL;
12035         const struct rte_flow_action_age *non_shared_age = NULL;
12036         union flow_dv_attr flow_attr = { .attr = 0 };
12037         uint32_t tag_be;
12038         union mlx5_flow_tbl_key tbl_key;
12039         uint32_t modify_action_position = UINT32_MAX;
12040         void *match_mask = matcher.mask.buf;
12041         void *match_value = dev_flow->dv.value.buf;
12042         uint8_t next_protocol = 0xff;
12043         struct rte_vlan_hdr vlan = { 0 };
12044         struct mlx5_flow_dv_dest_array_resource mdest_res;
12045         struct mlx5_flow_dv_sample_resource sample_res;
12046         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12047         const struct rte_flow_action_sample *sample = NULL;
12048         struct mlx5_flow_sub_actions_list *sample_act;
12049         uint32_t sample_act_pos = UINT32_MAX;
12050         uint32_t age_act_pos = UINT32_MAX;
12051         uint32_t num_of_dest = 0;
12052         int tmp_actions_n = 0;
12053         uint32_t table;
12054         int ret = 0;
12055         const struct mlx5_flow_tunnel *tunnel = NULL;
12056         struct flow_grp_info grp_info = {
12057                 .external = !!dev_flow->external,
12058                 .transfer = !!attr->transfer,
12059                 .fdb_def_rule = !!priv->fdb_def_rule,
12060                 .skip_scale = dev_flow->skip_scale &
12061                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12062                 .std_tbl_fix = true,
12063         };
12064         const struct rte_flow_item *head_item = items;
12065
12066         if (!wks)
12067                 return rte_flow_error_set(error, ENOMEM,
12068                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12069                                           NULL,
12070                                           "failed to push flow workspace");
12071         rss_desc = &wks->rss_desc;
12072         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12073         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12074         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12075                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12076         /* Update the normal path action resource at the last index of the array. */
12077         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12078         if (is_tunnel_offload_active(dev)) {
12079                 if (dev_flow->tunnel) {
12080                         RTE_VERIFY(dev_flow->tof_type ==
12081                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12082                         tunnel = dev_flow->tunnel;
12083                 } else {
12084                         tunnel = mlx5_get_tof(items, actions,
12085                                               &dev_flow->tof_type);
12086                         dev_flow->tunnel = tunnel;
12087                 }
12088                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12089                                         (dev, attr, tunnel, dev_flow->tof_type);
12090         }
12093         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12094                                        &grp_info, error);
12095         if (ret)
12096                 return ret;
12097         dev_flow->dv.group = table;
12098         if (attr->transfer)
12099                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12100         /* The number of actions must be reset to 0 in case of a dirty stack. */
12101         mhdr_res->actions_num = 0;
12102         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12103                 /*
12104                  * Do not add a decap action if the match rule drops the
12105                  * packet: HW rejects rules with both decap and drop.
12106                  *
12107                  * If the tunnel match rule was inserted before the matching
12108                  * tunnel set rule, the flow table used in the match rule
12109                  * must be registered; the current implementation handles
12110                  * that in flow_dv_match_register() at the function end.
12111                  */
12112                 bool add_decap = true;
12113                 const struct rte_flow_action *ptr = actions;
12114
12115                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12116                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12117                                 add_decap = false;
12118                                 break;
12119                         }
12120                 }
12121                 if (add_decap) {
12122                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12123                                                            attr->transfer,
12124                                                            error))
12125                                 return -rte_errno;
12126                         dev_flow->dv.actions[actions_n++] =
12127                                         dev_flow->dv.encap_decap->action;
12128                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12129                 }
12130         }
12131         for (; !actions_end ; actions++) {
12132                 const struct rte_flow_action_queue *queue;
12133                 const struct rte_flow_action_rss *rss;
12134                 const struct rte_flow_action *action = actions;
12135                 const uint8_t *rss_key;
12136                 struct mlx5_flow_tbl_resource *tbl;
12137                 struct mlx5_aso_age_action *age_act;
12138                 struct mlx5_flow_counter *cnt_act;
12139                 uint32_t port_id = 0;
12140                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12141                 int action_type = actions->type;
12142                 const struct rte_flow_action *found_action = NULL;
12143                 uint32_t jump_group = 0;
12144                 uint32_t owner_idx;
12145                 struct mlx5_aso_ct_action *ct;
12146
12147                 if (!mlx5_flow_os_action_supported(action_type))
12148                         return rte_flow_error_set(error, ENOTSUP,
12149                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12150                                                   actions,
12151                                                   "action not supported");
12152                 switch (action_type) {
12153                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12154                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12155                         break;
12156                 case RTE_FLOW_ACTION_TYPE_VOID:
12157                         break;
12158                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12159                         if (flow_dv_translate_action_port_id(dev, action,
12160                                                              &port_id, error))
12161                                 return -rte_errno;
12162                         port_id_resource.port_id = port_id;
12163                         MLX5_ASSERT(!handle->rix_port_id_action);
12164                         if (flow_dv_port_id_action_resource_register
12165                             (dev, &port_id_resource, dev_flow, error))
12166                                 return -rte_errno;
12167                         dev_flow->dv.actions[actions_n++] =
12168                                         dev_flow->dv.port_id_action->action;
12169                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12170                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12171                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12172                         num_of_dest++;
12173                         break;
12174                 case RTE_FLOW_ACTION_TYPE_FLAG:
12175                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12176                         dev_flow->handle->mark = 1;
12177                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12178                                 struct rte_flow_action_mark mark = {
12179                                         .id = MLX5_FLOW_MARK_DEFAULT,
12180                                 };
12181
12182                                 if (flow_dv_convert_action_mark(dev, &mark,
12183                                                                 mhdr_res,
12184                                                                 error))
12185                                         return -rte_errno;
12186                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12187                                 break;
12188                         }
12189                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12190                         /*
12191                          * Only one FLAG or MARK is supported per device flow
12192                          * right now. So the pointer to the tag resource must be
12193                          * zero before the register process.
12194                          */
12195                         MLX5_ASSERT(!handle->dvh.rix_tag);
12196                         if (flow_dv_tag_resource_register(dev, tag_be,
12197                                                           dev_flow, error))
12198                                 return -rte_errno;
12199                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12200                         dev_flow->dv.actions[actions_n++] =
12201                                         dev_flow->dv.tag_resource->action;
12202                         break;
12203                 case RTE_FLOW_ACTION_TYPE_MARK:
12204                         action_flags |= MLX5_FLOW_ACTION_MARK;
12205                         dev_flow->handle->mark = 1;
12206                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12207                                 const struct rte_flow_action_mark *mark =
12208                                         (const struct rte_flow_action_mark *)
12209                                                 actions->conf;
12210
12211                                 if (flow_dv_convert_action_mark(dev, mark,
12212                                                                 mhdr_res,
12213                                                                 error))
12214                                         return -rte_errno;
12215                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12216                                 break;
12217                         }
12218                         /* Fall-through */
12219                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12220                         /* Legacy (non-extensive) MARK action. */
12221                         tag_be = mlx5_flow_mark_set
12222                               (((const struct rte_flow_action_mark *)
12223                                (actions->conf))->id);
12224                         MLX5_ASSERT(!handle->dvh.rix_tag);
12225                         if (flow_dv_tag_resource_register(dev, tag_be,
12226                                                           dev_flow, error))
12227                                 return -rte_errno;
12228                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12229                         dev_flow->dv.actions[actions_n++] =
12230                                         dev_flow->dv.tag_resource->action;
12231                         break;
12232                 case RTE_FLOW_ACTION_TYPE_SET_META:
12233                         if (flow_dv_convert_action_set_meta
12234                                 (dev, mhdr_res, attr,
12235                                  (const struct rte_flow_action_set_meta *)
12236                                   actions->conf, error))
12237                                 return -rte_errno;
12238                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12239                         break;
12240                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12241                         if (flow_dv_convert_action_set_tag
12242                                 (dev, mhdr_res,
12243                                  (const struct rte_flow_action_set_tag *)
12244                                   actions->conf, error))
12245                                 return -rte_errno;
12246                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12247                         break;
12248                 case RTE_FLOW_ACTION_TYPE_DROP:
12249                         action_flags |= MLX5_FLOW_ACTION_DROP;
12250                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12251                         break;
12252                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12253                         queue = actions->conf;
12254                         rss_desc->queue_num = 1;
12255                         rss_desc->queue[0] = queue->index;
12256                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12257                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12258                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12259                         num_of_dest++;
12260                         break;
12261                 case RTE_FLOW_ACTION_TYPE_RSS:
12262                         rss = actions->conf;
12263                         memcpy(rss_desc->queue, rss->queue,
12264                                rss->queue_num * sizeof(uint16_t));
12265                         rss_desc->queue_num = rss->queue_num;
12266                         /* NULL RSS key indicates default RSS key. */
12267                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12268                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12269                         /*
12270                          * rss->level and rss->types should be set in advance
12271                          * when expanding items for RSS.
12272                          */
12273                         action_flags |= MLX5_FLOW_ACTION_RSS;
12274                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12275                                 MLX5_FLOW_FATE_SHARED_RSS :
12276                                 MLX5_FLOW_FATE_QUEUE;
12277                         break;
12278                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12279                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12280                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12281                         __atomic_fetch_add(&age_act->refcnt, 1,
12282                                            __ATOMIC_RELAXED);
12283                         age_act_pos = actions_n++;
12284                         action_flags |= MLX5_FLOW_ACTION_AGE;
12285                         break;
12286                 case RTE_FLOW_ACTION_TYPE_AGE:
12287                         non_shared_age = action->conf;
12288                         age_act_pos = actions_n++;
12289                         action_flags |= MLX5_FLOW_ACTION_AGE;
12290                         break;
12291                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12292                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12293                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12294                                                              NULL);
12295                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12296                                            __ATOMIC_RELAXED);
12297                         /* Save the information first; it will be applied later. */
12298                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12299                         break;
12300                 case RTE_FLOW_ACTION_TYPE_COUNT:
12301                         if (!dev_conf->devx) {
12302                                 return rte_flow_error_set
12303                                               (error, ENOTSUP,
12304                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12305                                                NULL,
12306                                                "count action not supported");
12307                         }
12308                         /* Save the information first; it will be applied later. */
12309                         count = action->conf;
12310                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12311                         break;
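                        /*
                         * Note (sketch): the counter itself is only created
                         * once END is reached, so that a single HW counter
                         * can back both COUNT and a non-shared AGE appearing
                         * in the same action list; see the END handling below.
                         */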
12312                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12313                         dev_flow->dv.actions[actions_n++] =
12314                                                 priv->sh->pop_vlan_action;
12315                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12316                         break;
12317                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12318                         if (!(action_flags &
12319                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12320                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12321                         vlan.eth_proto = rte_be_to_cpu_16
12322                              ((((const struct rte_flow_action_of_push_vlan *)
12323                                                    actions->conf)->ethertype));
12324                         found_action = mlx5_flow_find_action
12325                                         (actions + 1,
12326                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12327                         if (found_action)
12328                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12329                         found_action = mlx5_flow_find_action
12330                                         (actions + 1,
12331                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12332                         if (found_action)
12333                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12334                         if (flow_dv_create_action_push_vlan
12335                                             (dev, attr, &vlan, dev_flow, error))
12336                                 return -rte_errno;
12337                         dev_flow->dv.actions[actions_n++] =
12338                                         dev_flow->dv.push_vlan_res->action;
12339                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12340                         break;
12341                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12342                         /* The OF_PUSH_VLAN action has already handled this one. */
12343                         MLX5_ASSERT(action_flags &
12344                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12345                         break;
12346                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12347                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12348                                 break;
12349                         flow_dev_get_vlan_info_from_items(items, &vlan);
12350                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12351                         /* Without a VLAN push, this is a modify header action. */
12352                         if (flow_dv_convert_action_modify_vlan_vid
12353                                                 (mhdr_res, actions, error))
12354                                 return -rte_errno;
12355                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12356                         break;
12357                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12358                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12359                         if (flow_dv_create_action_l2_encap(dev, actions,
12360                                                            dev_flow,
12361                                                            attr->transfer,
12362                                                            error))
12363                                 return -rte_errno;
12364                         dev_flow->dv.actions[actions_n++] =
12365                                         dev_flow->dv.encap_decap->action;
12366                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12367                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12368                                 sample_act->action_flags |=
12369                                                         MLX5_FLOW_ACTION_ENCAP;
12370                         break;
12371                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12372                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12373                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12374                                                            attr->transfer,
12375                                                            error))
12376                                 return -rte_errno;
12377                         dev_flow->dv.actions[actions_n++] =
12378                                         dev_flow->dv.encap_decap->action;
12379                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12380                         break;
12381                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12382                         /* Handle encap with preceding decap. */
12383                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12384                                 if (flow_dv_create_action_raw_encap
12385                                         (dev, actions, dev_flow, attr, error))
12386                                         return -rte_errno;
12387                                 dev_flow->dv.actions[actions_n++] =
12388                                         dev_flow->dv.encap_decap->action;
12389                         } else {
12390                                 /* Handle encap without preceding decap. */
12391                                 if (flow_dv_create_action_l2_encap
12392                                     (dev, actions, dev_flow, attr->transfer,
12393                                      error))
12394                                         return -rte_errno;
12395                                 dev_flow->dv.actions[actions_n++] =
12396                                         dev_flow->dv.encap_decap->action;
12397                         }
12398                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12399                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12400                                 sample_act->action_flags |=
12401                                                         MLX5_FLOW_ACTION_ENCAP;
12402                         break;
12403                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12404                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12405                                 ;
12406                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12407                                 if (flow_dv_create_action_l2_decap
12408                                     (dev, dev_flow, attr->transfer, error))
12409                                         return -rte_errno;
12410                                 dev_flow->dv.actions[actions_n++] =
12411                                         dev_flow->dv.encap_decap->action;
12412                         }
12413                         /* If decap is followed by encap, handle it at encap. */
12414                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12415                         break;
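                        /*
                         * Illustrative sketch of the lookahead above: for
                         *
                         *	RAW_DECAP, VOID, RAW_ENCAP, ...
                         *
                         * the decap is deferred to the RAW_ENCAP branch,
                         * which emits one combined decap-then-encap resource,
                         * while
                         *
                         *	RAW_DECAP, VOID, QUEUE, ...
                         *
                         * gets a standalone L2 decap action right here.
                         */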
12416                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12417                         dev_flow->dv.actions[actions_n++] =
12418                                 (void *)(uintptr_t)action->conf;
12419                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12420                         break;
12421                 case RTE_FLOW_ACTION_TYPE_JUMP:
12422                         jump_group = ((const struct rte_flow_action_jump *)
12423                                                         action->conf)->group;
12424                         grp_info.std_tbl_fix = 0;
12425                         if (dev_flow->skip_scale &
12426                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12427                                 grp_info.skip_scale = 1;
12428                         else
12429                                 grp_info.skip_scale = 0;
12430                         ret = mlx5_flow_group_to_table(dev, tunnel,
12431                                                        jump_group,
12432                                                        &table,
12433                                                        &grp_info, error);
12434                         if (ret)
12435                                 return ret;
12436                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12437                                                        attr->transfer,
12438                                                        !!dev_flow->external,
12439                                                        tunnel, jump_group, 0,
12440                                                        0, error);
12441                         if (!tbl)
12442                                 return rte_flow_error_set
12443                                                 (error, errno,
12444                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12445                                                  NULL,
12446                                                  "cannot create jump action.");
12447                         if (flow_dv_jump_tbl_resource_register
12448                             (dev, tbl, dev_flow, error)) {
12449                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12450                                 return rte_flow_error_set
12451                                                 (error, errno,
12452                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12453                                                  NULL,
12454                                                  "cannot create jump action.");
12455                         }
12456                         dev_flow->dv.actions[actions_n++] =
12457                                         dev_flow->dv.jump->action;
12458                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12459                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12460                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12461                         num_of_dest++;
12462                         break;
12463                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12464                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12465                         if (flow_dv_convert_action_modify_mac
12466                                         (mhdr_res, actions, error))
12467                                 return -rte_errno;
12468                         action_flags |= actions->type ==
12469                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12470                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12471                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12472                         break;
12473                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12474                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12475                         if (flow_dv_convert_action_modify_ipv4
12476                                         (mhdr_res, actions, error))
12477                                 return -rte_errno;
12478                         action_flags |= actions->type ==
12479                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12480                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12481                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12482                         break;
12483                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12484                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12485                         if (flow_dv_convert_action_modify_ipv6
12486                                         (mhdr_res, actions, error))
12487                                 return -rte_errno;
12488                         action_flags |= actions->type ==
12489                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12490                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12491                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12492                         break;
12493                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12494                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12495                         if (flow_dv_convert_action_modify_tp
12496                                         (mhdr_res, actions, items,
12497                                          &flow_attr, dev_flow, !!(action_flags &
12498                                          MLX5_FLOW_ACTION_DECAP), error))
12499                                 return -rte_errno;
12500                         action_flags |= actions->type ==
12501                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12502                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12503                                         MLX5_FLOW_ACTION_SET_TP_DST;
12504                         break;
12505                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12506                         if (flow_dv_convert_action_modify_dec_ttl
12507                                         (mhdr_res, items, &flow_attr, dev_flow,
12508                                          !!(action_flags &
12509                                          MLX5_FLOW_ACTION_DECAP), error))
12510                                 return -rte_errno;
12511                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12512                         break;
12513                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12514                         if (flow_dv_convert_action_modify_ttl
12515                                         (mhdr_res, actions, items, &flow_attr,
12516                                          dev_flow, !!(action_flags &
12517                                          MLX5_FLOW_ACTION_DECAP), error))
12518                                 return -rte_errno;
12519                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12520                         break;
12521                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12522                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12523                         if (flow_dv_convert_action_modify_tcp_seq
12524                                         (mhdr_res, actions, error))
12525                                 return -rte_errno;
12526                         action_flags |= actions->type ==
12527                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12528                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12529                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12530                         break;
12532                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12533                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12534                         if (flow_dv_convert_action_modify_tcp_ack
12535                                         (mhdr_res, actions, error))
12536                                 return -rte_errno;
12537                         action_flags |= actions->type ==
12538                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12539                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12540                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12541                         break;
12542                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12543                         if (flow_dv_convert_action_set_reg
12544                                         (mhdr_res, actions, error))
12545                                 return -rte_errno;
12546                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12547                         break;
12548                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12549                         if (flow_dv_convert_action_copy_mreg
12550                                         (dev, mhdr_res, actions, error))
12551                                 return -rte_errno;
12552                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12553                         break;
12554                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12555                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12556                         dev_flow->handle->fate_action =
12557                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12558                         break;
12559                 case RTE_FLOW_ACTION_TYPE_METER:
12560                         if (!wks->fm)
12561                                 return rte_flow_error_set(error, rte_errno,
12562                                         RTE_FLOW_ERROR_TYPE_ACTION,
12563                                         NULL, "Failed to get meter in flow.");
12564                         /* Set the meter action. */
12565                         dev_flow->dv.actions[actions_n++] =
12566                                 wks->fm->meter_action;
12567                         action_flags |= MLX5_FLOW_ACTION_METER;
12568                         break;
12569                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12570                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12571                                                               actions, error))
12572                                 return -rte_errno;
12573                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12574                         break;
12575                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12576                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12577                                                               actions, error))
12578                                 return -rte_errno;
12579                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
12580                         break;
12581                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
12582                         sample_act_pos = actions_n;
12583                         sample = (const struct rte_flow_action_sample *)
12584                                  action->conf;
12585                         actions_n++;
12586                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
12587                         /* Put the encap action into the group if working with port ID. */
12588                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
12589                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
12590                                 sample_act->action_flags |=
12591                                                         MLX5_FLOW_ACTION_ENCAP;
12592                         break;
12593                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
12594                         if (flow_dv_convert_action_modify_field
12595                                         (dev, mhdr_res, actions, attr, error))
12596                                 return -rte_errno;
12597                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
12598                         break;
12599                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12600                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12601                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
12602                         if (!ct)
12603                                 return rte_flow_error_set(error, EINVAL,
12604                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12605                                                 NULL,
12606                                                 "Failed to get CT object.");
12607                         if (mlx5_aso_ct_available(priv->sh, ct))
12608                                 return rte_flow_error_set(error, rte_errno,
12609                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12610                                                 NULL,
12611                                                 "CT is unavailable.");
12612                         if (ct->is_original)
12613                                 dev_flow->dv.actions[actions_n] =
12614                                                         ct->dr_action_orig;
12615                         else
12616                                 dev_flow->dv.actions[actions_n] =
12617                                                         ct->dr_action_rply;
12618                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
12619                         flow->ct = owner_idx;
12620                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
12621                         actions_n++;
12622                         action_flags |= MLX5_FLOW_ACTION_CT;
12623                         break;
12624                 case RTE_FLOW_ACTION_TYPE_END:
12625                         actions_end = true;
12626                         if (mhdr_res->actions_num) {
12627                                 /* Create the modify header action if needed. */
12628                                 if (flow_dv_modify_hdr_resource_register
12629                                         (dev, mhdr_res, dev_flow, error))
12630                                         return -rte_errno;
12631                                 dev_flow->dv.actions[modify_action_position] =
12632                                         handle->dvh.modify_hdr->action;
12633                         }
12634                         /*
12635                          * Handle the AGE and COUNT actions by a single HW
12636                          * counter when they are not shared.
12637                          */
12638                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
12639                                 if ((non_shared_age &&
12640                                      count && !count->shared) ||
12641                                     !(priv->sh->flow_hit_aso_en &&
12642                                       (attr->group || attr->transfer))) {
12643                                         /* Create aging by counters. */
12644                                         cnt_act = flow_dv_prepare_counter
12645                                                                 (dev, dev_flow,
12646                                                                  flow, count,
12647                                                                  non_shared_age,
12648                                                                  error);
12649                                         if (!cnt_act)
12650                                                 return -rte_errno;
12651                                         dev_flow->dv.actions[age_act_pos] =
12652                                                                 cnt_act->action;
12653                                         break;
12654                                 }
12655                                 if (!flow->age && non_shared_age) {
12656                                         flow->age = flow_dv_aso_age_alloc
12657                                                                 (dev, error);
12658                                         if (!flow->age)
12659                                                 return -rte_errno;
12660                                         flow_dv_aso_age_params_init
12661                                                     (dev, flow->age,
12662                                                      non_shared_age->context ?
12663                                                      non_shared_age->context :
12664                                                      (void *)(uintptr_t)
12665                                                      (dev_flow->flow_idx),
12666                                                      non_shared_age->timeout);
12667                                 }
12668                                 age_act = flow_aso_age_get_by_idx(dev,
12669                                                                   flow->age);
12670                                 dev_flow->dv.actions[age_act_pos] =
12671                                                              age_act->dr_action;
12672                         }
12673                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
12674                                 /*
12675                                  * Create one count action, to be used
12676                                  * by all sub-flows.
12677                                  */
12678                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
12679                                                                   flow, count,
12680                                                                   NULL, error);
12681                                 if (!cnt_act)
12682                                         return -rte_errno;
12683                                 dev_flow->dv.actions[actions_n++] =
12684                                                                 cnt_act->action;
12685                         }
12686                 default:
12687                         break;
12688                 }
12689                 if (mhdr_res->actions_num &&
12690                     modify_action_position == UINT32_MAX)
12691                         modify_action_position = actions_n++;
12692         }
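        /*
         * Illustrative walk-through of the modify-header bookkeeping above
         * (a sketch, not extra functionality): for an action list such as
         *
         *	SET_IPV4_SRC, SET_TTL, QUEUE, END
         *
         * both SET_* actions are only accumulated into mhdr_res, and a
         * single slot in dev_flow->dv.actions[] is reserved at the position
         * of the first of them (modify_action_position). When END is
         * reached, the accumulated commands are registered once and the
         * resulting action pointer is written back into the reserved slot,
         * so all header rewrites execute as one action, in the original
         * order.
         */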
12693         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
12694                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
12695                 int item_type = items->type;
12696
12697                 if (!mlx5_flow_os_item_supported(item_type))
12698                         return rte_flow_error_set(error, ENOTSUP,
12699                                                   RTE_FLOW_ERROR_TYPE_ITEM,
12700                                                   NULL, "item not supported");
12701                 switch (item_type) {
12702                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
12703                         flow_dv_translate_item_port_id
12704                                 (dev, match_mask, match_value, items, attr);
12705                         last_item = MLX5_FLOW_ITEM_PORT_ID;
12706                         break;
12707                 case RTE_FLOW_ITEM_TYPE_ETH:
12708                         flow_dv_translate_item_eth(match_mask, match_value,
12709                                                    items, tunnel,
12710                                                    dev_flow->dv.group);
12711                         matcher.priority = action_flags &
12712                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
12713                                         !dev_flow->external ?
12714                                         MLX5_PRIORITY_MAP_L3 :
12715                                         MLX5_PRIORITY_MAP_L2;
12716                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
12717                                              MLX5_FLOW_LAYER_OUTER_L2;
12718                         break;
12719                 case RTE_FLOW_ITEM_TYPE_VLAN:
12720                         flow_dv_translate_item_vlan(dev_flow,
12721                                                     match_mask, match_value,
12722                                                     items, tunnel,
12723                                                     dev_flow->dv.group);
12724                         matcher.priority = MLX5_PRIORITY_MAP_L2;
12725                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
12726                                               MLX5_FLOW_LAYER_INNER_VLAN) :
12727                                              (MLX5_FLOW_LAYER_OUTER_L2 |
12728                                               MLX5_FLOW_LAYER_OUTER_VLAN);
12729                         break;
12730                 case RTE_FLOW_ITEM_TYPE_IPV4:
12731                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12732                                                   &item_flags, &tunnel);
12733                         flow_dv_translate_item_ipv4(match_mask, match_value,
12734                                                     items, tunnel,
12735                                                     dev_flow->dv.group);
12736                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12737                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
12738                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
12739                         if (items->mask != NULL &&
12740                             ((const struct rte_flow_item_ipv4 *)
12741                              items->mask)->hdr.next_proto_id) {
12742                                 next_protocol =
12743                                         ((const struct rte_flow_item_ipv4 *)
12744                                          (items->spec))->hdr.next_proto_id;
12745                                 next_protocol &=
12746                                         ((const struct rte_flow_item_ipv4 *)
12747                                          (items->mask))->hdr.next_proto_id;
12748                         } else {
12749                                 /* Reset for inner layer. */
12750                                 next_protocol = 0xff;
12751                         }
12752                         break;
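                        /*
                         * Worked example (sketch): with spec next_proto_id ==
                         * IPPROTO_UDP (17) and mask next_proto_id == 0xff,
                         * the code above yields next_protocol == 17 for the
                         * tunnel checks on the following items; a zero mask
                         * leaves the field unmatched, so the "unknown" value
                         * 0xff is restored for the inner layers.
                         */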
12753                 case RTE_FLOW_ITEM_TYPE_IPV6:
12754                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12755                                                   &item_flags, &tunnel);
12756                         flow_dv_translate_item_ipv6(match_mask, match_value,
12757                                                     items, tunnel,
12758                                                     dev_flow->dv.group);
12759                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12760                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
12761                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
12762                         if (items->mask != NULL &&
12763                             ((const struct rte_flow_item_ipv6 *)
12764                              items->mask)->hdr.proto) {
12765                                 next_protocol =
12766                                         ((const struct rte_flow_item_ipv6 *)
12767                                          items->spec)->hdr.proto;
12768                                 next_protocol &=
12769                                         ((const struct rte_flow_item_ipv6 *)
12770                                          items->mask)->hdr.proto;
12771                         } else {
12772                                 /* Reset for inner layer. */
12773                                 next_protocol = 0xff;
12774                         }
12775                         break;
12776                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
12777                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
12778                                                              match_value,
12779                                                              items, tunnel);
12780                         last_item = tunnel ?
12781                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
12782                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
12783                         if (items->mask != NULL &&
12784                             ((const struct rte_flow_item_ipv6_frag_ext *)
12785                              items->mask)->hdr.next_header) {
12786                                 next_protocol =
12787                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12788                                  items->spec)->hdr.next_header;
12789                                 next_protocol &=
12790                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12791                                  items->mask)->hdr.next_header;
12792                         } else {
12793                                 /* Reset for inner layer. */
12794                                 next_protocol = 0xff;
12795                         }
12796                         break;
12797                 case RTE_FLOW_ITEM_TYPE_TCP:
12798                         flow_dv_translate_item_tcp(match_mask, match_value,
12799                                                    items, tunnel);
12800                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12801                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
12802                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
12803                         break;
12804                 case RTE_FLOW_ITEM_TYPE_UDP:
12805                         flow_dv_translate_item_udp(match_mask, match_value,
12806                                                    items, tunnel);
12807                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12808                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
12809                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
12810                         break;
12811                 case RTE_FLOW_ITEM_TYPE_GRE:
12812                         flow_dv_translate_item_gre(match_mask, match_value,
12813                                                    items, tunnel);
12814                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12815                         last_item = MLX5_FLOW_LAYER_GRE;
12816                         break;
12817                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
12818                         flow_dv_translate_item_gre_key(match_mask,
12819                                                        match_value, items);
12820                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
12821                         break;
12822                 case RTE_FLOW_ITEM_TYPE_NVGRE:
12823                         flow_dv_translate_item_nvgre(match_mask, match_value,
12824                                                      items, tunnel);
12825                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12826                         last_item = MLX5_FLOW_LAYER_GRE;
12827                         break;
12828                 case RTE_FLOW_ITEM_TYPE_VXLAN:
12829                         flow_dv_translate_item_vxlan(match_mask, match_value,
12830                                                      items, tunnel);
12831                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12832                         last_item = MLX5_FLOW_LAYER_VXLAN;
12833                         break;
12834                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
12835                         flow_dv_translate_item_vxlan_gpe(match_mask,
12836                                                          match_value, items,
12837                                                          tunnel);
12838                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12839                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
12840                         break;
12841                 case RTE_FLOW_ITEM_TYPE_GENEVE:
12842                         flow_dv_translate_item_geneve(match_mask, match_value,
12843                                                       items, tunnel);
12844                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12845                         last_item = MLX5_FLOW_LAYER_GENEVE;
12846                         break;
12847                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
12848                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
12849                                                           match_value,
12850                                                           items, error);
12851                         if (ret)
12852                                 return rte_flow_error_set(error, -ret,
12853                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12854                                         "cannot create GENEVE TLV option");
12855                         flow->geneve_tlv_option = 1;
12856                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
12857                         break;
12858                 case RTE_FLOW_ITEM_TYPE_MPLS:
12859                         flow_dv_translate_item_mpls(match_mask, match_value,
12860                                                     items, last_item, tunnel);
12861                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12862                         last_item = MLX5_FLOW_LAYER_MPLS;
12863                         break;
12864                 case RTE_FLOW_ITEM_TYPE_MARK:
12865                         flow_dv_translate_item_mark(dev, match_mask,
12866                                                     match_value, items);
12867                         last_item = MLX5_FLOW_ITEM_MARK;
12868                         break;
12869                 case RTE_FLOW_ITEM_TYPE_META:
12870                         flow_dv_translate_item_meta(dev, match_mask,
12871                                                     match_value, attr, items);
12872                         last_item = MLX5_FLOW_ITEM_METADATA;
12873                         break;
12874                 case RTE_FLOW_ITEM_TYPE_ICMP:
12875                         flow_dv_translate_item_icmp(match_mask, match_value,
12876                                                     items, tunnel);
12877                         last_item = MLX5_FLOW_LAYER_ICMP;
12878                         break;
12879                 case RTE_FLOW_ITEM_TYPE_ICMP6:
12880                         flow_dv_translate_item_icmp6(match_mask, match_value,
12881                                                       items, tunnel);
12882                         last_item = MLX5_FLOW_LAYER_ICMP6;
12883                         break;
12884                 case RTE_FLOW_ITEM_TYPE_TAG:
12885                         flow_dv_translate_item_tag(dev, match_mask,
12886                                                    match_value, items);
12887                         last_item = MLX5_FLOW_ITEM_TAG;
12888                         break;
12889                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
12890                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
12891                                                         match_value, items);
12892                         last_item = MLX5_FLOW_ITEM_TAG;
12893                         break;
12894                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
12895                         flow_dv_translate_item_tx_queue(dev, match_mask,
12896                                                         match_value,
12897                                                         items);
12898                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
12899                         break;
12900                 case RTE_FLOW_ITEM_TYPE_GTP:
12901                         flow_dv_translate_item_gtp(match_mask, match_value,
12902                                                    items, tunnel);
12903                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12904                         last_item = MLX5_FLOW_LAYER_GTP;
12905                         break;
12906                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12907                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12908                                                           match_value,
12909                                                           items);
12910                         if (ret)
12911                                 return rte_flow_error_set(error, -ret,
12912                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12913                                         "cannot create GTP PSC item");
12914                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12915                         break;
12916                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12917                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
12918                                 /* Create it only the first time it is used. */
12919                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12920                                 if (ret)
12921                                         return rte_flow_error_set
12922                                                 (error, -ret,
12923                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12924                                                 NULL,
12925                                                 "cannot create eCPRI parser");
12926                         }
12927                         /* Adjust the length matcher and device flow value. */
12928                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12929                         dev_flow->dv.value.size =
12930                                         MLX5_ST_SZ_BYTES(fte_match_param);
12931                         flow_dv_translate_item_ecpri(dev, match_mask,
12932                                                      match_value, items);
12933                         /* No other protocol should follow eCPRI layer. */
12934                         last_item = MLX5_FLOW_LAYER_ECPRI;
12935                         break;
12936                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
12937                         flow_dv_translate_item_integrity(match_mask,
12938                                                          match_value,
12939                                                          head_item, items);
12940                         break;
12941                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
12942                         flow_dv_translate_item_aso_ct(dev, match_mask,
12943                                                       match_value, items);
12944                         break;
12945                 default:
12946                         break;
12947                 }
12948                 item_flags |= last_item;
12949         }
12950         /*
12951          * When E-Switch mode is enabled, there are two cases where the
12952          * source port must be set manually.
12953          * The first is a NIC steering rule, and the second is an
12954          * E-Switch rule where no port_id item was found. In both cases
12955          * the source port is set according to the current port in use.
12956          */
12957         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
12958             (priv->representor || priv->master)) {
12959                 if (flow_dv_translate_item_port_id(dev, match_mask,
12960                                                    match_value, NULL, attr))
12961                         return -rte_errno;
12962         }
12963 #ifdef RTE_LIBRTE_MLX5_DEBUG
12964         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
12965                                               dev_flow->dv.value.buf));
12966 #endif
12967         /*
12968          * Layers may be already initialized from prefix flow if this dev_flow
12969          * is the suffix flow.
12970          */
12971         handle->layers |= item_flags;
12972         if (action_flags & MLX5_FLOW_ACTION_RSS)
12973                 flow_dv_hashfields_set(dev_flow, rss_desc);
12974         /* If the sample action includes an RSS action, the Sample/Mirror
12975          * resource should be registered after the hash fields are updated.
12976          */
12977         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
12978                 ret = flow_dv_translate_action_sample(dev,
12979                                                       sample,
12980                                                       dev_flow, attr,
12981                                                       &num_of_dest,
12982                                                       sample_actions,
12983                                                       &sample_res,
12984                                                       error);
12985                 if (ret < 0)
12986                         return ret;
12987                 ret = flow_dv_create_action_sample(dev,
12988                                                    dev_flow,
12989                                                    num_of_dest,
12990                                                    &sample_res,
12991                                                    &mdest_res,
12992                                                    sample_actions,
12993                                                    action_flags,
12994                                                    error);
12995                 if (ret < 0)
12996                         return rte_flow_error_set
12997                                                 (error, rte_errno,
12998                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12999                                                 NULL,
13000                                                 "cannot create sample action");
13001                 if (num_of_dest > 1) {
13002                         dev_flow->dv.actions[sample_act_pos] =
13003                         dev_flow->dv.dest_array_res->action;
13004                 } else {
13005                         dev_flow->dv.actions[sample_act_pos] =
13006                         dev_flow->dv.sample_res->verbs_action;
13007                 }
13008         }
13009         /*
13010          * For multiple destinations (sample action with ratio=1), the encap
13011          * action and port_id action will be combined into a group action,
13012          * so the original actions must be removed from the flow and only
13013          * the sample action used instead.
13014          */
13015         if (num_of_dest > 1 &&
13016             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13017                 int i;
13018                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13019
13020                 for (i = 0; i < actions_n; i++) {
13021                         if ((sample_act->dr_encap_action &&
13022                                 sample_act->dr_encap_action ==
13023                                 dev_flow->dv.actions[i]) ||
13024                                 (sample_act->dr_port_id_action &&
13025                                 sample_act->dr_port_id_action ==
13026                                 dev_flow->dv.actions[i]) ||
13027                                 (sample_act->dr_jump_action &&
13028                                 sample_act->dr_jump_action ==
13029                                 dev_flow->dv.actions[i]))
13030                                 continue;
13031                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13032                 }
13033                 memcpy((void *)dev_flow->dv.actions,
13034                                 (void *)temp_actions,
13035                                 tmp_actions_n * sizeof(void *));
13036                 actions_n = tmp_actions_n;
13037         }
13038         dev_flow->dv.actions_n = actions_n;
13039         dev_flow->act_flags = action_flags;
13040         if (wks->skip_matcher_reg)
13041                 return 0;
13042         /* Register matcher. */
13043         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13044                                     matcher.mask.size);
13045         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13046                                         matcher.priority);
13047         /* The reserved field does not need to be set to 0 here. */
13048         tbl_key.is_fdb = attr->transfer;
13049         tbl_key.is_egress = attr->egress;
13050         tbl_key.level = dev_flow->dv.group;
13051         tbl_key.id = dev_flow->dv.table_id;
13052         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13053                                      tunnel, attr->group, error))
13054                 return -rte_errno;
13055         return 0;
13056 }
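
/*
 * A minimal sketch of how the table key above is composed (field
 * assignments as in the code right before flow_dv_matcher_register()):
 * a transfer rule in group 3 with table id 0 would be keyed as
 *
 *	tbl_key.is_fdb = 1;	from attr->transfer
 *	tbl_key.is_egress = 0;	from attr->egress
 *	tbl_key.level = 3;	from dev_flow->dv.group
 *	tbl_key.id = 0;		from dev_flow->dv.table_id
 *
 * so matchers are cached per FDB/NIC domain, direction, and table.
 */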
13057
13058 /**
13059  * Set hash RX queue in a shared RSS action by hash fields
13060  * (see enum ibv_rx_hash_fields).
13061  *
13062  * @param[in, out] action
13063  *   Shared RSS action holding hash RX queue objects.
13064  * @param[in] hash_fields
13065  *   Defines combination of packet fields to participate in RX hash.
13068  * @param[in] hrxq_idx
13069  *   Hash RX queue index to set.
13070  *
13071  * @return
13072  *   0 on success, otherwise negative errno value.
13073  */
13074 static int
13075 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13076                               const uint64_t hash_fields,
13077                               uint32_t hrxq_idx)
13078 {
13079         uint32_t *hrxqs = action->hrxq;
13080
13081         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13082         case MLX5_RSS_HASH_IPV4:
13083                 /* fall-through. */
13084         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13085                 /* fall-through. */
13086         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13087                 hrxqs[0] = hrxq_idx;
13088                 return 0;
13089         case MLX5_RSS_HASH_IPV4_TCP:
13090                 /* fall-through. */
13091         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13092                 /* fall-through. */
13093         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13094                 hrxqs[1] = hrxq_idx;
13095                 return 0;
13096         case MLX5_RSS_HASH_IPV4_UDP:
13097                 /* fall-through. */
13098         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13099                 /* fall-through. */
13100         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13101                 hrxqs[2] = hrxq_idx;
13102                 return 0;
13103         case MLX5_RSS_HASH_IPV6:
13104                 /* fall-through. */
13105         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13106                 /* fall-through. */
13107         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13108                 hrxqs[3] = hrxq_idx;
13109                 return 0;
13110         case MLX5_RSS_HASH_IPV6_TCP:
13111                 /* fall-through. */
13112         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13113                 /* fall-through. */
13114         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13115                 hrxqs[4] = hrxq_idx;
13116                 return 0;
13117         case MLX5_RSS_HASH_IPV6_UDP:
13118                 /* fall-through. */
13119         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13120                 /* fall-through. */
13121         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13122                 hrxqs[5] = hrxq_idx;
13123                 return 0;
13124         case MLX5_RSS_HASH_NONE:
13125                 hrxqs[6] = hrxq_idx;
13126                 return 0;
13127         default:
13128                 return -1;
13129         }
13130 }
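
/*
 * A minimal usage sketch of the slot layout shared with the lookup
 * helper below (slot indices follow the cases above): hrxq[0] IPv4,
 * hrxq[1] IPv4/TCP, hrxq[2] IPv4/UDP, hrxq[3] IPv6, hrxq[4] IPv6/TCP,
 * hrxq[5] IPv6/UDP, hrxq[6] no hashing. For example:
 *
 *	struct mlx5_shared_action_rss rss = { 0 };
 *
 *	__flow_dv_action_rss_hrxq_set(&rss, MLX5_RSS_HASH_IPV6_TCP, 7);
 *	MLX5_ASSERT(rss.hrxq[4] == 7);
 *
 * The IBV_RX_HASH_INNER bit is masked out, so inner and outer
 * variants of the same L3/L4 combination land in the same slot.
 */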
13131
13132 /**
13133  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13134  * in a shared RSS action.
13135  *
13136  * @param[in] dev
13137  *   Pointer to the Ethernet device structure.
13138  * @param[in] idx
13139  *   Shared RSS action ID holding hash RX queue objects.
13140  * @param[in] hash_fields
13141  *   Defines combination of packet fields to participate in RX hash.
13144  *
13145  * @return
13146  *   Valid hash RX queue index, otherwise 0.
13147  */
13148 static uint32_t
13149 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13150                                  const uint64_t hash_fields)
13151 {
13152         struct mlx5_priv *priv = dev->data->dev_private;
13153         struct mlx5_shared_action_rss *shared_rss =
13154             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13155         const uint32_t *hrxqs = shared_rss->hrxq;
13156
13157         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13158         case MLX5_RSS_HASH_IPV4:
13159                 /* fall-through. */
13160         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13161                 /* fall-through. */
13162         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13163                 return hrxqs[0];
13164         case MLX5_RSS_HASH_IPV4_TCP:
13165                 /* fall-through. */
13166         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13167                 /* fall-through. */
13168         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13169                 return hrxqs[1];
13170         case MLX5_RSS_HASH_IPV4_UDP:
13171                 /* fall-through. */
13172         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13173                 /* fall-through. */
13174         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13175                 return hrxqs[2];
13176         case MLX5_RSS_HASH_IPV6:
13177                 /* fall-through. */
13178         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13179                 /* fall-through. */
13180         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13181                 return hrxqs[3];
13182         case MLX5_RSS_HASH_IPV6_TCP:
13183                 /* fall-through. */
13184         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13185                 /* fall-through. */
13186         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13187                 return hrxqs[4];
13188         case MLX5_RSS_HASH_IPV6_UDP:
13189                 /* fall-through. */
13190         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13191                 /* fall-through. */
13192         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13193                 return hrxqs[5];
13194         case MLX5_RSS_HASH_NONE:
13195                 return hrxqs[6];
13196         default:
13197                 return 0;
13198         }
13199
13200 }
13201
13202 /**
13203  * Apply the flow to the NIC. Lock free
13204  * (the mutex should be acquired by the caller).
13205  *
13206  * @param[in] dev
13207  *   Pointer to the Ethernet device structure.
13208  * @param[in, out] flow
13209  *   Pointer to flow structure.
13210  * @param[out] error
13211  *   Pointer to error structure.
13212  *
13213  * @return
13214  *   0 on success, a negative errno value otherwise and rte_errno is set.
13215  */
13216 static int
13217 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13218               struct rte_flow_error *error)
13219 {
13220         struct mlx5_flow_dv_workspace *dv;
13221         struct mlx5_flow_handle *dh;
13222         struct mlx5_flow_handle_dv *dv_h;
13223         struct mlx5_flow *dev_flow;
13224         struct mlx5_priv *priv = dev->data->dev_private;
13225         uint32_t handle_idx;
13226         int n;
13227         int err;
13228         int idx;
13229         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13230         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13231
13232         MLX5_ASSERT(wks);
13233         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13234                 dev_flow = &wks->flows[idx];
13235                 dv = &dev_flow->dv;
13236                 dh = dev_flow->handle;
13237                 dv_h = &dh->dvh;
13238                 n = dv->actions_n;
13239                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13240                         if (dv->transfer) {
13241                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13242                                 dv->actions[n++] = priv->sh->dr_drop_action;
13243                         } else {
13244 #ifdef HAVE_MLX5DV_DR
13245                                 /* DR supports drop action placeholder. */
13246                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13247                                 dv->actions[n++] = priv->sh->dr_drop_action;
13248 #else
13249                                 /* For DV we use the explicit drop queue. */
13250                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13251                                 dv->actions[n++] =
13252                                                 priv->drop_queue.hrxq->action;
13253 #endif
13254                         }
13255                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13256                            !dv_h->rix_sample && !dv_h->rix_dest_array) {
13257                         struct mlx5_hrxq *hrxq;
13258                         uint32_t hrxq_idx;
13259
13260                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13261                                                     &hrxq_idx);
13262                         if (!hrxq) {
13263                                 rte_flow_error_set
13264                                         (error, rte_errno,
13265                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13266                                          "cannot get hash queue");
13267                                 goto error;
13268                         }
13269                         dh->rix_hrxq = hrxq_idx;
13270                         dv->actions[n++] = hrxq->action;
13271                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13272                         struct mlx5_hrxq *hrxq = NULL;
13273                         uint32_t hrxq_idx;
13274
13275                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13276                                                 rss_desc->shared_rss,
13277                                                 dev_flow->hash_fields);
13278                         if (hrxq_idx)
13279                                 hrxq = mlx5_ipool_get
13280                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13281                                          hrxq_idx);
13282                         if (!hrxq) {
13283                                 rte_flow_error_set
13284                                         (error, rte_errno,
13285                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13286                                          "cannot get hash queue");
13287                                 goto error;
13288                         }
13289                         dh->rix_srss = rss_desc->shared_rss;
13290                         dv->actions[n++] = hrxq->action;
13291                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13292                         if (!priv->sh->default_miss_action) {
13293                                 rte_flow_error_set
13294                                         (error, rte_errno,
13295                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13296                                          "default miss action was not created.");
13297                                 goto error;
13298                         }
13299                         dv->actions[n++] = priv->sh->default_miss_action;
13300                 }
13301                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13302                                                (void *)&dv->value, n,
13303                                                dv->actions, &dh->drv_flow);
13304                 if (err) {
13305                         rte_flow_error_set(error, errno,
13306                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13307                                            NULL,
13308                                            "hardware refuses to create flow");
13309                         goto error;
13310                 }
13311                 if (priv->vmwa_context &&
13312                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13313                         /*
13314                          * The rule contains the VLAN pattern.
13315                          * For a VF we are going to create a VLAN
13316                          * interface to make the hypervisor set the
13317                          * correct e-Switch vport context.
13318                          */
13319                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13320                 }
13321         }
13322         return 0;
13323 error:
13324         err = rte_errno; /* Save rte_errno before cleanup. */
13325         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13326                        handle_idx, dh, next) {
13327                 /* hrxq is a union; don't clear it if the flag is not set. */
13328                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13329                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13330                         dh->rix_hrxq = 0;
13331                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13332                         dh->rix_srss = 0;
13333                 }
13334                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13335                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13336         }
13337         rte_errno = err; /* Restore rte_errno. */
13338         return -rte_errno;
13339 }
13340
13341 void
13342 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
13343                           struct mlx5_cache_entry *entry)
13344 {
13345         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
13346                                                           entry);
13347
13348         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
13349         mlx5_free(cache);
13350 }
13351
13352 /**
13353  * Release the flow matcher.
13354  *
13355  * @param dev
13356  *   Pointer to Ethernet device.
13357  * @param handle
13358  *   Pointer to mlx5_flow_handle.
13359  *
13360  * @return
13361  *   1 while a reference on it exists, 0 when freed.
13362  */
13363 static int
13364 flow_dv_matcher_release(struct rte_eth_dev *dev,
13365                         struct mlx5_flow_handle *handle)
13366 {
13367         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13368         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13369                                                             typeof(*tbl), tbl);
13370         int ret;
13371
13372         MLX5_ASSERT(matcher->matcher_object);
13373         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
13374         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13375         return ret;
13376 }
13377
13378 /**
13379  * Release encap_decap resource.
13380  *
13381  * @param list
13382  *   Pointer to the hash list.
13383  * @param entry
13384  *   Pointer to exist resource entry object.
13385  */
13386 void
13387 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
13388                               struct mlx5_hlist_entry *entry)
13389 {
13390         struct mlx5_dev_ctx_shared *sh = list->ctx;
13391         struct mlx5_flow_dv_encap_decap_resource *res =
13392                 container_of(entry, typeof(*res), entry);
13393
13394         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13395         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13396 }
13397
13398 /**
13399  * Release an encap/decap resource.
13400  *
13401  * @param dev
13402  *   Pointer to Ethernet device.
13403  * @param encap_decap_idx
13404  *   Index of encap decap resource.
13405  *
13406  * @return
13407  *   1 while a reference on it exists, 0 when freed.
13408  */
13409 static int
13410 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13411                                      uint32_t encap_decap_idx)
13412 {
13413         struct mlx5_priv *priv = dev->data->dev_private;
13414         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
13415
13416         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13417                                         encap_decap_idx);
13418         if (!cache_resource)
13419                 return 0;
13420         MLX5_ASSERT(cache_resource->action);
13421         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
13422                                      &cache_resource->entry);
13423 }
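
/*
 * A hedged caller-side sketch of the release contract shared by the
 * *_release() helpers in this file: a non-zero return means other
 * flows still hold references and nothing was destroyed yet.
 *
 *	if (!flow_dv_encap_decap_resource_release(dev, idx))
 *		DRV_LOG(DEBUG, "encap/decap resource %u freed", idx);
 *
 * (idx here is a hypothetical encap/decap resource index.)
 */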
13424
13425 /**
13426  * Release a jump to table action resource.
13427  *
13428  * @param dev
13429  *   Pointer to Ethernet device.
13430  * @param rix_jump
13431  *   Index to the jump action resource.
13432  *
13433  * @return
13434  *   1 while a reference on it exists, 0 when freed.
13435  */
13436 static int
13437 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13438                                   uint32_t rix_jump)
13439 {
13440         struct mlx5_priv *priv = dev->data->dev_private;
13441         struct mlx5_flow_tbl_data_entry *tbl_data;
13442
13443         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13444                                   rix_jump);
13445         if (!tbl_data)
13446                 return 0;
13447         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13448 }
13449
13450 void
13451 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
13452                          struct mlx5_hlist_entry *entry)
13453 {
13454         struct mlx5_flow_dv_modify_hdr_resource *res =
13455                 container_of(entry, typeof(*res), entry);
13456
13457         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13458         mlx5_free(entry);
13459 }
13460
13461 /**
13462  * Release a modify-header resource.
13463  *
13464  * @param dev
13465  *   Pointer to Ethernet device.
13466  * @param handle
13467  *   Pointer to mlx5_flow_handle.
13468  *
13469  * @return
13470  *   1 while a reference on it exists, 0 when freed.
13471  */
13472 static int
13473 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13474                                     struct mlx5_flow_handle *handle)
13475 {
13476         struct mlx5_priv *priv = dev->data->dev_private;
13477         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13478
13479         MLX5_ASSERT(entry->action);
13480         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13481 }
13482
13483 void
13484 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
13485                           struct mlx5_cache_entry *entry)
13486 {
13487         struct mlx5_dev_ctx_shared *sh = list->ctx;
13488         struct mlx5_flow_dv_port_id_action_resource *cache =
13489                         container_of(entry, typeof(*cache), entry);
13490
13491         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13492         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
13493 }
13494
13495 /**
13496  * Release port ID action resource.
13497  *
13498  * @param dev
13499  *   Pointer to Ethernet device.
13500  * @param port_id
13501  *   Index to port ID action resource.
13502  *
13503  * @return
13504  *   1 while a reference on it exists, 0 when freed.
13505  */
13506 static int
13507 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13508                                         uint32_t port_id)
13509 {
13510         struct mlx5_priv *priv = dev->data->dev_private;
13511         struct mlx5_flow_dv_port_id_action_resource *cache;
13512
13513         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13514         if (!cache)
13515                 return 0;
13516         MLX5_ASSERT(cache->action);
13517         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
13518                                      &cache->entry);
13519 }
13520
13521 /**
13522  * Release shared RSS action resource.
13523  *
13524  * @param dev
13525  *   Pointer to Ethernet device.
13526  * @param srss
13527  *   Shared RSS action index.
13528  */
13529 static void
13530 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13531 {
13532         struct mlx5_priv *priv = dev->data->dev_private;
13533         struct mlx5_shared_action_rss *shared_rss;
13534
13535         shared_rss = mlx5_ipool_get
13536                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13537         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13538 }
13539
13540 void
13541 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
13542                             struct mlx5_cache_entry *entry)
13543 {
13544         struct mlx5_dev_ctx_shared *sh = list->ctx;
13545         struct mlx5_flow_dv_push_vlan_action_resource *cache =
13546                         container_of(entry, typeof(*cache), entry);
13547
13548         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13549         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
13550 }
13551
13552 /**
13553  * Release push vlan action resource.
13554  *
13555  * @param dev
13556  *   Pointer to Ethernet device.
13557  * @param handle
13558  *   Pointer to mlx5_flow_handle.
13559  *
13560  * @return
13561  *   1 while a reference on it exists, 0 when freed.
13562  */
13563 static int
13564 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13565                                           struct mlx5_flow_handle *handle)
13566 {
13567         struct mlx5_priv *priv = dev->data->dev_private;
13568         struct mlx5_flow_dv_push_vlan_action_resource *cache;
13569         uint32_t idx = handle->dvh.rix_push_vlan;
13570
13571         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13572         if (!cache)
13573                 return 0;
13574         MLX5_ASSERT(cache->action);
13575         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
13576                                      &cache->entry);
13577 }
13578
13579 /**
13580  * Release the fate resource.
13581  *
13582  * @param dev
13583  *   Pointer to Ethernet device.
13584  * @param handle
13585  *   Pointer to mlx5_flow_handle.
13586  */
13587 static void
13588 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
13589                                struct mlx5_flow_handle *handle)
13590 {
13591         if (!handle->rix_fate)
13592                 return;
13593         switch (handle->fate_action) {
13594         case MLX5_FLOW_FATE_QUEUE:
13595                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
13596                         mlx5_hrxq_release(dev, handle->rix_hrxq);
13597                 break;
13598         case MLX5_FLOW_FATE_JUMP:
13599                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
13600                 break;
13601         case MLX5_FLOW_FATE_PORT_ID:
13602                 flow_dv_port_id_action_resource_release(dev,
13603                                 handle->rix_port_id_action);
13604                 break;
13605         default:
13606                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
13607                 break;
13608         }
13609         handle->rix_fate = 0;
13610 }
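
/*
 * A sketch of why the dispatch above is needed (assuming rix_fate
 * aliases the per-fate indexes such as rix_hrxq/rix_jump/
 * rix_port_id_action in a union, as the "hrxq is a union" comment in
 * flow_dv_apply() suggests): the same 32-bit index must be interpreted
 * according to fate_action, e.g.
 *
 *	handle->fate_action = MLX5_FLOW_FATE_JUMP;
 *	handle->rix_jump = jump_idx;
 *	...
 *	flow_dv_fate_resource_release(dev, handle);
 */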
13611
13612 void
13613 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
13614                          struct mlx5_cache_entry *entry)
13615 {
13616         struct mlx5_flow_dv_sample_resource *cache_resource =
13617                         container_of(entry, typeof(*cache_resource), entry);
13618         struct rte_eth_dev *dev = cache_resource->dev;
13619         struct mlx5_priv *priv = dev->data->dev_private;
13620
13621         if (cache_resource->verbs_action)
13622                 claim_zero(mlx5_flow_os_destroy_flow_action
13623                                 (cache_resource->verbs_action));
13624         if (cache_resource->normal_path_tbl)
13625                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13626                         cache_resource->normal_path_tbl);
13627         flow_dv_sample_sub_actions_release(dev,
13628                                 &cache_resource->sample_idx);
13629         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13630                         cache_resource->idx);
13631         DRV_LOG(DEBUG, "sample resource %p: removed",
13632                 (void *)cache_resource);
13633 }
13634
13635 /**
13636  * Release a sample resource.
13637  *
13638  * @param dev
13639  *   Pointer to Ethernet device.
13640  * @param handle
13641  *   Pointer to mlx5_flow_handle.
13642  *
13643  * @return
13644  *   1 while a reference on it exists, 0 when freed.
13645  */
13646 static int
13647 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
13648                                      struct mlx5_flow_handle *handle)
13649 {
13650         struct mlx5_priv *priv = dev->data->dev_private;
13651         struct mlx5_flow_dv_sample_resource *cache_resource;
13652
13653         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13654                          handle->dvh.rix_sample);
13655         if (!cache_resource)
13656                 return 0;
13657         MLX5_ASSERT(cache_resource->verbs_action);
13658         return mlx5_cache_unregister(&priv->sh->sample_action_list,
13659                                      &cache_resource->entry);
13660 }
13661
13662 void
13663 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
13664                              struct mlx5_cache_entry *entry)
13665 {
13666         struct mlx5_flow_dv_dest_array_resource *cache_resource =
13667                         container_of(entry, typeof(*cache_resource), entry);
13668         struct rte_eth_dev *dev = cache_resource->dev;
13669         struct mlx5_priv *priv = dev->data->dev_private;
13670         uint32_t i = 0;
13671
13672         MLX5_ASSERT(cache_resource->action);
13673         if (cache_resource->action)
13674                 claim_zero(mlx5_flow_os_destroy_flow_action
13675                                         (cache_resource->action));
13676         for (; i < cache_resource->num_of_dest; i++)
13677                 flow_dv_sample_sub_actions_release(dev,
13678                                 &cache_resource->sample_idx[i]);
13679         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13680                         cache_resource->idx);
13681         DRV_LOG(DEBUG, "destination array resource %p: removed",
13682                 (void *)cache_resource);
13683 }
13684
13685 /**
13686  * Release a destination array resource.
13687  *
13688  * @param dev
13689  *   Pointer to Ethernet device.
13690  * @param handle
13691  *   Pointer to mlx5_flow_handle.
13692  *
13693  * @return
13694  *   1 while a reference on it exists, 0 when freed.
13695  */
13696 static int
13697 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
13698                                     struct mlx5_flow_handle *handle)
13699 {
13700         struct mlx5_priv *priv = dev->data->dev_private;
13701         struct mlx5_flow_dv_dest_array_resource *cache;
13702
13703         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13704                                handle->dvh.rix_dest_array);
13705         if (!cache)
13706                 return 0;
13707         MLX5_ASSERT(cache->action);
13708         return mlx5_cache_unregister(&priv->sh->dest_array_list,
13709                                      &cache->entry);
13710 }
13711
13712 static void
13713 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
13714 {
13715         struct mlx5_priv *priv = dev->data->dev_private;
13716         struct mlx5_dev_ctx_shared *sh = priv->sh;
13717         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
13718                                 sh->geneve_tlv_option_resource;
13719         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
13720         if (geneve_opt_resource) {
13721                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
13722                                          __ATOMIC_RELAXED))) {
13723                         claim_zero(mlx5_devx_cmd_destroy
13724                                         (geneve_opt_resource->obj));
13725                         mlx5_free(sh->geneve_tlv_option_resource);
13726                         sh->geneve_tlv_option_resource = NULL;
13727                 }
13728         }
13729         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
13730 }
13731
13732 /**
13733  * Remove the flow from the NIC but keep it in memory.
13734  * Lock free (the mutex should be acquired by the caller).
13735  *
13736  * @param[in] dev
13737  *   Pointer to Ethernet device.
13738  * @param[in, out] flow
13739  *   Pointer to flow structure.
13740  */
13741 static void
13742 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
13743 {
13744         struct mlx5_flow_handle *dh;
13745         uint32_t handle_idx;
13746         struct mlx5_priv *priv = dev->data->dev_private;
13747
13748         if (!flow)
13749                 return;
13750         handle_idx = flow->dev_handles;
13751         while (handle_idx) {
13752                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13753                                     handle_idx);
13754                 if (!dh)
13755                         return;
13756                 if (dh->drv_flow) {
13757                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
13758                         dh->drv_flow = NULL;
13759                 }
13760                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
13761                         flow_dv_fate_resource_release(dev, dh);
13762                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13763                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13764                 handle_idx = dh->next.next;
13765         }
13766 }
13767
13768 /**
13769  * Remove the flow from the NIC and the memory.
13770  * Lock free (the mutex should be acquired by the caller).
13771  *
13772  * @param[in] dev
13773  *   Pointer to the Ethernet device structure.
13774  * @param[in, out] flow
13775  *   Pointer to flow structure.
13776  */
13777 static void
13778 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
13779 {
13780         struct mlx5_flow_handle *dev_handle;
13781         struct mlx5_priv *priv = dev->data->dev_private;
13782         struct mlx5_flow_meter_info *fm = NULL;
13783         uint32_t srss = 0;
13784
13785         if (!flow)
13786                 return;
13787         flow_dv_remove(dev, flow);
13788         if (flow->counter) {
13789                 flow_dv_counter_free(dev, flow->counter);
13790                 flow->counter = 0;
13791         }
13792         if (flow->meter) {
13793                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
13794                 if (fm)
13795                         mlx5_flow_meter_detach(priv, fm);
13796                 flow->meter = 0;
13797         }
13798         /* Keep the current age handling by default. */
13799         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
13800                 flow_dv_aso_ct_release(dev, flow->ct);
13801         else if (flow->age)
13802                 flow_dv_aso_age_release(dev, flow->age);
13803         if (flow->geneve_tlv_option) {
13804                 flow_dv_geneve_tlv_option_resource_release(dev);
13805                 flow->geneve_tlv_option = 0;
13806         }
13807         while (flow->dev_handles) {
13808                 uint32_t tmp_idx = flow->dev_handles;
13809
13810                 dev_handle = mlx5_ipool_get(priv->sh->ipool
13811                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
13812                 if (!dev_handle)
13813                         return;
13814                 flow->dev_handles = dev_handle->next.next;
13815                 if (dev_handle->dvh.matcher)
13816                         flow_dv_matcher_release(dev, dev_handle);
13817                 if (dev_handle->dvh.rix_sample)
13818                         flow_dv_sample_resource_release(dev, dev_handle);
13819                 if (dev_handle->dvh.rix_dest_array)
13820                         flow_dv_dest_array_resource_release(dev, dev_handle);
13821                 if (dev_handle->dvh.rix_encap_decap)
13822                         flow_dv_encap_decap_resource_release(dev,
13823                                 dev_handle->dvh.rix_encap_decap);
13824                 if (dev_handle->dvh.modify_hdr)
13825                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
13826                 if (dev_handle->dvh.rix_push_vlan)
13827                         flow_dv_push_vlan_action_resource_release(dev,
13828                                                                   dev_handle);
13829                 if (dev_handle->dvh.rix_tag)
13830                         flow_dv_tag_release(dev,
13831                                             dev_handle->dvh.rix_tag);
13832                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
13833                         flow_dv_fate_resource_release(dev, dev_handle);
13834                 else if (!srss)
13835                         srss = dev_handle->rix_srss;
13836                 if (fm && dev_handle->is_meter_flow_id &&
13837                     dev_handle->split_flow_id)
13838                         mlx5_ipool_free(fm->flow_ipool,
13839                                         dev_handle->split_flow_id);
13840                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13841                            tmp_idx);
13842         }
13843         if (srss)
13844                 flow_dv_shared_rss_action_release(dev, srss);
13845 }
13846
13847 /**
13848  * Release array of hash RX queue objects.
13849  * Helper function.
13850  *
13851  * @param[in] dev
13852  *   Pointer to the Ethernet device structure.
13853  * @param[in, out] hrxqs
13854  *   Array of hash RX queue objects.
13855  *
13856  * @return
13857  *   Total number of references to hash RX queue objects in *hrxqs* array
13858  *   after this operation.
13859  */
13860 static int
13861 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
13862                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
13863 {
13864         size_t i;
13865         int remaining = 0;
13866
13867         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
13868                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
13869
13870                 if (!ret)
13871                         (*hrxqs)[i] = 0;
13872                 remaining += ret;
13873         }
13874         return remaining;
13875 }
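
/*
 * A minimal sketch of the return semantics above (assuming
 * mlx5_hrxq_release() returns the reference count left on each
 * object): the result is the sum of per-slot refcounts remaining, so
 * a caller can detect leaked external references:
 *
 *	if (__flow_dv_hrxqs_release(dev, &shared_rss->hrxq))
 *		DRV_LOG(DEBUG, "shared RSS hrxqs are still referenced");
 */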
13876
13877 /**
13878  * Release all hash RX queue objects representing shared RSS action.
13879  *
13880  * @param[in] dev
13881  *   Pointer to the Ethernet device structure.
13882  * @param[in, out] shared_rss
13883  *   Shared RSS action to remove hash RX queue objects from.
13884  *
13885  * @return
13886  *   Total number of references to hash RX queue objects stored in *action*
13887  *   after this operation.
13888  *   Expected to be 0 if no external references are held.
13889  */
13890 static int
13891 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
13892                                  struct mlx5_shared_action_rss *shared_rss)
13893 {
13894         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
13895 }
13896
13897 /**
13898  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
13899  * user input.
13900  *
13901  * Only one hash value is available for one L3+L4 combination.
13902  * For example,
13903  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
13904  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
13905  * the same slot in mlx5_rss_hash_fields.
13906  *
13907  * @param[in] rss
13908  *   Pointer to the shared action RSS conf.
13909  * @param[in, out] hash_field
13910  *   hash_field variable to be adjusted.
13911  *
13912  * @return
13913  *   void
13914  */
13915 static void
13916 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
13917                                      uint64_t *hash_field)
13918 {
13919         uint64_t rss_types = rss->origin.types;
13920
13921         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13922         case MLX5_RSS_HASH_IPV4:
13923                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13924                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13925                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13926                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13927                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13928                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13929                         else
13930                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13931                 }
13932                 return;
13933         case MLX5_RSS_HASH_IPV6:
13934                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13935                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13936                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13937                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13938                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13939                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13940                         else
13941                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13942                 }
13943                 return;
13944         case MLX5_RSS_HASH_IPV4_UDP:
13945                 /* fall-through. */
13946         case MLX5_RSS_HASH_IPV6_UDP:
13947                 if (rss_types & ETH_RSS_UDP) {
13948                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13949                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13950                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13951                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13952                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13953                         else
13954                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
13955                 }
13956                 return;
13957         case MLX5_RSS_HASH_IPV4_TCP:
13958                 /* fall-through. */
13959         case MLX5_RSS_HASH_IPV6_TCP:
13960                 if (rss_types & ETH_RSS_TCP) {
13961                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
13962                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13963                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
13964                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13965                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
13966                         else
13967                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
13968                 }
13969                 return;
13970         default:
13971                 return;
13972         }
13973 }
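
/*
 * A worked example of the adjustment above (assuming
 * MLX5_RSS_HASH_IPV4_UDP is MLX5_RSS_HASH_IPV4 plus the UDP port bits
 * in MLX5_UDP_IBV_RX_HASH): with rss->origin.types set to
 * ETH_RSS_UDP | ETH_RSS_L4_DST_ONLY, an input hash field of
 * MLX5_RSS_HASH_IPV4_UDP becomes
 *
 *	MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_UDP
 *
 * i.e. both IPv4 addresses still feed the hash, but only the UDP
 * destination port contributes on L4.
 */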
13974
13975 /**
13976  * Setup shared RSS action.
13977  * Prepare set of hash RX queue objects sufficient to handle all valid
13978  * hash_fields combinations (see enum ibv_rx_hash_fields).
13979  *
13980  * @param[in] dev
13981  *   Pointer to the Ethernet device structure.
13982  * @param[in] action_idx
13983  *   Shared RSS action ipool index.
13984  * @param[in, out] shared_rss
13985  *   Partially initialized shared RSS action.
13986  * @param[out] error
13987  *   Perform verbose error reporting if not NULL. Initialized in case of
13988  *   error only.
13989  *
13990  * @return
13991  *   0 on success, otherwise negative errno value.
13992  */
13993 static int
13994 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
13995                            uint32_t action_idx,
13996                            struct mlx5_shared_action_rss *shared_rss,
13997                            struct rte_flow_error *error)
13998 {
13999         struct mlx5_flow_rss_desc rss_desc = { 0 };
14000         size_t i;
14001         int err;
14002
14003         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14004                 return rte_flow_error_set(error, rte_errno,
14005                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14006                                           "cannot setup indirection table");
14007         }
14008         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14009         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14010         rss_desc.const_q = shared_rss->origin.queue;
14011         rss_desc.queue_num = shared_rss->origin.queue_num;
14012         /* Set non-zero value to indicate a shared RSS. */
14013         rss_desc.shared_rss = action_idx;
14014         rss_desc.ind_tbl = shared_rss->ind_tbl;
14015         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14016                 uint32_t hrxq_idx;
14017                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14018                 int tunnel = 0;
14019
14020                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14021                 if (shared_rss->origin.level > 1) {
14022                         hash_fields |= IBV_RX_HASH_INNER;
14023                         tunnel = 1;
14024                 }
14025                 rss_desc.tunnel = tunnel;
14026                 rss_desc.hash_fields = hash_fields;
14027                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14028                 if (!hrxq_idx) {
14029                         rte_flow_error_set
14030                                 (error, rte_errno,
14031                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14032                                  "cannot get hash queue");
14033                         goto error_hrxq_new;
14034                 }
14035                 err = __flow_dv_action_rss_hrxq_set
14036                         (shared_rss, hash_fields, hrxq_idx);
14037                 MLX5_ASSERT(!err);
14038         }
14039         return 0;
14040 error_hrxq_new:
14041         err = rte_errno;
14042         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14043         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14044                 shared_rss->ind_tbl = NULL;
14045         rte_errno = err;
14046         return -rte_errno;
14047 }
14048
14049 /**
14050  * Create shared RSS action.
14051  *
14052  * @param[in] dev
14053  *   Pointer to the Ethernet device structure.
14054  * @param[in] conf
14055  *   Shared action configuration.
14056  * @param[in] rss
14057  *   RSS action specification used to create shared action.
14058  * @param[out] error
14059  *   Perform verbose error reporting if not NULL. Initialized in case of
14060  *   error only.
14061  *
14062  * @return
14063  *   A valid shared action ID in case of success, 0 otherwise and
14064  *   rte_errno is set.
14065  */
14066 static uint32_t
14067 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14068                             const struct rte_flow_indir_action_conf *conf,
14069                             const struct rte_flow_action_rss *rss,
14070                             struct rte_flow_error *error)
14071 {
14072         struct mlx5_priv *priv = dev->data->dev_private;
14073         struct mlx5_shared_action_rss *shared_rss = NULL;
14074         void *queue = NULL;
14075         struct rte_flow_action_rss *origin;
14076         const uint8_t *rss_key;
14077         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14078         uint32_t idx;
14079
14080         RTE_SET_USED(conf);
14081         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14082                             0, SOCKET_ID_ANY);
14083         shared_rss = mlx5_ipool_zmalloc
14084                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14085         if (!shared_rss || !queue) {
14086                 rte_flow_error_set(error, ENOMEM,
14087                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14088                                    "cannot allocate resource memory");
14089                 goto error_rss_init;
14090         }
14091         if (idx >= (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14092                 rte_flow_error_set(error, E2BIG,
14093                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14094                                    "rss action number out of range");
14095                 goto error_rss_init;
14096         }
14097         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14098                                           sizeof(*shared_rss->ind_tbl),
14099                                           0, SOCKET_ID_ANY);
14100         if (!shared_rss->ind_tbl) {
14101                 rte_flow_error_set(error, ENOMEM,
14102                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14103                                    "cannot allocate resource memory");
14104                 goto error_rss_init;
14105         }
14106         memcpy(queue, rss->queue, queue_size);
14107         shared_rss->ind_tbl->queues = queue;
14108         shared_rss->ind_tbl->queues_n = rss->queue_num;
14109         origin = &shared_rss->origin;
14110         origin->func = rss->func;
14111         origin->level = rss->level;
14112         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14113         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14114         /* NULL RSS key indicates default RSS key. */
14115         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14116         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14117         origin->key = &shared_rss->key[0];
14118         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14119         origin->queue = queue;
14120         origin->queue_num = rss->queue_num;
14121         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14122                 goto error_rss_init;
14123         rte_spinlock_init(&shared_rss->action_rss_sl);
14124         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14125         rte_spinlock_lock(&priv->shared_act_sl);
14126         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14127                      &priv->rss_shared_actions, idx, shared_rss, next);
14128         rte_spinlock_unlock(&priv->shared_act_sl);
14129         return idx;
14130 error_rss_init:
14131         if (shared_rss) {
14132                 if (shared_rss->ind_tbl)
14133                         mlx5_free(shared_rss->ind_tbl);
14134                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14135                                 idx);
14136         }
14137         if (queue)
14138                 mlx5_free(queue);
14139         return 0;
14140 }
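
/*
 * Editor's note: a hedged application-side sketch (helper name and
 * configuration are illustrative assumptions) of creating the shared RSS
 * indirect action that ends up in __flow_dv_action_rss_create() above.
 */
static struct rte_flow_action_handle *
example_shared_rss_create(uint16_t port_id, const uint16_t *queues,
                          uint32_t queue_num)
{
        struct rte_flow_error error;
        const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
        const struct rte_flow_action_rss rss = {
                .types = ETH_RSS_IP,    /* 0 would also default to ETH_RSS_IP. */
                .key = NULL,            /* NULL selects the default RSS key. */
                .queue = queues,
                .queue_num = queue_num,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };

        return rte_flow_action_handle_create(port_id, &conf, &action, &error);
}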
14141
14142 /**
14143  * Destroy the shared RSS action.
14144  * Release related hash RX queue objects.
14145  *
14146  * @param[in] dev
14147  *   Pointer to the Ethernet device structure.
14148  * @param[in] idx
14149  *   The shared RSS action object ID to be removed.
14150  * @param[out] error
14151  *   Perform verbose error reporting if not NULL. Initialized in case of
14152  *   error only.
14153  *
14154  * @return
14155  *   0 on success, otherwise negative errno value.
14156  */
14157 static int
14158 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14159                              struct rte_flow_error *error)
14160 {
14161         struct mlx5_priv *priv = dev->data->dev_private;
14162         struct mlx5_shared_action_rss *shared_rss =
14163             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14164         uint32_t old_refcnt = 1;
14165         int remaining;
14166         uint16_t *queue = NULL;
14167
14168         if (!shared_rss)
14169                 return rte_flow_error_set(error, EINVAL,
14170                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14171                                           "invalid shared action");
14172         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14173         if (remaining)
14174                 return rte_flow_error_set(error, EBUSY,
14175                                           RTE_FLOW_ERROR_TYPE_ACTION,
14176                                           NULL,
14177                                           "shared rss hrxq has references");
14178         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14179                                          0, 0, __ATOMIC_ACQUIRE,
14180                                          __ATOMIC_RELAXED))
14181                 return rte_flow_error_set(error, EBUSY,
14182                                           RTE_FLOW_ERROR_TYPE_ACTION,
14183                                           NULL,
14184                                           "shared rss has references");
14185         queue = shared_rss->ind_tbl->queues;
14186         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14187         if (remaining)
14188                 return rte_flow_error_set(error, EBUSY,
14189                                           RTE_FLOW_ERROR_TYPE_ACTION,
14190                                           NULL,
14191                                           "shared rss indirection table has"
14192                                           " references");
14193         mlx5_free(queue);
14194         rte_spinlock_lock(&priv->shared_act_sl);
14195         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14196                      &priv->rss_shared_actions, idx, shared_rss, next);
14197         rte_spinlock_unlock(&priv->shared_act_sl);
14198         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14199                         idx);
14200         return 0;
14201 }
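
/*
 * Editor's note: the release path above destroys the action only when it
 * holds the last reference. A minimal sketch of that compare-and-swap
 * pattern (illustrative helper, not part of the driver):
 */
static bool
example_try_last_release(uint32_t *refcnt)
{
        uint32_t expected = 1;

        /* Succeeds only for a 1 -> 0 transition; concurrent holders fail. */
        return __atomic_compare_exchange_n(refcnt, &expected, 0, false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}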
14202
14203 /**
14204  * Create indirect action, lock free,
14205  * (mutex should be acquired by caller).
14206  * Dispatcher for action type specific call.
14207  *
14208  * @param[in] dev
14209  *   Pointer to the Ethernet device structure.
14210  * @param[in] conf
14211  *   Shared action configuration.
14212  * @param[in] action
14213  *   Action specification used to create indirect action.
14214  * @param[out] error
14215  *   Perform verbose error reporting if not NULL. Initialized in case of
14216  *   error only.
14217  *
14218  * @return
14219  *   A valid shared action handle in case of success, NULL otherwise and
14220  *   rte_errno is set.
14221  */
14222 static struct rte_flow_action_handle *
14223 flow_dv_action_create(struct rte_eth_dev *dev,
14224                       const struct rte_flow_indir_action_conf *conf,
14225                       const struct rte_flow_action *action,
14226                       struct rte_flow_error *err)
14227 {
14228         struct mlx5_priv *priv = dev->data->dev_private;
14229         uint32_t age_idx = 0;
14230         uint32_t idx = 0;
14231         uint32_t ret = 0;
14232
14233         switch (action->type) {
14234         case RTE_FLOW_ACTION_TYPE_RSS:
14235                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14236                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14237                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14238                 break;
14239         case RTE_FLOW_ACTION_TYPE_AGE:
14240         {
14241                 const struct rte_flow_action_age *age = action->conf;
14242
14243                 age_idx = flow_dv_aso_age_alloc(dev, err);
14244                 if (!age_idx) {
14245                         ret = -rte_errno;
14246                         break;
14247                 }
14248                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14249                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14250                 flow_dv_aso_age_params_init(dev, age_idx,
14251                                             age->context ? age->context :
14252                                             (void *)(uintptr_t)idx,
14253                                             age->timeout);
14254                 ret = age_idx;
14255                 break;
14256         }
14257         case RTE_FLOW_ACTION_TYPE_COUNT:
14258                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14259                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14260                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14261                 break;
14262         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14263                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14264                                                          err);
14265                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14266                 break;
14267         default:
14268                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14269                                    NULL, "action type not supported");
14270                 break;
14271         }
14272         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14273 }
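
/*
 * Editor's note: worked example of the handle encoding returned above. The
 * action type occupies the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and
 * the object index the bits below it, so (assuming, for illustration, an
 * offset of 29) an RSS action with ipool index 5 yields:
 *
 *   idx = (MLX5_INDIRECT_ACTION_TYPE_RSS << 29) | 5
 *
 * The integer travels through the opaque handle pointer, which is why
 * destroy/update/query below start by casting the handle back to uint32_t
 * and splitting type and index with a shift and a mask.
 */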
14274
14275 /**
14276  * Destroy the indirect action.
14277  * Release action related resources on the NIC and the memory.
14278  * Lock free, (mutex should be acquired by caller).
14279  * Dispatcher for action type specific call.
14280  *
14281  * @param[in] dev
14282  *   Pointer to the Ethernet device structure.
14283  * @param[in] handle
14284  *   The indirect action object handle to be removed.
14285  * @param[out] error
14286  *   Perform verbose error reporting if not NULL. Initialized in case of
14287  *   error only.
14288  *
14289  * @return
14290  *   0 on success, otherwise negative errno value.
14291  */
14292 static int
14293 flow_dv_action_destroy(struct rte_eth_dev *dev,
14294                        struct rte_flow_action_handle *handle,
14295                        struct rte_flow_error *error)
14296 {
14297         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14298         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14299         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14300         struct mlx5_flow_counter *cnt;
14301         uint32_t no_flow_refcnt = 1;
14302         int ret;
14303
14304         switch (type) {
14305         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14306                 return __flow_dv_action_rss_release(dev, idx, error);
14307         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14308                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14309                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14310                                                  &no_flow_refcnt, 1, false,
14311                                                  __ATOMIC_ACQUIRE,
14312                                                  __ATOMIC_RELAXED))
14313                         return rte_flow_error_set(error, EBUSY,
14314                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14315                                                   NULL,
14316                                                   "Indirect count action has references");
14317                 flow_dv_counter_free(dev, idx);
14318                 return 0;
14319         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14320                 ret = flow_dv_aso_age_release(dev, idx);
14321                 if (ret)
14322                         /*
14323                          * In this case, the last flow holding the
14324                          * reference will actually release the age action.
14325                          */
14326                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14327                                 " released with references %d.", idx, ret);
14328                 return 0;
14329         case MLX5_INDIRECT_ACTION_TYPE_CT:
14330                 ret = flow_dv_aso_ct_release(dev, idx);
14331                 if (ret < 0)
14332                         return ret;
14333                 if (ret > 0)
14334                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14335                                 "has references %d.", idx, ret);
14336                 return 0;
14337         default:
14338                 return rte_flow_error_set(error, ENOTSUP,
14339                                           RTE_FLOW_ERROR_TYPE_ACTION,
14340                                           NULL,
14341                                           "action type not supported");
14342         }
14343 }
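
/*
 * Editor's note: an application-side sketch (wrapper name assumed) of
 * tearing an indirect action down. EBUSY from the dispatcher above means
 * some flow or hash queue still references the object.
 */
static int
example_indirect_destroy(uint16_t port_id,
                         struct rte_flow_action_handle *handle)
{
        struct rte_flow_error error;

        return rte_flow_action_handle_destroy(port_id, handle, &error);
}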
14344
14345 /**
14346  * Updates in place shared RSS action configuration.
14347  *
14348  * @param[in] dev
14349  *   Pointer to the Ethernet device structure.
14350  * @param[in] idx
14351  *   The shared RSS action object ID to be updated.
14352  * @param[in] action_conf
14353  *   RSS action specification used to modify *shared_rss*.
14354  * @param[out] error
14355  *   Perform verbose error reporting if not NULL. Initialized in case of
14356  *   error only.
14357  *
14358  * @return
14359  *   0 on success, otherwise negative errno value.
14360  * @note: currently only updating the RSS queues is supported.
14361  */
14362 static int
14363 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14364                             const struct rte_flow_action_rss *action_conf,
14365                             struct rte_flow_error *error)
14366 {
14367         struct mlx5_priv *priv = dev->data->dev_private;
14368         struct mlx5_shared_action_rss *shared_rss =
14369             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14370         int ret = 0;
14371         void *queue = NULL;
14372         uint16_t *queue_old = NULL;
14373         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14374
14375         if (!shared_rss)
14376                 return rte_flow_error_set(error, EINVAL,
14377                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14378                                           "invalid shared action to update");
14379         if (priv->obj_ops.ind_table_modify == NULL)
14380                 return rte_flow_error_set(error, ENOTSUP,
14381                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14382                                           "cannot modify indirection table");
14383         queue = mlx5_malloc(MLX5_MEM_ZERO,
14384                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14385                             0, SOCKET_ID_ANY);
14386         if (!queue)
14387                 return rte_flow_error_set(error, ENOMEM,
14388                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14389                                           NULL,
14390                                           "cannot allocate resource memory");
14391         memcpy(queue, action_conf->queue, queue_size);
14392         MLX5_ASSERT(shared_rss->ind_tbl);
14393         rte_spinlock_lock(&shared_rss->action_rss_sl);
14394         queue_old = shared_rss->ind_tbl->queues;
14395         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14396                                         queue, action_conf->queue_num, true);
14397         if (ret) {
14398                 mlx5_free(queue);
14399                 ret = rte_flow_error_set(error, rte_errno,
14400                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14401                                           "cannot update indirection table");
14402         } else {
14403                 mlx5_free(queue_old);
14404                 shared_rss->origin.queue = queue;
14405                 shared_rss->origin.queue_num = action_conf->queue_num;
14406         }
14407         rte_spinlock_unlock(&shared_rss->action_rss_sl);
14408         return ret;
14409 }
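
/*
 * Editor's note: a hedged sketch (helper name assumed) of retargeting a
 * shared RSS action to a new queue set. Per the note above, only the
 * queue list is updatable; key, types, and level are left untouched.
 */
static int
example_shared_rss_retarget(uint16_t port_id,
                            struct rte_flow_action_handle *handle,
                            const uint16_t *queues, uint32_t queue_num)
{
        struct rte_flow_error error;
        const struct rte_flow_action_rss rss = {
                .queue = queues,
                .queue_num = queue_num,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };

        /* The update payload for RSS is a complete rte_flow_action. */
        return rte_flow_action_handle_update(port_id, handle, &action, &error);
}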
14410
14411 /**
14412  * Updates in place conntrack context or direction.
14413  * Context update should be synchronized.
14414  *
14415  * @param[in] dev
14416  *   Pointer to the Ethernet device structure.
14417  * @param[in] idx
14418  *   The conntrack object ID to be updated.
14419  * @param[in] update
14420  *   Pointer to the structure of information to update.
14421  * @param[out] error
14422  *   Perform verbose error reporting if not NULL. Initialized in case of
14423  *   error only.
14424  *
14425  * @return
14426  *   0 on success, otherwise negative errno value.
14427  */
14428 static int
14429 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14430                            const struct rte_flow_modify_conntrack *update,
14431                            struct rte_flow_error *error)
14432 {
14433         struct mlx5_priv *priv = dev->data->dev_private;
14434         struct mlx5_aso_ct_action *ct;
14435         const struct rte_flow_action_conntrack *new_prf;
14436         int ret = 0;
14437         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14438         uint32_t dev_idx;
14439
14440         if (PORT_ID(priv) != owner)
14441                 return rte_flow_error_set(error, EACCES,
14442                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14443                                           NULL,
14444                                           "CT object owned by another port");
14445         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14446         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14447         if (!ct->refcnt)
14448                 return rte_flow_error_set(error, ENOMEM,
14449                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14450                                           NULL,
14451                                           "CT object is inactive");
14452         new_prf = &update->new_ct;
14453         if (update->direction)
14454                 ct->is_original = !!new_prf->is_original_dir;
14455         if (update->state) {
14456                 /* Only validate the profile when it needs to be updated. */
14457                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14458                 if (ret)
14459                         return ret;
14460                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14461                 if (ret)
14462                         return rte_flow_error_set(error, EIO,
14463                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14464                                         NULL,
14465                                         "Failed to send CT context update WQE");
14466                 /* Block until ready or a failure. */
14467                 ret = mlx5_aso_ct_available(priv->sh, ct);
14468                 if (ret)
14469                         rte_flow_error_set(error, rte_errno,
14470                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14471                                            NULL,
14472                                            "timed out waiting for the CT update");
14473         }
14474         return ret;
14475 }
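
/*
 * Editor's note: a minimal sketch (assumed helper name) of updating only
 * the conntrack direction. Leaving .state clear skips both the profile
 * validation and the ASO WQE round-trip in the function above.
 */
static int
example_ct_set_direction(uint16_t port_id,
                         struct rte_flow_action_handle *handle,
                         bool original_dir)
{
        struct rte_flow_error error;
        struct rte_flow_modify_conntrack update = {
                .new_ct = { .is_original_dir = original_dir },
                .direction = 1,
                .state = 0,
        };

        return rte_flow_action_handle_update(port_id, handle, &update, &error);
}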
14476
14477 /**
14478  * Updates in place shared action configuration, lock free,
14479  * (mutex should be acquired by caller).
14480  *
14481  * @param[in] dev
14482  *   Pointer to the Ethernet device structure.
14483  * @param[in] handle
14484  *   The indirect action object handle to be updated.
14485  * @param[in] update
14486  *   Action specification used to modify the action pointed by *handle*.
14487  *   *update* could be of same type with the action pointed by the *handle*
14488  *   handle argument, or some other structures like a wrapper, depending on
14489  *   the indirect action type.
14490  * @param[out] error
14491  *   Perform verbose error reporting if not NULL. Initialized in case of
14492  *   error only.
14493  *
14494  * @return
14495  *   0 on success, otherwise negative errno value.
14496  */
14497 static int
14498 flow_dv_action_update(struct rte_eth_dev *dev,
14499                         struct rte_flow_action_handle *handle,
14500                         const void *update,
14501                         struct rte_flow_error *err)
14502 {
14503         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14504         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14505         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14506         const void *action_conf;
14507
14508         switch (type) {
14509         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14510                 action_conf = ((const struct rte_flow_action *)update)->conf;
14511                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14512         case MLX5_INDIRECT_ACTION_TYPE_CT:
14513                 return __flow_dv_action_ct_update(dev, idx, update, err);
14514         default:
14515                 return rte_flow_error_set(err, ENOTSUP,
14516                                           RTE_FLOW_ERROR_TYPE_ACTION,
14517                                           NULL,
14518                                           "action type update not supported");
14519         }
14520 }
14521
14522 /**
14523  * Destroy the meter sub policy table rules.
14524  * Lock free, (mutex should be acquired by caller).
14525  *
14526  * @param[in] dev
14527  *   Pointer to Ethernet device.
14528  * @param[in] sub_policy
14529  *   Pointer to meter sub policy table.
14530  */
14531 static void
14532 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
14533                              struct mlx5_flow_meter_sub_policy *sub_policy)
14534 {
14535         struct mlx5_flow_tbl_data_entry *tbl;
14536         int i;
14537
14538         for (i = 0; i < RTE_COLORS; i++) {
14539                 if (sub_policy->color_rule[i]) {
14540                         claim_zero(mlx5_flow_os_destroy_flow
14541                                 (sub_policy->color_rule[i]));
14542                         sub_policy->color_rule[i] = NULL;
14543                 }
14544                 if (sub_policy->color_matcher[i]) {
14545                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
14546                                 typeof(*tbl), tbl);
14547                         mlx5_cache_unregister(&tbl->matchers,
14548                                       &sub_policy->color_matcher[i]->entry);
14549                         sub_policy->color_matcher[i] = NULL;
14550                 }
14551         }
14552         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14553                 if (sub_policy->rix_hrxq[i]) {
14554                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
14555                         sub_policy->rix_hrxq[i] = 0;
14556                 }
14557                 if (sub_policy->jump_tbl[i]) {
14558                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14559                         sub_policy->jump_tbl[i]);
14560                         sub_policy->jump_tbl[i] = NULL;
14561                 }
14562         }
14563         if (sub_policy->tbl_rsc) {
14564                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14565                         sub_policy->tbl_rsc);
14566                 sub_policy->tbl_rsc = NULL;
14567         }
14568 }
14569
14570 /**
14571  * Destroy policy rules, lock free,
14572  * (mutex should be acquired by caller).
14573  * Iterates all sub-policies in every meter domain.
14574  *
14575  * @param[in] dev
14576  *   Pointer to the Ethernet device structure.
14577  * @param[in] mtr_policy
14578  *   Meter policy struct.
14579  */
14580 static void
14581 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
14582                       struct mlx5_flow_meter_policy *mtr_policy)
14583 {
14584         uint32_t i, j;
14585         struct mlx5_flow_meter_sub_policy *sub_policy;
14586         uint16_t sub_policy_num;
14587
14588         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14589                 sub_policy_num = (mtr_policy->sub_policy_num >>
14590                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14591                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14592                 for (j = 0; j < sub_policy_num; j++) {
14593                         sub_policy = mtr_policy->sub_policys[i][j];
14594                         if (sub_policy)
14595                                 __flow_dv_destroy_sub_policy_rules
14596                                                 (dev, sub_policy);
14597                 }
14598         }
14599 }
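
/*
 * Editor's note: illustrative helper (not driver code) showing how the
 * per-domain sub-policy counts are packed into the single
 * mtr_policy->sub_policy_num word that the loop above unpacks.
 */
static uint16_t
example_sub_policy_count(uint32_t sub_policy_num,
                         enum mlx5_meter_domain domain)
{
        /* Each domain owns MLX5_MTR_SUB_POLICY_NUM_SHIFT bits of the word. */
        return (sub_policy_num >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
               MLX5_MTR_SUB_POLICY_NUM_MASK;
}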
14600
14601 /**
14602  * Destroy policy action, lock free,
14603  * (mutex should be acquired by caller).
14604  * Dispatcher for action type specific call.
14605  *
14606  * @param[in] dev
14607  *   Pointer to the Ethernet device structure.
14608  * @param[in] mtr_policy
14609  *   Meter policy struct.
14610  */
14611 static void
14612 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
14613                       struct mlx5_flow_meter_policy *mtr_policy)
14614 {
14615         struct rte_flow_action *rss_action;
14616         struct mlx5_flow_handle dev_handle;
14617         uint32_t i, j;
14618
14619         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14620                 if (mtr_policy->act_cnt[i].rix_mark) {
14621                         flow_dv_tag_release(dev,
14622                                 mtr_policy->act_cnt[i].rix_mark);
14623                         mtr_policy->act_cnt[i].rix_mark = 0;
14624                 }
14625                 if (mtr_policy->act_cnt[i].modify_hdr) {
14626                         dev_handle.dvh.modify_hdr =
14627                                 mtr_policy->act_cnt[i].modify_hdr;
14628                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
14629                 }
14630                 switch (mtr_policy->act_cnt[i].fate_action) {
14631                 case MLX5_FLOW_FATE_SHARED_RSS:
14632                         rss_action = mtr_policy->act_cnt[i].rss;
14633                         mlx5_free(rss_action);
14634                         break;
14635                 case MLX5_FLOW_FATE_PORT_ID:
14636                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
14637                                 flow_dv_port_id_action_resource_release(dev,
14638                                 mtr_policy->act_cnt[i].rix_port_id_action);
14639                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
14640                         }
14641                         break;
14642                 case MLX5_FLOW_FATE_DROP:
14643                 case MLX5_FLOW_FATE_JUMP:
14644                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14645                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
14646                                                 NULL;
14647                         break;
14648                 default:
14649                         /* Queue action: nothing to do. */
14650                         break;
14651                 }
14652         }
14653         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14654                 mtr_policy->dr_drop_action[j] = NULL;
14655 }
14656
14657 /**
14658  * Create policy action per domain, lock free,
14659  * (mutex should be acquired by caller).
14660  * Dispatcher for action type specific call.
14661  *
14662  * @param[in] dev
14663  *   Pointer to the Ethernet device structure.
14664  * @param[in] mtr_policy
14665  *   Meter policy struct.
14666  * @param[in] action
14667  *   Action specification used to create meter actions.
14668  * @param[out] error
14669  *   Perform verbose error reporting if not NULL. Initialized in case of
14670  *   error only.
14671  *
14672  * @return
14673  *   0 on success, otherwise negative errno value.
14674  */
14675 static int
14676 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
14677                         struct mlx5_flow_meter_policy *mtr_policy,
14678                         const struct rte_flow_action *actions[RTE_COLORS],
14679                         enum mlx5_meter_domain domain,
14680                         struct rte_mtr_error *error)
14681 {
14682         struct mlx5_priv *priv = dev->data->dev_private;
14683         struct rte_flow_error flow_err;
14684         const struct rte_flow_action *act;
14685         uint64_t action_flags = 0;
14686         struct mlx5_flow_handle dh;
14687         struct mlx5_flow dev_flow;
14688         struct mlx5_flow_dv_port_id_action_resource port_id_action;
14689         int i, ret;
14690         uint8_t egress, transfer;
14691         struct mlx5_meter_policy_action_container *act_cnt = NULL;
14692         union {
14693                 struct mlx5_flow_dv_modify_hdr_resource res;
14694                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
14695                             sizeof(struct mlx5_modification_cmd) *
14696                             (MLX5_MAX_MODIFY_NUM + 1)];
14697         } mhdr_dummy;
14698
14699         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14700         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14701         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
14702         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
14703         memset(&port_id_action, 0,
14704                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
14705         dev_flow.handle = &dh;
14706         dev_flow.dv.port_id_action = &port_id_action;
14707         dev_flow.external = true;
14708         for (i = 0; i < RTE_COLORS; i++) {
14709                 if (i < MLX5_MTR_RTE_COLORS)
14710                         act_cnt = &mtr_policy->act_cnt[i];
14711                 for (act = actions[i];
14712                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
14713                         act++) {
14714                         switch (act->type) {
14715                         case RTE_FLOW_ACTION_TYPE_MARK:
14716                         {
14717                                 uint32_t tag_be = mlx5_flow_mark_set
14718                                         (((const struct rte_flow_action_mark *)
14719                                         (act->conf))->id);
14720
14721                                 if (i >= MLX5_MTR_RTE_COLORS)
14722                                         return -rte_mtr_error_set(error,
14723                                           ENOTSUP,
14724                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14725                                           NULL,
14726                                           "cannot create policy "
14727                                           "mark action for this color");
14728                                 dev_flow.handle->mark = 1;
14729                                 if (flow_dv_tag_resource_register(dev, tag_be,
14730                                                   &dev_flow, &flow_err))
14731                                         return -rte_mtr_error_set(error,
14732                                         ENOTSUP,
14733                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14734                                         NULL,
14735                                         "cannot setup policy mark action");
14736                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
14737                                 act_cnt->rix_mark =
14738                                         dev_flow.handle->dvh.rix_tag;
14739                                 action_flags |= MLX5_FLOW_ACTION_MARK;
14740                                 break;
14741                         }
14742                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
14743                         {
14744                                 struct mlx5_flow_dv_modify_hdr_resource
14745                                         *mhdr_res = &mhdr_dummy.res;
14746
14747                                 if (i >= MLX5_MTR_RTE_COLORS)
14748                                         return -rte_mtr_error_set(error,
14749                                           ENOTSUP,
14750                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14751                                           NULL,
14752                                           "cannot create policy "
14753                                           "set tag action for this color");
14754                                 memset(mhdr_res, 0, sizeof(*mhdr_res));
14755                                 mhdr_res->ft_type = transfer ?
14756                                         MLX5DV_FLOW_TABLE_TYPE_FDB :
14757                                         egress ?
14758                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
14759                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
14760                                 if (flow_dv_convert_action_set_tag
14761                                 (dev, mhdr_res,
14762                                 (const struct rte_flow_action_set_tag *)
14763                                 act->conf,  &flow_err))
14764                                         return -rte_mtr_error_set(error,
14765                                         ENOTSUP,
14766                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14767                                         NULL, "cannot convert policy "
14768                                         "set tag action");
14769                                 if (!mhdr_res->actions_num)
14770                                         return -rte_mtr_error_set(error,
14771                                         ENOTSUP,
14772                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14773                                         NULL, "cannot find policy "
14774                                         "set tag action");
14775                                 /* create modify action if needed. */
14776                                 dev_flow.dv.group = 1;
14777                                 if (flow_dv_modify_hdr_resource_register
14778                                         (dev, mhdr_res, &dev_flow, &flow_err))
14779                                         return -rte_mtr_error_set(error,
14780                                         ENOTSUP,
14781                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14782                                         NULL, "cannot register policy "
14783                                         "set tag action");
14784                                 act_cnt->modify_hdr =
14785                                 dev_flow.handle->dvh.modify_hdr;
14786                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
14787                                 break;
14788                         }
14789                         case RTE_FLOW_ACTION_TYPE_DROP:
14790                         {
14791                                 struct mlx5_flow_mtr_mng *mtrmng =
14792                                                 priv->sh->mtrmng;
14793                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14794
14795                                 /*
14796                                  * Create the drop table with
14797                                  * METER DROP level.
14798                                  */
14799                                 if (!mtrmng->drop_tbl[domain]) {
14800                                         mtrmng->drop_tbl[domain] =
14801                                         flow_dv_tbl_resource_get(dev,
14802                                         MLX5_FLOW_TABLE_LEVEL_METER,
14803                                         egress, transfer, false, NULL, 0,
14804                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
14805                                         if (!mtrmng->drop_tbl[domain])
14806                                                 return -rte_mtr_error_set
14807                                         (error, ENOTSUP,
14808                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14809                                         NULL,
14810                                         "Failed to create meter drop table");
14811                                 }
14812                                 tbl_data = container_of
14813                                 (mtrmng->drop_tbl[domain],
14814                                 struct mlx5_flow_tbl_data_entry, tbl);
14815                                 if (i < MLX5_MTR_RTE_COLORS) {
14816                                         act_cnt->dr_jump_action[domain] =
14817                                                 tbl_data->jump.action;
14818                                         act_cnt->fate_action =
14819                                                 MLX5_FLOW_FATE_DROP;
14820                                 }
14821                                 if (i == RTE_COLOR_RED)
14822                                         mtr_policy->dr_drop_action[domain] =
14823                                                 tbl_data->jump.action;
14824                                 action_flags |= MLX5_FLOW_ACTION_DROP;
14825                                 break;
14826                         }
14827                         case RTE_FLOW_ACTION_TYPE_QUEUE:
14828                         {
14829                                 if (i >= MLX5_MTR_RTE_COLORS)
14830                                         return -rte_mtr_error_set(error,
14831                                         ENOTSUP,
14832                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14833                                         NULL, "cannot create policy "
14834                                         "fate queue for this color");
14835                                 act_cnt->queue =
14836                                 ((const struct rte_flow_action_queue *)
14837                                         (act->conf))->index;
14838                                 act_cnt->fate_action =
14839                                         MLX5_FLOW_FATE_QUEUE;
14840                                 dev_flow.handle->fate_action =
14841                                         MLX5_FLOW_FATE_QUEUE;
14842                                 mtr_policy->is_queue = 1;
14843                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
14844                                 break;
14845                         }
14846                         case RTE_FLOW_ACTION_TYPE_RSS:
14847                         {
14848                                 int rss_size;
14849
14850                                 if (i >= MLX5_MTR_RTE_COLORS)
14851                                         return -rte_mtr_error_set(error,
14852                                           ENOTSUP,
14853                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14854                                           NULL,
14855                                           "cannot create policy "
14856                                           "rss action for this color");
14857                                 /*
14858                                  * Save RSS conf into policy struct
14859                                  * for translate stage.
14860                                  */
14861                                 rss_size = (int)rte_flow_conv
14862                                         (RTE_FLOW_CONV_OP_ACTION,
14863                                         NULL, 0, act, &flow_err);
14864                                 if (rss_size <= 0)
14865                                         return -rte_mtr_error_set(error,
14866                                           ENOTSUP,
14867                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14868                                           NULL, "wrong rss action "
14869                                           "struct size");
14870                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
14871                                                 rss_size, 0, SOCKET_ID_ANY);
14872                                 if (!act_cnt->rss)
14873                                         return -rte_mtr_error_set(error,
14874                                           ENOTSUP,
14875                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14876                                           NULL,
14877                                           "failed to allocate rss action memory");
14878                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
14879                                         act_cnt->rss, rss_size,
14880                                         act, &flow_err);
14881                                 if (ret < 0)
14882                                         return -rte_mtr_error_set(error,
14883                                           ENOTSUP,
14884                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14885                                           NULL, "failed to save "
14886                                           "rss action into policy struct");
14887                                 act_cnt->fate_action =
14888                                         MLX5_FLOW_FATE_SHARED_RSS;
14889                                 action_flags |= MLX5_FLOW_ACTION_RSS;
14890                                 break;
14891                         }
14892                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
14893                         {
14894                                 struct mlx5_flow_dv_port_id_action_resource
14895                                         port_id_resource;
14896                                 uint32_t port_id = 0;
14897
14898                                 if (i >= MLX5_MTR_RTE_COLORS)
14899                                         return -rte_mtr_error_set(error,
14900                                         ENOTSUP,
14901                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14902                                         NULL, "cannot create policy "
14903                                         "port action for this color");
14904                                 memset(&port_id_resource, 0,
14905                                         sizeof(port_id_resource));
14906                                 if (flow_dv_translate_action_port_id(dev, act,
14907                                                 &port_id, &flow_err))
14908                                         return -rte_mtr_error_set(error,
14909                                         ENOTSUP,
14910                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14911                                         NULL, "cannot translate "
14912                                         "policy port action");
14913                                 port_id_resource.port_id = port_id;
14914                                 if (flow_dv_port_id_action_resource_register
14915                                         (dev, &port_id_resource,
14916                                         &dev_flow, &flow_err))
14917                                         return -rte_mtr_error_set(error,
14918                                         ENOTSUP,
14919                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14920                                         NULL, "cannot setup "
14921                                         "policy port action");
14922                                 act_cnt->rix_port_id_action =
14923                                         dev_flow.handle->rix_port_id_action;
14924                                 act_cnt->fate_action =
14925                                         MLX5_FLOW_FATE_PORT_ID;
14926                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
14927                                 break;
14928                         }
14929                         case RTE_FLOW_ACTION_TYPE_JUMP:
14930                         {
14931                                 uint32_t jump_group = 0;
14932                                 uint32_t table = 0;
14933                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14934                                 struct flow_grp_info grp_info = {
14935                                         .external = !!dev_flow.external,
14936                                         .transfer = !!transfer,
14937                                         .fdb_def_rule = !!priv->fdb_def_rule,
14938                                         .std_tbl_fix = 0,
14939                                         .skip_scale = dev_flow.skip_scale &
14940                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
14941                                 };
14942                                 struct mlx5_flow_meter_sub_policy *sub_policy =
14943                                 mtr_policy->sub_policys[domain][0];
14944
14945                                 if (i >= MLX5_MTR_RTE_COLORS)
14946                                         return -rte_mtr_error_set(error,
14947                                           ENOTSUP,
14948                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14949                                           NULL,
14950                                           "cannot create policy "
14951                                           "jump action for this color");
14952                                 jump_group =
14953                                 ((const struct rte_flow_action_jump *)
14954                                                         act->conf)->group;
14955                                 if (mlx5_flow_group_to_table(dev, NULL,
14956                                                        jump_group,
14957                                                        &table,
14958                                                        &grp_info, &flow_err))
14959                                         return -rte_mtr_error_set(error,
14960                                         ENOTSUP,
14961                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14962                                         NULL, "cannot setup "
14963                                         "policy jump action");
14964                                 sub_policy->jump_tbl[i] =
14965                                 flow_dv_tbl_resource_get(dev,
14966                                         table, egress,
14967                                         transfer,
14968                                         !!dev_flow.external,
14969                                         NULL, jump_group, 0,
14970                                         0, &flow_err);
14971                                 if (!sub_policy->jump_tbl[i])
14972                                         return -rte_mtr_error_set(error,
14974                                         ENOTSUP,
14975                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14976                                         NULL, "cannot create jump action");
14977                                 tbl_data = container_of
14978                                 (sub_policy->jump_tbl[i],
14979                                 struct mlx5_flow_tbl_data_entry, tbl);
14980                                 act_cnt->dr_jump_action[domain] =
14981                                         tbl_data->jump.action;
14982                                 act_cnt->fate_action =
14983                                         MLX5_FLOW_FATE_JUMP;
14984                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
14985                                 break;
14986                         }
14987                         default:
14988                                 return -rte_mtr_error_set(error, ENOTSUP,
14989                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14990                                           NULL, "action type not supported");
14991                         }
14992                 }
14993         }
14994         return 0;
14995 }
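
/*
 * Editor's note: a hedged application-side sketch (names assumed) of a
 * policy whose per-color actions feed the dispatcher above: green
 * traffic jumps to group 1, red traffic is dropped, yellow is left
 * unspecified.
 */
static int
example_meter_policy_add(uint16_t port_id, uint32_t policy_id,
                         struct rte_mtr_error *error)
{
        const struct rte_flow_action_jump jump = { .group = 1 };
        const struct rte_flow_action green_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action red_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params params = {
                .actions = {
                        [RTE_COLOR_GREEN] = green_acts,
                        [RTE_COLOR_RED] = red_acts,
                },
        };

        return rte_mtr_meter_policy_add(port_id, policy_id, &params, error);
}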
14996
14997 /**
14998  * Create the policy actions for all configured meter domains,
14999  * lock free, (mutex should be acquired by caller).
15000  * Calls the per-domain dispatcher above for each domain in use.
15001  *
15002  * @param[in] dev
15003  *   Pointer to the Ethernet device structure.
15004  * @param[in] mtr_policy
15005  *   Meter policy struct.
15006  * @param[in] action
15007  *   Action specification used to create meter actions.
15008  * @param[out] error
15009  *   Perform verbose error reporting if not NULL. Initialized in case of
15010  *   error only.
15011  *
15012  * @return
15013  *   0 on success, otherwise negative errno value.
15014  */
15015 static int
15016 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15017                       struct mlx5_flow_meter_policy *mtr_policy,
15018                       const struct rte_flow_action *actions[RTE_COLORS],
15019                       struct rte_mtr_error *error)
15020 {
15021         int ret, i;
15022         uint16_t sub_policy_num;
15023
15024         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15025                 sub_policy_num = (mtr_policy->sub_policy_num >>
15026                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15027                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15028                 if (sub_policy_num) {
15029                         ret = __flow_dv_create_domain_policy_acts(dev,
15030                                 mtr_policy, actions,
15031                                 (enum mlx5_meter_domain)i, error);
15032                         if (ret)
15033                                 return ret;
15034                 }
15035         }
15036         return 0;
15037 }
15038
15039 /**
15040  * Query a DV flow rule for its statistics via DevX.
15041  *
15042  * @param[in] dev
15043  *   Pointer to Ethernet device.
15044  * @param[in] cnt_idx
15045  *   Index to the flow counter.
15046  * @param[out] data
15047  *   Data retrieved by the query.
15048  * @param[out] error
15049  *   Perform verbose error reporting if not NULL.
15050  *
15051  * @return
15052  *   0 on success, a negative errno value otherwise and rte_errno is set.
15053  */
15054 static int
15055 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15056                     struct rte_flow_error *error)
15057 {
15058         struct mlx5_priv *priv = dev->data->dev_private;
15059         struct rte_flow_query_count *qc = data;
15060
15061         if (!priv->config.devx)
15062                 return rte_flow_error_set(error, ENOTSUP,
15063                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15064                                           NULL,
15065                                           "counters are not supported");
15066         if (cnt_idx) {
15067                 uint64_t pkts, bytes;
15068                 struct mlx5_flow_counter *cnt;
15069                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15070
15071                 if (err)
15072                         return rte_flow_error_set(error, -err,
15073                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15074                                         NULL, "cannot read counters");
15075                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15076                 qc->hits_set = 1;
15077                 qc->bytes_set = 1;
15078                 qc->hits = pkts - cnt->hits;
15079                 qc->bytes = bytes - cnt->bytes;
15080                 if (qc->reset) {
15081                         cnt->hits = pkts;
15082                         cnt->bytes = bytes;
15083                 }
15084                 return 0;
15085         }
15086         return rte_flow_error_set(error, EINVAL,
15087                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15088                                   NULL,
15089                                   "counters are not available");
15090 }
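
/*
 * Editor's note: a hedged usage sketch (wrapper name assumed). Counter
 * queries return deltas relative to the last reset, which is why the
 * function above subtracts the saved cnt->hits/cnt->bytes baseline.
 */
static int
example_flow_hits(uint16_t port_id, struct rte_flow *flow,
                  uint64_t *hits, uint64_t *bytes)
{
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_query_count qc = { .reset = 1 }; /* re-arm baseline */
        struct rte_flow_error error;

        if (rte_flow_query(port_id, flow, actions, &qc, &error))
                return -rte_errno;
        *hits = qc.hits_set ? qc.hits : 0;
        *bytes = qc.bytes_set ? qc.bytes : 0;
        return 0;
}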
15091
15092 static int
15093 flow_dv_action_query(struct rte_eth_dev *dev,
15094                      const struct rte_flow_action_handle *handle, void *data,
15095                      struct rte_flow_error *error)
15096 {
15097         struct mlx5_age_param *age_param;
15098         struct rte_flow_query_age *resp;
15099         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15100         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15101         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15102         struct mlx5_priv *priv = dev->data->dev_private;
15103         struct mlx5_aso_ct_action *ct;
15104         uint16_t owner;
15105         uint32_t dev_idx;
15106
15107         switch (type) {
15108         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15109                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15110                 resp = data;
15111                 resp->aged = __atomic_load_n(&age_param->state,
15112                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15113                                                                           1 : 0;
15114                 resp->sec_since_last_hit_valid = !resp->aged;
15115                 if (resp->sec_since_last_hit_valid)
15116                         resp->sec_since_last_hit = __atomic_load_n
15117                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15118                 return 0;
15119         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15120                 return flow_dv_query_count(dev, idx, data, error);
15121         case MLX5_INDIRECT_ACTION_TYPE_CT:
15122                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15123                 if (owner != PORT_ID(priv))
15124                         return rte_flow_error_set(error, EACCES,
15125                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15126                                         NULL,
15127                                         "CT object owned by another port");
15128                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15129                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15130                 MLX5_ASSERT(ct);
15131                 if (!ct->refcnt)
15132                         return rte_flow_error_set(error, EFAULT,
15133                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15134                                         NULL,
15135                                         "CT object is inactive");
15136                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15137                                                         ct->peer;
15138                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15139                                                         ct->is_original;
15140                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15141                         return rte_flow_error_set(error, EIO,
15142                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15143                                         NULL,
15144                                         "Failed to query CT context");
15145                 return 0;
15146         default:
15147                 return rte_flow_error_set(error, ENOTSUP,
15148                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15149                                           "action type query not supported");
15150         }
15151 }
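
/*
 * Editor's note: a minimal polling sketch (assumed helper name) for an
 * indirect AGE action; the dispatcher above fills rte_flow_query_age.
 */
static int
example_age_is_aged(uint16_t port_id,
                    const struct rte_flow_action_handle *handle)
{
        struct rte_flow_query_age resp = { 0 };
        struct rte_flow_error error;

        if (rte_flow_action_handle_query(port_id, handle, &resp, &error))
                return -rte_errno;
        /* aged == 1 once the timeout elapsed without traffic hitting it. */
        return resp.aged;
}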
15152
15153 /**
15154  * Query a flow rule AGE action for aging information.
15155  *
15156  * @param[in] dev
15157  *   Pointer to Ethernet device.
15158  * @param[in] flow
15159  *   Pointer to the sub flow.
15160  * @param[out] data
15161  *   Data retrieved by the query.
15162  * @param[out] error
15163  *   Perform verbose error reporting if not NULL.
15164  *
15165  * @return
15166  *   0 on success, a negative errno value otherwise and rte_errno is set.
15167  */
15168 static int
15169 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15170                   void *data, struct rte_flow_error *error)
15171 {
15172         struct rte_flow_query_age *resp = data;
15173         struct mlx5_age_param *age_param;
15174
15175         if (flow->age) {
15176                 struct mlx5_aso_age_action *act =
15177                                      flow_aso_age_get_by_idx(dev, flow->age);
15178
15179                 age_param = &act->age_params;
15180         } else if (flow->counter) {
15181                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15182
15183                 if (!age_param || !age_param->timeout)
15184                         return rte_flow_error_set
15185                                         (error, EINVAL,
15186                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15187                                          NULL, "cannot read age data");
15188         } else {
15189                 return rte_flow_error_set(error, EINVAL,
15190                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15191                                           NULL, "age data not available");
15192         }
15193         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15194                                      AGE_TMOUT ? 1 : 0;
15195         resp->sec_since_last_hit_valid = !resp->aged;
15196         if (resp->sec_since_last_hit_valid)
15197                 resp->sec_since_last_hit = __atomic_load_n
15198                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15199         return 0;
15200 }
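
/*
 * Editor's sketch (illustrative application-side usage, not driver code):
 * reading the aging state of a flow rule through rte_flow_query(), which
 * ends up in flow_dv_query_age() above. Port id and flow handle are
 * assumed valid; the helper name is hypothetical.
 */
static int
sketch_query_flow_age(uint16_t port_id, struct rte_flow *flow)
{
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_query_age resp;
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, actions, &resp, &error))
		return -1;
	/* sec_since_last_hit is only valid while the flow has not aged. */
	return resp.aged;
}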
15201
15202 /**
15203  * Query a flow.
15204  *
15205  * @see rte_flow_query()
15206  * @see rte_flow_ops
15207  */
15208 static int
15209 flow_dv_query(struct rte_eth_dev *dev,
15210               struct rte_flow *flow,
15211               const struct rte_flow_action *actions,
15212               void *data,
15213               struct rte_flow_error *error)
15214 {
15215         int ret = -EINVAL;
15216
15217         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15218                 switch (actions->type) {
15219                 case RTE_FLOW_ACTION_TYPE_VOID:
15220                         break;
15221                 case RTE_FLOW_ACTION_TYPE_COUNT:
15222                         ret = flow_dv_query_count(dev, flow->counter, data,
15223                                                   error);
15224                         break;
15225                 case RTE_FLOW_ACTION_TYPE_AGE:
15226                         ret = flow_dv_query_age(dev, flow, data, error);
15227                         break;
15228                 default:
15229                         return rte_flow_error_set(error, ENOTSUP,
15230                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15231                                                   actions,
15232                                                   "action not supported");
15233                 }
15234         }
15235         return ret;
15236 }
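
/*
 * Editor's sketch (illustrative application-side usage, not driver code):
 * querying the COUNT action of a flow through the same entry point;
 * flow_dv_query() above resolves the counter from flow->counter. The
 * helper name is hypothetical.
 */
static int
sketch_query_flow_count(uint16_t port_id, struct rte_flow *flow,
			uint64_t *hits, uint64_t *bytes)
{
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_query_count resp = { .reset = 0 };
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, actions, &resp, &error))
		return -1;
	*hits = resp.hits;
	*bytes = resp.bytes;
	return 0;
}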
15237
15238 /**
15239  * Destroy the meter table set.
15240  * Lock free, (mutex should be acquired by caller).
15241  *
15242  * @param[in] dev
15243  *   Pointer to Ethernet device.
15244  * @param[in] fm
15245  *   Meter information table.
15246  */
15247 static void
15248 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15249                         struct mlx5_flow_meter_info *fm)
15250 {
15251         struct mlx5_priv *priv = dev->data->dev_private;
15252         int i;
15253
15254         if (!fm || !priv->config.dv_flow_en)
15255                 return;
15256         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15257                 if (fm->drop_rule[i]) {
15258                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15259                         fm->drop_rule[i] = NULL;
15260                 }
15261         }
15262 }
15263
15264 static void
15265 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15266 {
15267         struct mlx5_priv *priv = dev->data->dev_private;
15268         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15269         struct mlx5_flow_tbl_data_entry *tbl;
15270         int i, j;
15271
15272         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15273                 if (mtrmng->def_rule[i]) {
15274                         claim_zero(mlx5_flow_os_destroy_flow
15275                                         (mtrmng->def_rule[i]));
15276                         mtrmng->def_rule[i] = NULL;
15277                 }
15278                 if (mtrmng->def_matcher[i]) {
15279                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15280                                 struct mlx5_flow_tbl_data_entry, tbl);
15281                         mlx5_cache_unregister(&tbl->matchers,
15282                                       &mtrmng->def_matcher[i]->entry);
15283                         mtrmng->def_matcher[i] = NULL;
15284                 }
15285                 for (j = 0; j < MLX5_REG_BITS; j++) {
15286                         if (mtrmng->drop_matcher[i][j]) {
15287                                 tbl =
15288                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15289                                              struct mlx5_flow_tbl_data_entry,
15290                                              tbl);
15291                                 mlx5_cache_unregister(&tbl->matchers,
15292                                         &mtrmng->drop_matcher[i][j]->entry);
15293                                 mtrmng->drop_matcher[i][j] = NULL;
15294                         }
15295                 }
15296                 if (mtrmng->drop_tbl[i]) {
15297                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15298                                 mtrmng->drop_tbl[i]);
15299                         mtrmng->drop_tbl[i] = NULL;
15300                 }
15301         }
15302 }
15303
15304 /* Number of meter flow actions, count and jump or count and drop. */
15305 #define METER_ACTIONS 2
15306
15307 static void
15308 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15309                               enum mlx5_meter_domain domain)
15310 {
15311         struct mlx5_priv *priv = dev->data->dev_private;
15312         struct mlx5_flow_meter_def_policy *def_policy =
15313                         priv->sh->mtrmng->def_policy[domain];
15314
15315         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15316         mlx5_free(def_policy);
15317         priv->sh->mtrmng->def_policy[domain] = NULL;
15318 }
15319
15320 /**
15321  * Destroy the default policy table set.
15322  *
15323  * @param[in] dev
15324  *   Pointer to Ethernet device.
15325  */
15326 static void
15327 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15328 {
15329         struct mlx5_priv *priv = dev->data->dev_private;
15330         int i;
15331
15332         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15333                 if (priv->sh->mtrmng->def_policy[i])
15334                         __flow_dv_destroy_domain_def_policy(dev,
15335                                         (enum mlx5_meter_domain)i);
15336         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15337 }
15338
15339 static int
15340 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15341                         uint32_t color_reg_c_idx,
15342                         enum rte_color color, void *matcher_object,
15343                         int actions_n, void *actions,
15344                         bool is_default_policy, void **rule,
15345                         const struct rte_flow_attr *attr)
15346 {
15347         int ret;
15348         struct mlx5_flow_dv_match_params value = {
15349                 .size = sizeof(value.buf) -
15350                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15351         };
15352         struct mlx5_flow_dv_match_params matcher = {
15353                 .size = sizeof(matcher.buf) -
15354                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15355         };
15356         struct mlx5_priv *priv = dev->data->dev_private;
15357
15358         if (!is_default_policy && (priv->representor || priv->master)) {
15359                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15360                                                    value.buf, NULL, attr)) {
15361                         DRV_LOG(ERR,
15362                         "Failed to create meter policy flow with port.");
15363                         return -1;
15364                 }
15365         }
15366         flow_dv_match_meta_reg(matcher.buf, value.buf,
15367                                 (enum modify_reg)color_reg_c_idx,
15368                                 rte_col_2_mlx5_col(color),
15369                                 UINT32_MAX);
15370         ret = mlx5_flow_os_create_flow(matcher_object,
15371                         (void *)&value, actions_n, actions, rule);
15372         if (ret) {
15373                 DRV_LOG(ERR, "Failed to create meter policy flow.");
15374                 return -1;
15375         }
15376         return 0;
15377 }
15378
15379 static int
15380 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
15381                         uint32_t color_reg_c_idx,
15382                         uint16_t priority,
15383                         struct mlx5_flow_meter_sub_policy *sub_policy,
15384                         const struct rte_flow_attr *attr,
15385                         bool is_default_policy,
15386                         struct rte_flow_error *error)
15387 {
15388         struct mlx5_cache_entry *entry;
15389         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
15390         struct mlx5_flow_dv_matcher matcher = {
15391                 .mask = {
15392                         .size = sizeof(matcher.mask.buf) -
15393                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15394                 },
15395                 .tbl = tbl_rsc,
15396         };
15397         struct mlx5_flow_dv_match_params value = {
15398                 .size = sizeof(value.buf) -
15399                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15400         };
15401         struct mlx5_flow_cb_ctx ctx = {
15402                 .error = error,
15403                 .data = &matcher,
15404         };
15405         struct mlx5_flow_tbl_data_entry *tbl_data;
15406         struct mlx5_priv *priv = dev->data->dev_private;
15407         uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
15408
15409         if (!is_default_policy && (priv->representor || priv->master)) {
15410                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
15411                                                    value.buf, NULL, attr)) {
15412                         DRV_LOG(ERR,
15413                         "Failed to register meter drop matcher with port.");
15414                         return -1;
15415                 }
15416         }
15417         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
15418         if (priority < RTE_COLOR_RED)
15419                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15420                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
15421         matcher.priority = priority;
15422         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
15423                                         matcher.mask.size);
15424         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15425         if (!entry) {
15426                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
15427                 return -1;
15428         }
15429         sub_policy->color_matcher[priority] =
15430                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
15431         return 0;
15432 }
15433
15434 /**
15435  * Create the policy rules per domain.
15436  *
15437  * @param[in] dev
15438  *   Pointer to Ethernet device.
15439  * @param[in] sub_policy
15440  *   Pointer to the sub-policy table.
15441  * @param[in] egress
15442  *   Direction of the table.
15443  * @param[in] transfer
15444  *   E-Switch or NIC flow.
15445  * @param[in] is_default_policy
15446  *   Whether the rules belong to the default meter policy.
15447  * @param[in] acts
15446  *   Pointer to policy action list per color.
15447  *
15448  * @return
15449  *   0 on success, -1 otherwise.
15450  */
15451 static int
15452 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
15453                 struct mlx5_flow_meter_sub_policy *sub_policy,
15454                 uint8_t egress, uint8_t transfer, bool is_default_policy,
15455                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
15456 {
15457         struct rte_flow_error flow_err;
15458         uint32_t color_reg_c_idx;
15459         struct rte_flow_attr attr = {
15460                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
15461                 .priority = 0,
15462                 .ingress = 0,
15463                 .egress = !!egress,
15464                 .transfer = !!transfer,
15465                 .reserved = 0,
15466         };
15467         int i;
15468         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
15469
15470         if (ret < 0)
15471                 return -1;
15472         /* Create policy table with POLICY level. */
15473         if (!sub_policy->tbl_rsc)
15474                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
15475                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
15476                                 egress, transfer, false, NULL, 0, 0,
15477                                 sub_policy->idx, &flow_err);
15478         if (!sub_policy->tbl_rsc) {
15479                 DRV_LOG(ERR,
15480                         "Failed to create meter sub policy table.");
15481                 return -1;
15482         }
15483         /* Prepare matchers. */
15484         color_reg_c_idx = ret;
15485         for (i = 0; i < RTE_COLORS; i++) {
15486                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
15487                         continue;
15488                 attr.priority = i;
15489                 if (!sub_policy->color_matcher[i]) {
15490                         /* Create matchers for Color. */
15491                         if (__flow_dv_create_policy_matcher(dev,
15492                                 color_reg_c_idx, i, sub_policy,
15493                                 &attr, is_default_policy, &flow_err))
15494                                 return -1;
15495                 }
15496                 /* Create flow, matching color. */
15497                 if (acts[i].actions_n)
15498                         if (__flow_dv_create_policy_flow(dev,
15499                                 color_reg_c_idx, (enum rte_color)i,
15500                                 sub_policy->color_matcher[i]->matcher_object,
15501                                 acts[i].actions_n,
15502                                 acts[i].dv_actions,
15503                                 is_default_policy,
15504                                 &sub_policy->color_rule[i],
15505                                 &attr))
15506                                 return -1;
15507         }
15508         return 0;
15509 }
15510
15511 static int
15512 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
15513                         struct mlx5_flow_meter_policy *mtr_policy,
15514                         struct mlx5_flow_meter_sub_policy *sub_policy,
15515                         uint32_t domain)
15516 {
15517         struct mlx5_priv *priv = dev->data->dev_private;
15518         struct mlx5_meter_policy_acts acts[RTE_COLORS];
15519         struct mlx5_flow_dv_tag_resource *tag;
15520         struct mlx5_flow_dv_port_id_action_resource *port_action;
15521         struct mlx5_hrxq *hrxq;
15522         uint8_t egress, transfer;
15523         int i;
15524
15525         for (i = 0; i < RTE_COLORS; i++) {
15526                 acts[i].actions_n = 0;
15527                 if (i == RTE_COLOR_YELLOW)
15528                         continue;
15529                 if (i == RTE_COLOR_RED) {
15530                         /* Only support drop on red. */
15531                         acts[i].dv_actions[0] =
15532                         mtr_policy->dr_drop_action[domain];
15533                         acts[i].actions_n = 1;
15534                         continue;
15535                 }
15536                 if (mtr_policy->act_cnt[i].rix_mark) {
15537                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
15538                                         mtr_policy->act_cnt[i].rix_mark);
15539                         if (!tag) {
15540                                 DRV_LOG(ERR, "Failed to find "
15541                                 "mark action for policy.");
15542                                 return -1;
15543                         }
15544                         acts[i].dv_actions[acts[i].actions_n] =
15545                                                 tag->action;
15546                         acts[i].actions_n++;
15547                 }
15548                 if (mtr_policy->act_cnt[i].modify_hdr) {
15549                         acts[i].dv_actions[acts[i].actions_n] =
15550                         mtr_policy->act_cnt[i].modify_hdr->action;
15551                         acts[i].actions_n++;
15552                 }
15553                 if (mtr_policy->act_cnt[i].fate_action) {
15554                         switch (mtr_policy->act_cnt[i].fate_action) {
15555                         case MLX5_FLOW_FATE_PORT_ID:
15556                                 port_action = mlx5_ipool_get
15557                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
15558                                 mtr_policy->act_cnt[i].rix_port_id_action);
15559                                 if (!port_action) {
15560                                         DRV_LOG(ERR, "Failed to find "
15561                                                 "port action for policy.");
15562                                         return -1;
15563                                 }
15564                                 acts[i].dv_actions[acts[i].actions_n] =
15565                                 port_action->action;
15566                                 acts[i].actions_n++;
15567                                 break;
15568                         case MLX5_FLOW_FATE_DROP:
15569                         case MLX5_FLOW_FATE_JUMP:
15570                                 acts[i].dv_actions[acts[i].actions_n] =
15571                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
15572                                 acts[i].actions_n++;
15573                                 break;
15574                         case MLX5_FLOW_FATE_SHARED_RSS:
15575                         case MLX5_FLOW_FATE_QUEUE:
15576                                 hrxq = mlx5_ipool_get
15577                                 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
15578                                 sub_policy->rix_hrxq[i]);
15579                                 if (!hrxq) {
15580                                         DRV_LOG(ERR, "Failed to find "
15581                                                 "queue action for policy.");
15582                                         return -1;
15583                                 }
15584                                 acts[i].dv_actions[acts[i].actions_n] =
15585                                 hrxq->action;
15586                                 acts[i].actions_n++;
15587                                 break;
15588                         default:
15589                                 /* Other fate actions: nothing to do. */
15590                                 break;
15591                         }
15592                 }
15593         }
15594         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15595         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15596         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
15597                                 egress, transfer, false, acts)) {
15598                 DRV_LOG(ERR,
15599                 "Failed to create policy rules per domain.");
15600                 return -1;
15601         }
15602         return 0;
15603 }
15604
15605 /**
15606  * Create the policy rules.
15607  *
15608  * @param[in] dev
15609  *   Pointer to Ethernet device.
15610  * @param[in,out] mtr_policy
15611  *   Pointer to meter policy table.
15612  *
15613  * @return
15614  *   0 on success, -1 otherwise.
15615  */
15616 static int
15617 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
15618                              struct mlx5_flow_meter_policy *mtr_policy)
15619 {
15620         int i;
15621         uint16_t sub_policy_num;
15622
15623         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15624                 sub_policy_num = (mtr_policy->sub_policy_num >>
15625                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15626                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15627                 if (!sub_policy_num)
15628                         continue;
15629                 /* Prepare actions list and create policy rules. */
15630                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
15631                         mtr_policy->sub_policys[i][0], i)) {
15632                         DRV_LOG(ERR,
15633                         "Failed to create policy action list per domain.");
15634                         return -1;
15635                 }
15636         }
15637         return 0;
15638 }
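
/*
 * Editor's sketch (illustrative only): mtr_policy->sub_policy_num packs one
 * small per-domain counter into a single 32-bit word,
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per domain. The hypothetical helpers
 * below mirror the open-coded shift/mask arithmetic used in this file.
 */
static inline uint16_t
sketch_sub_policy_num_get(uint32_t packed, uint32_t domain)
{
	return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
	       MLX5_MTR_SUB_POLICY_NUM_MASK;
}

static inline uint32_t
sketch_sub_policy_num_set(uint32_t packed, uint32_t domain, uint16_t num)
{
	packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
		    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
	return packed | (((uint32_t)num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
			 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
}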
15639
15640 static int
15641 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
15642 {
15643         struct mlx5_priv *priv = dev->data->dev_private;
15644         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15645         struct mlx5_flow_meter_def_policy *def_policy;
15646         struct mlx5_flow_tbl_resource *jump_tbl;
15647         struct mlx5_flow_tbl_data_entry *tbl_data;
15648         uint8_t egress, transfer;
15649         struct rte_flow_error error;
15650         struct mlx5_meter_policy_acts acts[RTE_COLORS];
15651         int ret;
15652
15653         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15654         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15655         def_policy = mtrmng->def_policy[domain];
15656         if (!def_policy) {
15657                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
15658                         sizeof(struct mlx5_flow_meter_def_policy),
15659                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
15660                 if (!def_policy) {
15661                         DRV_LOG(ERR, "Failed to alloc "
15662                                         "default policy table.");
15663                         goto def_policy_error;
15664                 }
15665                 mtrmng->def_policy[domain] = def_policy;
15666                 /* Create the meter suffix table with SUFFIX level. */
15667                 jump_tbl = flow_dv_tbl_resource_get(dev,
15668                                 MLX5_FLOW_TABLE_LEVEL_METER,
15669                                 egress, transfer, false, NULL, 0,
15670                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
15671                 if (!jump_tbl) {
15672                         DRV_LOG(ERR,
15673                                 "Failed to create meter suffix table.");
15674                         goto def_policy_error;
15675                 }
15676                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
15677                 tbl_data = container_of(jump_tbl,
15678                                 struct mlx5_flow_tbl_data_entry, tbl);
15679                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
15680                                                 tbl_data->jump.action;
15681                 acts[RTE_COLOR_GREEN].dv_actions[0] =
15682                                                 tbl_data->jump.action;
15683                 acts[RTE_COLOR_GREEN].actions_n = 1;
15684                 /* Create jump action to the drop table. */
15685                 if (!mtrmng->drop_tbl[domain]) {
15686                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
15687                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
15688                                 egress, transfer, false, NULL, 0,
15689                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
15690                         if (!mtrmng->drop_tbl[domain]) {
15691                                 DRV_LOG(ERR, "Failed to create "
15692                                 "meter drop table for default policy.");
15693                                 goto def_policy_error;
15694                         }
15695                 }
15696                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15697                                 struct mlx5_flow_tbl_data_entry, tbl);
15698                 def_policy->dr_jump_action[RTE_COLOR_RED] =
15699                                                 tbl_data->jump.action;
15700                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
15701                 acts[RTE_COLOR_RED].actions_n = 1;
15702                 /* Create default policy rules. */
15703                 ret = __flow_dv_create_domain_policy_rules(dev,
15704                                         &def_policy->sub_policy,
15705                                         egress, transfer, true, acts);
15706                 if (ret) {
15707                         DRV_LOG(ERR, "Failed to create "
15708                                 "default policy rules.");
15709                         goto def_policy_error;
15710                 }
15711         }
15712         return 0;
15713 def_policy_error:
15714         __flow_dv_destroy_domain_def_policy(dev,
15715                         (enum mlx5_meter_domain)domain);
15716         return -1;
15717 }
15718
15719 /**
15720  * Create the default policy table set.
15721  *
15722  * @param[in] dev
15723  *   Pointer to Ethernet device.
15724  * @return
15725  *   0 on success, -1 otherwise.
15726  */
15727 static int
15728 flow_dv_create_def_policy(struct rte_eth_dev *dev)
15729 {
15730         struct mlx5_priv *priv = dev->data->dev_private;
15731         int i;
15732
15733         /* Non-termination policy table. */
15734         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15735                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
15736                         continue;
15737                 if (__flow_dv_create_domain_def_policy(dev, i)) {
15738                         DRV_LOG(ERR,
15739                         "Failed to create default policy");
15740                         return -1;
15741                 }
15742         }
15743         return 0;
15744 }
15745
15746 /**
15747  * Create the needed meter tables.
15748  * Lock free, (mutex should be acquired by caller).
15749  *
15750  * @param[in] dev
15751  *   Pointer to Ethernet device.
15752  * @param[in] fm
15753  *   Meter information table.
15754  * @param[in] mtr_idx
15755  *   Meter index.
15756  * @param[in] domain_bitmap
15757  *   Domain bitmap.
15758  * @return
15759  *   0 on success, -1 otherwise.
15760  */
15761 static int
15762 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
15763                         struct mlx5_flow_meter_info *fm,
15764                         uint32_t mtr_idx,
15765                         uint8_t domain_bitmap)
15766 {
15767         struct mlx5_priv *priv = dev->data->dev_private;
15768         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15769         struct rte_flow_error error;
15770         struct mlx5_flow_tbl_data_entry *tbl_data;
15771         uint8_t egress, transfer;
15772         void *actions[METER_ACTIONS];
15773         int domain, ret, i;
15774         struct mlx5_flow_counter *cnt;
15775         struct mlx5_flow_dv_match_params value = {
15776                 .size = sizeof(value.buf) -
15777                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15778         };
15779         struct mlx5_flow_dv_match_params matcher_para = {
15780                 .size = sizeof(matcher_para.buf) -
15781                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15782         };
15783         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
15784                                                      0, &error);
15785         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
15786         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
15787         struct mlx5_cache_entry *entry;
15788         struct mlx5_flow_dv_matcher matcher = {
15789                 .mask = {
15790                         .size = sizeof(matcher.mask.buf) -
15791                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15792                 },
15793         };
15794         struct mlx5_flow_dv_matcher *drop_matcher;
15795         struct mlx5_flow_cb_ctx ctx = {
15796                 .error = &error,
15797                 .data = &matcher,
15798         };
15799
15800         if (!priv->mtr_en || mtr_id_reg_c < 0) {
15801                 rte_errno = ENOTSUP;
15802                 return -1;
15803         }
15804         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
15805                 if (!(domain_bitmap & (1 << domain)) ||
15806                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
15807                         continue;
15808                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15809                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15810                 /* Create the drop table with METER DROP level. */
15811                 if (!mtrmng->drop_tbl[domain]) {
15812                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
15813                                         MLX5_FLOW_TABLE_LEVEL_METER,
15814                                         egress, transfer, false, NULL, 0,
15815                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
15816                         if (!mtrmng->drop_tbl[domain]) {
15817                                 DRV_LOG(ERR, "Failed to create meter drop table.");
15818                                 goto policy_error;
15819                         }
15820                 }
15821                 /* Create default matcher in drop table. */
15822                 matcher.tbl = mtrmng->drop_tbl[domain];
15823                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15824                                 struct mlx5_flow_tbl_data_entry, tbl);
15825                 if (!mtrmng->def_matcher[domain]) {
15826                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15827                                        (enum modify_reg)mtr_id_reg_c,
15828                                        0, 0);
15829                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
15830                         matcher.crc = rte_raw_cksum
15831                                         ((const void *)matcher.mask.buf,
15832                                         matcher.mask.size);
15833                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15834                         if (!entry) {
15835                                 DRV_LOG(ERR, "Failed to register meter "
15836                                 "drop default matcher.");
15837                                 goto policy_error;
15838                         }
15839                         mtrmng->def_matcher[domain] = container_of(entry,
15840                         struct mlx5_flow_dv_matcher, entry);
15841                 }
15842                 /* Create default rule in drop table. */
15843                 if (!mtrmng->def_rule[domain]) {
15844                         i = 0;
15845                         actions[i++] = priv->sh->dr_drop_action;
15846                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15847                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
15848                         ret = mlx5_flow_os_create_flow
15849                                 (mtrmng->def_matcher[domain]->matcher_object,
15850                                 (void *)&value, i, actions,
15851                                 &mtrmng->def_rule[domain]);
15852                         if (ret) {
15853                                 DRV_LOG(ERR, "Failed to create meter "
15854                                 "default drop rule for drop table.");
15855                                 goto policy_error;
15856                         }
15857                 }
15858                 if (!fm->drop_cnt)
15859                         continue;
15860                 MLX5_ASSERT(mtrmng->max_mtr_bits);
15861                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
15862                         /* Create matchers for Drop. */
15863                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15864                                         (enum modify_reg)mtr_id_reg_c, 0,
15865                                         (mtr_id_mask << mtr_id_offset));
15866                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
15867                         matcher.crc = rte_raw_cksum
15868                                         ((const void *)matcher.mask.buf,
15869                                         matcher.mask.size);
15870                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15871                         if (!entry) {
15872                                 DRV_LOG(ERR,
15873                                 "Failed to register meter drop matcher.");
15874                                 goto policy_error;
15875                         }
15876                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
15877                                 container_of(entry, struct mlx5_flow_dv_matcher,
15878                                              entry);
15879                 }
15880                 drop_matcher =
15881                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
15882                 /* Create drop rule, matching meter_id only. */
15883                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15884                                 (enum modify_reg)mtr_id_reg_c,
15885                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
15886                 i = 0;
15887                 cnt = flow_dv_counter_get_by_idx(dev,
15888                                         fm->drop_cnt, NULL);
15889                 actions[i++] = cnt->action;
15890                 actions[i++] = priv->sh->dr_drop_action;
15891                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
15892                                                (void *)&value, i, actions,
15893                                                &fm->drop_rule[domain]);
15894                 if (ret) {
15895                         DRV_LOG(ERR, "Failed to create meter "
15896                                 "drop rule for drop table.");
15897                         goto policy_error;
15898                 }
15899         }
15900         return 0;
15901 policy_error:
15902         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15903                 if (fm->drop_rule[i]) {
15904                         claim_zero(mlx5_flow_os_destroy_flow
15905                                 (fm->drop_rule[i]));
15906                         fm->drop_rule[i] = NULL;
15907                 }
15908         }
15909         return -1;
15910 }
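
/*
 * Editor's sketch (illustrative only): when the meter register is shared
 * with the color bits (priv->mtr_reg_share), the meter id occupies the
 * bits above MLX5_MTR_COLOR_BITS; otherwise it starts at bit 0. This
 * hypothetical helper computes the value/mask pair the per-meter drop
 * rule above matches against.
 */
static inline void
sketch_mtr_id_match(uint32_t mtr_idx, uint8_t max_mtr_bits, bool reg_share,
		    uint32_t *value, uint32_t *mask)
{
	uint8_t offset = reg_share ? MLX5_MTR_COLOR_BITS : 0;

	*mask = ((UINT32_C(1) << max_mtr_bits) - 1) << offset;
	*value = mtr_idx << offset;
}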
15911
15912 /**
15913  * Find or create the sub-policy table for the prefix table with RSS.
15914  *
15915  * @param[in] dev
15916  *   Pointer to Ethernet device.
15917  * @param[in] mtr_policy
15918  *   Pointer to meter policy table.
15919  * @param[in] rss_desc
15920  *   Pointer to the array of RSS descriptors, one per meter color.
15921  * @return
15922  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
15923  */
15924 static struct mlx5_flow_meter_sub_policy *
15925 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
15926                 struct mlx5_flow_meter_policy *mtr_policy,
15927                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
15928 {
15929         struct mlx5_priv *priv = dev->data->dev_private;
15930         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
15931         uint32_t sub_policy_idx = 0;
15932         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
15933         uint32_t i, j;
15934         struct mlx5_hrxq *hrxq;
15935         struct mlx5_flow_handle dh;
15936         struct mlx5_meter_policy_action_container *act_cnt;
15937         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
15938         uint16_t sub_policy_num;
15939
15940         rte_spinlock_lock(&mtr_policy->sl);
15941         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15942                 if (!rss_desc[i])
15943                         continue;
15944                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
15945                 if (!hrxq_idx[i]) {
15946                         rte_spinlock_unlock(&mtr_policy->sl);
15947                         return NULL;
15948                 }
15949         }
15950         sub_policy_num = (mtr_policy->sub_policy_num >>
15951                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15952                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15953         for (i = 0; i < sub_policy_num; i++) {
15955                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
15956                         if (rss_desc[j] &&
15957                                 hrxq_idx[j] !=
15958                         mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
15959                                 break;
15960                 }
15961                 if (j >= MLX5_MTR_RTE_COLORS) {
15962                         /*
15963                          * Found the sub-policy table with
15964                          * the same queue per color.
15965                          */
15966                         rte_spinlock_unlock(&mtr_policy->sl);
15967                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
15968                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
15969                         return mtr_policy->sub_policys[domain][i];
15970                 }
15971         }
15972         /* Create sub policy. */
15973         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
15974                 /* Reuse the first dummy sub_policy. */
15975                 sub_policy = mtr_policy->sub_policys[domain][0];
15976                 sub_policy_idx = sub_policy->idx;
15977         } else {
15978                 sub_policy = mlx5_ipool_zmalloc
15979                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15980                                 &sub_policy_idx);
15981                 if (!sub_policy ||
15982                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
15983                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
15984                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
15985                         goto rss_sub_policy_error;
15986                 }
15987                 sub_policy->idx = sub_policy_idx;
15988                 sub_policy->main_policy = mtr_policy;
15989         }
15990         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15991                 if (!rss_desc[i])
15992                         continue;
15993                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
15994                 /*
15995                  * Overwrite the last action from
15996                  * RSS action to Queue action.
15997                  */
15998                 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
15999                               hrxq_idx[i]);
16000                 if (!hrxq) {
16001                         DRV_LOG(ERR, "Failed to find policy hrxq.");
16002                         goto rss_sub_policy_error;
16003                 }
16004                 act_cnt = &mtr_policy->act_cnt[i];
16005                 if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16006                         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16007                         if (act_cnt->rix_mark)
16008                                 dh.mark = 1;
16009                         dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16010                         dh.rix_hrxq = hrxq_idx[i];
16011                         flow_drv_rxq_flags_set(dev, &dh);
16012                 }
16013         }
16014         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16015                 sub_policy, domain)) {
16016                 DRV_LOG(ERR, "Failed to create policy "
16017                         "rules per domain.");
16018                 goto rss_sub_policy_error;
16019         }
16020         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16021                 i = (mtr_policy->sub_policy_num >>
16022                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16023                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16024                 mtr_policy->sub_policys[domain][i] = sub_policy;
16025                 i++;
16026                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
16027                         goto rss_sub_policy_error;
16028                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16029                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16030                 mtr_policy->sub_policy_num |=
16031                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16032                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16033         }
16034         rte_spinlock_unlock(&mtr_policy->sl);
16035         return sub_policy;
16036 rss_sub_policy_error:
16037         if (sub_policy) {
16038                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16039                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16040                         i = (mtr_policy->sub_policy_num >>
16041                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16042                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16043                         mtr_policy->sub_policys[domain][i] = NULL;
16044                         mlx5_ipool_free
16045                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16046                                         sub_policy->idx);
16047                 }
16048         }
16049         if (sub_policy_idx)
16050                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16051                         sub_policy_idx);
16052         rte_spinlock_unlock(&mtr_policy->sl);
16053         return NULL;
16054 }
16055
16056
16057 /**
16058  * Destroy the sub policy table with RX queue.
16059  *
16060  * @param[in] dev
16061  *   Pointer to Ethernet device.
16062  * @param[in] mtr_policy
16063  *   Pointer to meter policy table.
16064  */
16065 static void
16066 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16067                 struct mlx5_flow_meter_policy *mtr_policy)
16068 {
16069         struct mlx5_priv *priv = dev->data->dev_private;
16070         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16071         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16072         uint32_t i, j;
16073         uint16_t sub_policy_num, new_policy_num;
16074
16075         rte_spinlock_lock(&mtr_policy->sl);
16076         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16077                 switch (mtr_policy->act_cnt[i].fate_action) {
16078                 case MLX5_FLOW_FATE_SHARED_RSS:
16079                         sub_policy_num = (mtr_policy->sub_policy_num >>
16080                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16081                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16082                         new_policy_num = sub_policy_num;
16083                         for (j = 0; j < sub_policy_num; j++) {
16084                                 sub_policy =
16085                                         mtr_policy->sub_policys[domain][j];
16086                                 if (sub_policy) {
16087                                         __flow_dv_destroy_sub_policy_rules
16088                                                 (dev, sub_policy);
16089                                         if (sub_policy !=
16090                                             mtr_policy->sub_policys
16091                                                     [domain][0]) {
16092                                                 mtr_policy->sub_policys
16093                                                     [domain][j] = NULL;
16094                                                 mlx5_ipool_free
16095                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16096                                                 sub_policy->idx);
16097                                                 new_policy_num--;
16098                                         }
16099                                 }
16099                         }
16100                         if (new_policy_num != sub_policy_num) {
16101                                 mtr_policy->sub_policy_num &=
16102                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16103                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16104                                 mtr_policy->sub_policy_num |=
16105                                 (new_policy_num &
16106                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16107                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16108                         }
16109                         break;
16110                 case MLX5_FLOW_FATE_QUEUE:
16111                         sub_policy = mtr_policy->sub_policys[domain][0];
16112                         __flow_dv_destroy_sub_policy_rules(dev,
16113                                                 sub_policy);
16114                         break;
16115                 default:
16116                         /* Other fate actions hold no Rx queue; nothing to do. */
16117                         break;
16118                 }
16119         }
16120         rte_spinlock_unlock(&mtr_policy->sl);
16121 }
16122
16123 /**
16124  * Validate the batch counter support in root table.
16125  *
16126  * Create a simple flow with invalid counter and drop action on root table to
16127  * validate if batch counter with offset on root table is supported or not.
16128  *
16129  * @param[in] dev
16130  *   Pointer to rte_eth_dev structure.
16131  *
16132  * @return
16133  *   0 on success, a negative errno value otherwise and rte_errno is set.
16134  */
16135 int
16136 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16137 {
16138         struct mlx5_priv *priv = dev->data->dev_private;
16139         struct mlx5_dev_ctx_shared *sh = priv->sh;
16140         struct mlx5_flow_dv_match_params mask = {
16141                 .size = sizeof(mask.buf),
16142         };
16143         struct mlx5_flow_dv_match_params value = {
16144                 .size = sizeof(value.buf),
16145         };
16146         struct mlx5dv_flow_matcher_attr dv_attr = {
16147                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16148                 .priority = 0,
16149                 .match_criteria_enable = 0,
16150                 .match_mask = (void *)&mask,
16151         };
16152         void *actions[2] = { 0 };
16153         struct mlx5_flow_tbl_resource *tbl = NULL;
16154         struct mlx5_devx_obj *dcs = NULL;
16155         void *matcher = NULL;
16156         void *flow = NULL;
16157         int ret = -1;
16158
16159         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16160                                         0, 0, 0, NULL);
16161         if (!tbl)
16162                 goto err;
16163         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16164         if (!dcs)
16165                 goto err;
16166         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16167                                                     &actions[0]);
16168         if (ret)
16169                 goto err;
16170         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
16171         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
16172                                                &matcher);
16173         if (ret)
16174                 goto err;
16175         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
16176                                        actions, &flow);
16177 err:
16178         /*
16179          * If the batch counter with offset is not supported, the driver does
16180          * not validate the invalid offset value and flow creation succeeds.
16181          * In this case, batch counters are not supported in the root table.
16182          *
16183          * Otherwise, if flow creation fails, counter offset is supported.
16184          */
16185         if (flow) {
16186                 DRV_LOG(INFO, "Batch counter is not supported in root "
16187                               "table. Switch to fallback mode.");
16188                 rte_errno = ENOTSUP;
16189                 ret = -rte_errno;
16190                 claim_zero(mlx5_flow_os_destroy_flow(flow));
16191         } else {
16192                 /* Check the matcher to make sure validation failed at flow creation. */
16193                 if (!matcher || errno != EINVAL)
16194                         DRV_LOG(ERR, "Unexpected error in counter offset "
16195                                      "support detection");
16196                 ret = 0;
16197         }
16198         if (actions[0])
16199                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
16200         if (matcher)
16201                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
16202         if (tbl)
16203                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
16204         if (dcs)
16205                 claim_zero(mlx5_devx_cmd_destroy(dcs));
16206         return ret;
16207 }
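
/*
 * Editor's sketch (illustrative only): the probe path can run the discovery
 * above once and select the counter allocation scheme from its result. The
 * helper name is hypothetical; 0 means offset-based batch counters work on
 * the root table.
 */
static inline bool
sketch_use_batch_counters(struct rte_eth_dev *dev)
{
	return mlx5_flow_dv_discover_counter_offset_support(dev) == 0;
}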
16208
16209 /**
16210  * Query a devx counter.
16211  *
16212  * @param[in] dev
16213  *   Pointer to the Ethernet device structure.
16214  * @param[in] counter
16215  *   Index to the flow counter.
16216  * @param[in] clear
16217  *   Set to clear the counter statistics.
16218  * @param[out] pkts
16219  *   The statistics value of packets.
16220  * @param[out] bytes
16221  *   The statistics value of bytes.
16222  *
16223  * @return
16224  *   0 on success, otherwise return -1.
16225  */
16226 static int
16227 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
16228                       uint64_t *pkts, uint64_t *bytes)
16229 {
16230         struct mlx5_priv *priv = dev->data->dev_private;
16231         struct mlx5_flow_counter *cnt;
16232         uint64_t inn_pkts, inn_bytes;
16233         int ret;
16234
16235         if (!priv->config.devx)
16236                 return -1;
16237
16238         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
16239         if (ret)
16240                 return -1;
16241         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
16242         *pkts = inn_pkts - cnt->hits;
16243         *bytes = inn_bytes - cnt->bytes;
16244         if (clear) {
16245                 cnt->hits = inn_pkts;
16246                 cnt->bytes = inn_bytes;
16247         }
16248         return 0;
16249 }
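
/*
 * Editor's sketch (illustrative only): the clear-on-read bookkeeping above
 * relies on the device counter being monotonic, so "clear" merely moves a
 * software baseline and later queries report deltas from it. Stand-alone
 * hypothetical version for clarity.
 */
struct sketch_counter {
	uint64_t hw_pkts;   /* Monotonic value read from the device. */
	uint64_t base_pkts; /* Software baseline, moved on clear. */
};

static inline uint64_t
sketch_counter_read(struct sketch_counter *c, bool clear)
{
	uint64_t delta = c->hw_pkts - c->base_pkts;

	if (clear)
		c->base_pkts = c->hw_pkts;
	return delta;
}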
16250
16251 /**
16252  * Get aged-out flows.
16253  *
16254  * @param[in] dev
16255  *   Pointer to the Ethernet device structure.
16256  * @param[in] context
16257  *   The address of an array of pointers to the aged-out flow contexts.
16258  * @param[in] nb_contexts
16259  *   The length of the context array.
16260  * @param[out] error
16261  *   Perform verbose error reporting if not NULL. Initialized in case of
16262  *   error only.
16263  *
16264  * @return
16265  *   The number of aged-out contexts on success, otherwise a negative errno
16266  *   value. If nb_contexts is 0, the total number of aged-out contexts is
16267  *   returned; otherwise, the number of aged-out flows reported in the
16268  *   context array.
16270  */
16271 static int
16272 flow_get_aged_flows(struct rte_eth_dev *dev,
16273                     void **context,
16274                     uint32_t nb_contexts,
16275                     struct rte_flow_error *error)
16276 {
16277         struct mlx5_priv *priv = dev->data->dev_private;
16278         struct mlx5_age_info *age_info;
16279         struct mlx5_age_param *age_param;
16280         struct mlx5_flow_counter *counter;
16281         struct mlx5_aso_age_action *act;
16282         int nb_flows = 0;
16283
16284         if (nb_contexts && !context)
16285                 return rte_flow_error_set(error, EINVAL,
16286                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16287                                           NULL, "empty context");
16288         age_info = GET_PORT_AGE_INFO(priv);
16289         rte_spinlock_lock(&age_info->aged_sl);
16290         LIST_FOREACH(act, &age_info->aged_aso, next) {
16291                 nb_flows++;
16292                 if (nb_contexts) {
16293                         context[nb_flows - 1] =
16294                                                 act->age_params.context;
16295                         if (!(--nb_contexts))
16296                                 break;
16297                 }
16298         }
16299         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
16300                 nb_flows++;
16301                 if (nb_contexts) {
16302                         age_param = MLX5_CNT_TO_AGE(counter);
16303                         context[nb_flows - 1] = age_param->context;
16304                         if (!(--nb_contexts))
16305                                 break;
16306                 }
16307         }
16308         rte_spinlock_unlock(&age_info->aged_sl);
16309         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
16310         return nb_flows;
16311 }
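
/*
 * Editor's sketch (illustrative application-side usage, not driver code):
 * the two-call pattern the function above supports, first asking for the
 * number of aged-out flows (nb_contexts == 0), then fetching up to a
 * bounded batch of their contexts. The helper name and batch size are
 * hypothetical.
 */
static int
sketch_drain_aged_flows(uint16_t port_id)
{
	void *contexts[64];
	struct rte_flow_error error;
	int nb = rte_flow_get_aged_flows(port_id, NULL, 0, &error);

	if (nb <= 0)
		return nb;
	if (nb > 64)
		nb = 64;
	nb = rte_flow_get_aged_flows(port_id, contexts, nb, &error);
	/* The application destroys or refreshes the flows behind contexts[]. */
	return nb;
}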
16312
16313 /*
16314  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
16315  */
16316 static uint32_t
16317 flow_dv_counter_allocate(struct rte_eth_dev *dev)
16318 {
16319         return flow_dv_counter_alloc(dev, 0);
16320 }
16321
16322 /**
16323  * Validate indirect action.
16324  * Dispatcher for action type specific validation.
16325  *
16326  * @param[in] dev
16327  *   Pointer to the Ethernet device structure.
16328  * @param[in] conf
16329  *   Indirect action configuration.
16330  * @param[in] action
16331  *   The indirect action object to validate.
16332  * @param[out] error
16333  *   Perform verbose error reporting if not NULL. Initialized in case of
16334  *   error only.
16335  *
16336  * @return
16337  *   0 on success, otherwise negative errno value.
16338  */
16339 static int
16340 flow_dv_action_validate(struct rte_eth_dev *dev,
16341                         const struct rte_flow_indir_action_conf *conf,
16342                         const struct rte_flow_action *action,
16343                         struct rte_flow_error *err)
16344 {
16345         struct mlx5_priv *priv = dev->data->dev_private;
16346
16347         RTE_SET_USED(conf);
16348         switch (action->type) {
16349         case RTE_FLOW_ACTION_TYPE_RSS:
16350                 /*
16351                  * priv->obj_ops is set according to driver capabilities.
16352                  * When DevX capabilities are
16353                  * sufficient, it is set to devx_obj_ops.
16354                  * Otherwise, it is set to ibv_obj_ops.
16355                  * ibv_obj_ops doesn't support ind_table_modify operation.
16356                  * In this case the indirect RSS action can't be used.
16357                  */
16358                 if (priv->obj_ops.ind_table_modify == NULL)
16359                         return rte_flow_error_set
16360                                         (err, ENOTSUP,
16361                                          RTE_FLOW_ERROR_TYPE_ACTION,
16362                                          NULL,
16363                                          "Indirect RSS action not supported");
16364                 return mlx5_validate_action_rss(dev, action, err);
16365         case RTE_FLOW_ACTION_TYPE_AGE:
16366                 if (!priv->sh->aso_age_mng)
16367                         return rte_flow_error_set(err, ENOTSUP,
16368                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16369                                                 NULL,
16370                                                 "Indirect age action not supported");
16371                 return flow_dv_validate_action_age(0, action, dev, err);
16372         case RTE_FLOW_ACTION_TYPE_COUNT:
16373                 /*
16374                  * There are two mechanisms to share the action count.
16375                  * The old mechanism uses the shared field to share, while the
16376                  * new mechanism uses the indirect action API.
16377                  * This validation comes to make sure that the two mechanisms
16378                  * are not combined.
16379                  */
16380                 if (is_shared_action_count(action))
16381                         return rte_flow_error_set(err, ENOTSUP,
16382                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16383                                                   NULL,
16384                                                   "Mixing shared and indirect counters is not supported");
16385                 return flow_dv_validate_action_count(dev, true, 0, err);
16386         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
16387                 if (!priv->sh->ct_aso_en)
16388                         return rte_flow_error_set(err, ENOTSUP,
16389                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16390                                         "ASO CT is not supported");
16391                 return mlx5_validate_action_ct(dev, action->conf, err);
16392         default:
16393                 return rte_flow_error_set(err, ENOTSUP,
16394                                           RTE_FLOW_ERROR_TYPE_ACTION,
16395                                           NULL,
16396                                           "action type not supported");
16397         }
16398 }
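
/*
 * Usage sketch (illustrative): this validator runs when an application
 * creates an indirect action through the generic API. The port id and
 * the minimal RSS configuration below are assumptions for the example.
 *
 *	uint16_t queues[] = { 0 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.types = ETH_RSS_IP,
 *		.queue_num = 1,
 *		.queue = queues,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_error error;
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(0, &conf, &action, &error);
 *
 * A NULL handle means validation or creation failed; the verbose
 * reason, if any, is in error.message.
 */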
16399
16400 /**
16401  * Validate meter policy actions.
16402  * Dispatcher for action type specific validation.
16403  *
16404  * @param[in] dev
16405  *   Pointer to the Ethernet device structure.
16406  * @param[in] actions
16407  *   Array of per-color action lists composing the meter policy.
16408  * @param[in] attr
16409  *   Attributes of flow to determine steering domain.
16410  * @param[out] is_rss
16411  *   Set to true when the policy contains an RSS action.
16412  * @param[out] domain_bitmap
16413  *   Bitmap of the steering domains the policy is valid for.
16414  * @param[out] is_def_policy
16415  *   Set to true when the policy matches the default meter policy.
16416  * @param[out] error
16417  *   Perform verbose error reporting if not NULL. Initialized in case of
16418  *   error only.
16413  *
16414  * @return
16415  *   0 on success, otherwise negative errno value.
16416  */
16417 static int
16418 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
16419                         const struct rte_flow_action *actions[RTE_COLORS],
16420                         struct rte_flow_attr *attr,
16421                         bool *is_rss,
16422                         uint8_t *domain_bitmap,
16423                         bool *is_def_policy,
16424                         struct rte_mtr_error *error)
16425 {
16426         struct mlx5_priv *priv = dev->data->dev_private;
16427         struct mlx5_dev_config *dev_conf = &priv->config;
16428         const struct rte_flow_action *act;
16429         uint64_t action_flags = 0;
16430         int actions_n;
16431         int i, ret;
16432         struct rte_flow_error flow_err;
16433         uint8_t domain_color[RTE_COLORS] = {0};
16434         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
16435
16436         if (!priv->config.dv_esw_en)
16437                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
16438         *domain_bitmap = def_domain;
16439         if (actions[RTE_COLOR_YELLOW] &&
16440                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
16441                 return -rte_mtr_error_set(error, ENOTSUP,
16442                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16443                                 NULL,
16444                                 "Yellow color does not support any action.");
16445         if (!actions[RTE_COLOR_RED] ||
16446                 actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
16447                 return -rte_mtr_error_set(error, ENOTSUP,
16448                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16449                                 NULL, "Red color only supports drop action.");
16450         /*
16451          * Check default policy actions:
16452          * Green/Yellow: no action, Red: drop action
16453          */
16454         if (!actions[RTE_COLOR_GREEN] ||
16455                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END) {
16456                 *is_def_policy = true;
16457                 return 0;
16458         }
16459         flow_err.message = NULL;
16460         for (i = 0; i < RTE_COLORS; i++) {
16461                 act = actions[i];
16462                 for (action_flags = 0, actions_n = 0;
16463                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
16464                         act++) {
16465                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
16466                                 return -rte_mtr_error_set(error, ENOTSUP,
16467                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16468                                           NULL, "too many actions");
16469                         switch (act->type) {
16470                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
16471                                 if (!priv->config.dv_esw_en)
16472                                         return -rte_mtr_error_set(error,
16473                                         ENOTSUP,
16474                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16475                                         NULL, "PORT action validate check"
16476                                         " fail for ESW disable");
16477                                 ret = flow_dv_validate_action_port_id(dev,
16478                                                 action_flags,
16479                                                 act, attr, &flow_err);
16480                                 if (ret)
16481                                         return -rte_mtr_error_set(error,
16482                                         ENOTSUP,
16483                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16484                                         NULL, flow_err.message ?
16485                                         flow_err.message :
16486                                         "PORT_ID action validation failed");
16487                                 ++actions_n;
16488                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
16489                                 break;
16490                         case RTE_FLOW_ACTION_TYPE_MARK:
16491                                 ret = flow_dv_validate_action_mark(dev, act,
16492                                                            action_flags,
16493                                                            attr, &flow_err);
16494                                 if (ret < 0)
16495                                         return -rte_mtr_error_set(error,
16496                                         ENOTSUP,
16497                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16498                                         NULL, flow_err.message ?
16499                                         flow_err.message :
16500                                         "MARK action validation failed");
16501                                 if (dev_conf->dv_xmeta_en !=
16502                                         MLX5_XMETA_MODE_LEGACY)
16503                                         return -rte_mtr_error_set(error,
16504                                         ENOTSUP,
16505                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16506                                         NULL, "Extend MARK action is "
16507                                         "not supported. Please try use "
16508                                         "default policy for meter.");
16509                                 action_flags |= MLX5_FLOW_ACTION_MARK;
16510                                 ++actions_n;
16511                                 break;
16512                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
16513                                 ret = flow_dv_validate_action_set_tag(dev,
16514                                                         act, action_flags,
16515                                                         attr, &flow_err);
16516                                 if (ret)
16517                                         return -rte_mtr_error_set(error,
16518                                         ENOTSUP,
16519                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16520                                         NULL, flow_err.message ?
16521                                         flow_err.message :
16522                                         "SET_TAG action validation failed");
16523                                 /*
16524                                  * Count all modify-header actions
16525                                  * as one action.
16526                                  */
16527                                 if (!(action_flags &
16528                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
16529                                         ++actions_n;
16530                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
16531                                 break;
16532                         case RTE_FLOW_ACTION_TYPE_DROP:
16533                                 ret = mlx5_flow_validate_action_drop
16534                                         (action_flags,
16535                                         attr, &flow_err);
16536                                 if (ret < 0)
16537                                         return -rte_mtr_error_set(error,
16538                                         ENOTSUP,
16539                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16540                                         NULL, flow_err.message ?
16541                                         flow_err.message :
16542                                         "DROP action validation failed");
16543                                 action_flags |= MLX5_FLOW_ACTION_DROP;
16544                                 ++actions_n;
16545                                 break;
16546                         case RTE_FLOW_ACTION_TYPE_QUEUE:
16547                                 /*
16548                                  * Check whether extensive
16549                                  * metadata feature is engaged.
16550                                  */
16551                                 if (dev_conf->dv_flow_en &&
16552                                         (dev_conf->dv_xmeta_en !=
16553                                         MLX5_XMETA_MODE_LEGACY) &&
16554                                         mlx5_flow_ext_mreg_supported(dev))
16555                                         return -rte_mtr_error_set(error,
16556                                           ENOTSUP,
16557                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16558                                           NULL, "Queue action with meta "
16559                                           "is not supported. Please use "
16560                                           "the default policy for the meter.");
16561                                 ret = mlx5_flow_validate_action_queue(act,
16562                                                         action_flags, dev,
16563                                                         attr, &flow_err);
16564                                 if (ret < 0)
16565                                         return -rte_mtr_error_set(error,
16566                                           ENOTSUP,
16567                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16568                                           NULL, flow_err.message ?
16569                                           flow_err.message :
16570                                           "QUEUE action validation failed");
16571                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
16572                                 ++actions_n;
16573                                 break;
16574                         case RTE_FLOW_ACTION_TYPE_RSS:
16575                                 if (dev_conf->dv_flow_en &&
16576                                         (dev_conf->dv_xmeta_en !=
16577                                         MLX5_XMETA_MODE_LEGACY) &&
16578                                         mlx5_flow_ext_mreg_supported(dev))
16579                                         return -rte_mtr_error_set(error,
16580                                           ENOTSUP,
16581                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16582                                           NULL, "RSS action with meta "
16583                                           "is not supported. Please use "
16584                                           "the default policy for the meter.");
16585                                 ret = mlx5_validate_action_rss(dev, act,
16586                                                 &flow_err);
16587                                 if (ret < 0)
16588                                         return -rte_mtr_error_set(error,
16589                                           ENOTSUP,
16590                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16591                                           NULL, flow_err.message ?
16592                                           flow_err.message :
16593                                           "RSS action validation failed");
16594                                 action_flags |= MLX5_FLOW_ACTION_RSS;
16595                                 ++actions_n;
16596                                 *is_rss = true;
16597                                 break;
16598                         case RTE_FLOW_ACTION_TYPE_JUMP:
16599                                 ret = flow_dv_validate_action_jump(dev,
16600                                         NULL, act, action_flags,
16601                                         attr, true, &flow_err);
16602                                 if (ret)
16603                                         return -rte_mtr_error_set(error,
16604                                           ENOTSUP,
16605                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16606                                           NULL, flow_err.message ?
16607                                           flow_err.message :
16608                                           "JUMP action validation failed");
16609                                 ++actions_n;
16610                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
16611                                 break;
16612                         default:
16613                                 return -rte_mtr_error_set(error, ENOTSUP,
16614                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16615                                         NULL,
16616                                         "unsupported action type in meter policy");
16617                         }
16618                 }
16619                 /* Yellow is not supported, just skip. */
16620                 if (i == RTE_COLOR_YELLOW)
16621                         continue;
16622                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
16623                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
16624                 else if ((action_flags &
16625                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
16626                         (action_flags & MLX5_FLOW_ACTION_MARK))
16627                         /*
16628                          * Only MLX5_XMETA_MODE_LEGACY is supported, so
16629                          * the MARK action is valid only in the ingress domain.
16630                          */
16631                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
16632                 else
16633                         domain_color[i] = def_domain;
16634                 /*
16635                  * Validate drop action mutual exclusion: drop is
16636                  * mutually-exclusive with any other action, except
16637                  * for the count action.
16638                  */
16639                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
16640                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
16641                         return -rte_mtr_error_set(error, ENOTSUP,
16642                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16643                                 NULL, "Drop action is mutually-exclusive "
16644                                 "with any other action");
16645                 }
16646                 /* E-Switch has a few restrictions on using items and actions. */
16647                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
16648                         if (!mlx5_flow_ext_mreg_supported(dev) &&
16649                                 action_flags & MLX5_FLOW_ACTION_MARK)
16650                                 return -rte_mtr_error_set(error, ENOTSUP,
16651                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16652                                         NULL, "unsupported action MARK");
16653                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
16654                                 return -rte_mtr_error_set(error, ENOTSUP,
16655                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16656                                         NULL, "unsupported action QUEUE");
16657                         if (action_flags & MLX5_FLOW_ACTION_RSS)
16658                                 return -rte_mtr_error_set(error, ENOTSUP,
16659                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16660                                         NULL, "unsupported action RSS");
16661                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
16662                                 return -rte_mtr_error_set(error, ENOTSUP,
16663                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16664                                         NULL, "no fate action is found");
16665                 } else {
16666                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
16667                                 (domain_color[i] &
16668                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
16669                                 if ((domain_color[i] &
16670                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
16671                                         domain_color[i] =
16672                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
16673                                 else
16674                                         return -rte_mtr_error_set(error,
16675                                         ENOTSUP,
16676                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16677                                         NULL, "no fate action is found");
16678                         }
16679                 }
16680                 if (domain_color[i] != def_domain)
16681                         *domain_bitmap = domain_color[i];
16682         }
16683         return 0;
16684 }
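
/*
 * Usage sketch (illustrative): the per-color action arrays validated
 * above originate from rte_mtr_meter_policy_add(). The port id,
 * policy id and green-color queue index are assumptions; per the
 * checks above, yellow carries no action and red must be drop.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action green_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action red_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_mtr_meter_policy_params policy = {
 *		.actions = {
 *			[RTE_COLOR_GREEN] = green_acts,
 *			[RTE_COLOR_YELLOW] = NULL,
 *			[RTE_COLOR_RED] = red_acts,
 *		},
 *	};
 *	struct rte_mtr_error mtr_err;
 *	int ret = rte_mtr_meter_policy_add(0, 1, &policy, &mtr_err);
 */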
16685
16686 static int
16687 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
16688 {
16689         struct mlx5_priv *priv = dev->data->dev_private;
16690         int ret = 0;
16691
16692         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
16693                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
16694                                                 flags);
16695                 if (ret != 0)
16696                         return ret;
16697         }
16698         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
16699                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
16700                 if (ret != 0)
16701                         return ret;
16702         }
16703         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
16704                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
16705                 if (ret != 0)
16706                         return ret;
16707         }
16708         return 0;
16709 }
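
/*
 * Usage sketch (illustrative): applications reach this hook through
 * the mlx5-specific rte_pmd_mlx5_sync_flow() call declared in
 * rte_pmd_mlx5.h. The port id is an assumption for the example.
 *
 *	uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_NIC_TX;
 *	int ret = rte_pmd_mlx5_sync_flow(0, domains);
 *
 * A zero return means the requested steering domains were flushed to
 * the hardware.
 */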
16710
16711 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
16712         .validate = flow_dv_validate,
16713         .prepare = flow_dv_prepare,
16714         .translate = flow_dv_translate,
16715         .apply = flow_dv_apply,
16716         .remove = flow_dv_remove,
16717         .destroy = flow_dv_destroy,
16718         .query = flow_dv_query,
16719         .create_mtr_tbls = flow_dv_create_mtr_tbls,
16720         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
16721         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
16722         .create_meter = flow_dv_mtr_alloc,
16723         .free_meter = flow_dv_aso_mtr_release_to_pool,
16724         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
16725         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
16726         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
16727         .create_policy_rules = flow_dv_create_policy_rules,
16728         .destroy_policy_rules = flow_dv_destroy_policy_rules,
16729         .create_def_policy = flow_dv_create_def_policy,
16730         .destroy_def_policy = flow_dv_destroy_def_policy,
16731         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
16732         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
16733         .counter_alloc = flow_dv_counter_allocate,
16734         .counter_free = flow_dv_counter_free,
16735         .counter_query = flow_dv_counter_query,
16736         .get_aged_flows = flow_get_aged_flows,
16737         .action_validate = flow_dv_action_validate,
16738         .action_create = flow_dv_action_create,
16739         .action_destroy = flow_dv_action_destroy,
16740         .action_update = flow_dv_action_update,
16741         .action_query = flow_dv_action_query,
16742         .sync_domain = flow_dv_sync_domain,
16743 };
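
/*
 * Dispatch sketch (an assumption for illustration: the generic layer in
 * mlx5_flow.c selects this table via flow_get_drv_ops() when the DV
 * flow engine is enabled):
 *
 *	const struct mlx5_flow_driver_ops *fops =
 *		flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *	fops->sync_domain(dev, domains, flags);
 */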
16744
16745 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
16746