net/mlx5: use indexed pool as id generator
drivers/net/mlx5/mlx5_flow_dv.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "rte_pmd_mlx5.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

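/*
 * Illustrative sketch, not part of the upstream driver: the anonymous
 * struct and the "attr" word overlay the same storage, so all flags
 * can be reset in a single store, as the item parser below does when
 * a tunnel item is met in decapsulation mode:
 *
 *      union flow_dv_attr a = { .attr = 0 };
 *
 *      a.ipv4 = 1;
 *      a.udp = 1;
 *      a.attr = 0;     // clears valid/ipv4/ipv6/tcp/udp at once
 */
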
static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. Use the
         * layer flags from the prefix flow, since the suffix flow may
         * not carry the user-defined items after the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared dv context. Locking occurs only if the context is
 * actually shared, i.e. we have a multiport IB device and
 * representors are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

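/*
 * Illustrative example, not from the upstream source: with the VLAN
 * definitions above, PCP occupies the top three bits of the TCI and
 * VID the low twelve. For OF_SET_VLAN_PCP with vlan_pcp = 5:
 *
 *      vlan_tci = 5 << MLX5DV_FLOW_VLAN_PCP_SHIFT;     // 0xA000
 *      vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;   // clears 0xE000
 *      vlan->vlan_tci |= vlan_tci;                     // VID bits intact
 */
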
/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

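/*
 * Illustrative example, not part of the upstream driver: for the
 * 3-byte case the two high bytes are read as a big-endian 16-bit
 * word and the third byte is appended:
 *
 *      static const uint8_t buf[] = { 0x12, 0x34, 0x56 };
 *
 *      flow_dv_fetch_field(buf, 2);    // 0x1234
 *      flow_dv_fetch_field(buf, 3);    // (0x1234 << 8) | 0x56 = 0x123456
 */
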
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by the provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

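/*
 * Illustrative example, not from the upstream source: how offset and
 * width are deduced from a fetched mask value of 0x00ffff00:
 *
 *      off_b = rte_bsf32(0x00ffff00);                    // 8
 *      size_b = 32 - off_b - __builtin_clz(0x00ffff00);  // 32 - 8 - 8 = 16
 *
 * The resulting command modifies a 16-bit field starting at bit 8,
 * and data = (data & mask) >> off_b right-aligns the SET value to it.
 */
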
/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

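/*
 * Illustrative note, not from the upstream source: the decrement above
 * is expressed as an 8-bit ADD of 0xFF. TTL arithmetic wraps modulo
 * 256, so adding 255 is equivalent to subtracting 1:
 *
 *      (64 + 0xFF) & 0xFF == 63
 *
 * The same modular trick is used below for the 32-bit TCP sequence
 * and acknowledgment fields.
 */
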
/**
 * Convert modify-header increment/decrement TCP sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with an increment
                 * operation, we add UINT32_MAX to Y, X times.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

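/*
 * Illustrative example, not from the upstream source: to decrement the
 * sequence number by 3, value = 3 is multiplied by UINT32_MAX and then
 * truncated to 32 bits, which is exactly -3 modulo 2^32:
 *
 *      (uint32_t)(3 * (uint64_t)UINT32_MAX) == 0xFFFFFFFD
 *
 * A 32-bit ADD of 0xFFFFFFFD therefore subtracts 3 from the field.
 */
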
/**
 * Convert modify-header increment/decrement TCP acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with an increment
                 * operation, we add UINT32_MAX to Y, X times.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

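/*
 * Illustrative sketch with hypothetical values, not upstream text:
 * a SET_TAG action such as
 *
 *      struct rte_flow_action_set_tag conf = {
 *              .index = 0, .data = 0x1234, .mask = 0xffff,
 *      };
 *
 * resolves tag index 0 through mlx5_flow_get_reg_id(MLX5_APP_TAG) to
 * a REG_C_* register and is emitted as a single 32-bit SET command on
 * the corresponding MLX5_MODI_META_REG_C_* field, narrowed by mask.
 */
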
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

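/*
 * Illustrative example with an assumed mask value, not upstream text:
 * if the kernel reports dv_regc0_mask = 0x0000ff00, only those 8 bits
 * of reg_c[0] belong to the PMD. For a copy into REG_C_0 the
 * destination offset becomes rte_bsf32(0x0000ff00) == 8 and the mask
 * narrows the command so the other 24 bits of reg_c[0] stay untouched.
 */
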
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires a metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

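/*
 * Illustrative example with an assumed mask value, not upstream text:
 * with reg == REG_C_0 and dv_regc0_mask = 0xffff0000, shl_c0 is 16, so
 * a MARK id of 0x5 is written as 0x5 << 16 = 0x00050000 and the mask
 * is clipped to the PMD-owned half of the register.
 */
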
/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        /*
         * There are no endianness conversions in the datapath code,
         * for performance reasons; all pattern conversions are done
         * in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

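/*
 * Illustrative note, not from the upstream source: unlike the MARK
 * path, data and mask stay in host order here; for REG_C_0 they are
 * pre-shifted by shl_c0 so that, once flow_dv_convert_modify_action()
 * fetches them as big-endian words, the SET lands on the PMD-owned
 * bits without any per-packet conversion in the datapath.
 */
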
/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bits offset in IPv6 is not byte aligned,
         * rdma-core only accepts the DSCP bits byte-aligned, starting
         * from bit 0 to bit 5, to be compatible with IPv4. There is no
         * need to shift the bits in the IPv6 case, as rdma-core requires
         * the byte-aligned value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

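/*
 * Illustrative arithmetic, not from the upstream source, assuming the
 * standard rte_ip.h mask definitions: both shifts collapse to the same
 * right-aligned 6-bit mask that rdma-core expects:
 *
 *      RTE_IPV4_HDR_DSCP_MASK >> 2     // 0xfc >> 2 = 0x3f
 *      RTE_IPV6_HDR_DSCP_MASK >> 22    // 0x0fc00000 >> 22 = 0x3f
 *
 * so conf->dscp (0..63) can be written without further shifting.
 */
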
1362 /**
1363  * Validate MARK item.
1364  *
1365  * @param[in] dev
1366  *   Pointer to the rte_eth_dev structure.
1367  * @param[in] item
1368  *   Item specification.
1369  * @param[in] attr
1370  *   Attributes of flow that includes this item.
1371  * @param[out] error
1372  *   Pointer to error structure.
1373  *
1374  * @return
1375  *   0 on success, a negative errno value otherwise and rte_errno is set.
1376  */
1377 static int
1378 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1379                            const struct rte_flow_item *item,
1380                            const struct rte_flow_attr *attr __rte_unused,
1381                            struct rte_flow_error *error)
1382 {
1383         struct mlx5_priv *priv = dev->data->dev_private;
1384         struct mlx5_dev_config *config = &priv->config;
1385         const struct rte_flow_item_mark *spec = item->spec;
1386         const struct rte_flow_item_mark *mask = item->mask;
1387         const struct rte_flow_item_mark nic_mask = {
1388                 .id = priv->sh->dv_mark_mask,
1389         };
1390         int ret;
1391
1392         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1393                 return rte_flow_error_set(error, ENOTSUP,
1394                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1395                                           "extended metadata feature"
1396                                           " isn't enabled");
1397         if (!mlx5_flow_ext_mreg_supported(dev))
1398                 return rte_flow_error_set(error, ENOTSUP,
1399                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1400                                           "extended metadata register"
1401                                           " isn't supported");
1402         if (!nic_mask.id)
1403                 return rte_flow_error_set(error, ENOTSUP,
1404                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1405                                           "extended metadata register"
1406                                           " isn't available");
1407         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1408         if (ret < 0)
1409                 return ret;
1410         if (!spec)
1411                 return rte_flow_error_set(error, EINVAL,
1412                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1413                                           item->spec,
1414                                           "data cannot be empty");
1415         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1416                 return rte_flow_error_set(error, EINVAL,
1417                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1418                                           &spec->id,
1419                                           "mark id exceeds the limit");
1420         if (!mask)
1421                 mask = &nic_mask;
1422         if (!mask->id)
1423                 return rte_flow_error_set(error, EINVAL,
1424                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1425                                         "mask cannot be zero");
1426
1427         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1428                                         (const uint8_t *)&nic_mask,
1429                                         sizeof(struct rte_flow_item_mark),
1430                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1431         if (ret < 0)
1432                 return ret;
1433         return 0;
1434 }
1435
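/*
 * Editorial usage sketch (not part of the upstream driver): a minimal MARK
 * item accepted by the validator above when extended metadata (dv_xmeta_en)
 * is enabled. Leaving the mask NULL selects the PMD default mask.
 */
static __rte_unused void
example_build_mark_item(struct rte_flow_item *item)
{
	/* The id must stay below MLX5_FLOW_MARK_MAX & dv_mark_mask. */
	static const struct rte_flow_item_mark spec = { .id = 0x55 };

	*item = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_MARK,
		.spec = &spec,
	};
}
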
1436 /**
1437  * Validate META item.
1438  *
1439  * @param[in] dev
1440  *   Pointer to the rte_eth_dev structure.
1441  * @param[in] item
1442  *   Item specification.
1443  * @param[in] attr
1444  *   Attributes of flow that includes this item.
1445  * @param[out] error
1446  *   Pointer to error structure.
1447  *
1448  * @return
1449  *   0 on success, a negative errno value otherwise and rte_errno is set.
1450  */
1451 static int
1452 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1453                            const struct rte_flow_item *item,
1454                            const struct rte_flow_attr *attr,
1455                            struct rte_flow_error *error)
1456 {
1457         struct mlx5_priv *priv = dev->data->dev_private;
1458         struct mlx5_dev_config *config = &priv->config;
1459         const struct rte_flow_item_meta *spec = item->spec;
1460         const struct rte_flow_item_meta *mask = item->mask;
1461         struct rte_flow_item_meta nic_mask = {
1462                 .data = UINT32_MAX
1463         };
1464         int reg;
1465         int ret;
1466
1467         if (!spec)
1468                 return rte_flow_error_set(error, EINVAL,
1469                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1470                                           item->spec,
1471                                           "data cannot be empty");
1472         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1473                 if (!mlx5_flow_ext_mreg_supported(dev))
1474                         return rte_flow_error_set(error, ENOTSUP,
1475                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1476                                           "extended metadata register"
1477                                           " isn't supported");
1478                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1479                 if (reg < 0)
1480                         return reg;
1481                 if (reg == REG_B)
1482                         return rte_flow_error_set(error, ENOTSUP,
1483                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1484                                           "match on reg_b "
1485                                           "isn't supported");
1486                 if (reg != REG_A)
1487                         nic_mask.data = priv->sh->dv_meta_mask;
1488         } else if (attr->transfer) {
1489                 return rte_flow_error_set(error, ENOTSUP,
1490                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1491                                         "extended metadata feature "
1492                                         "should be enabled when "
1493                                         "meta item is requested "
1494                                         "with e-switch mode");
1495         }
1496         if (!mask)
1497                 mask = &rte_flow_item_meta_mask;
1498         if (!mask->data)
1499                 return rte_flow_error_set(error, EINVAL,
1500                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1501                                         "mask cannot be zero");
1502
1503         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1504                                         (const uint8_t *)&nic_mask,
1505                                         sizeof(struct rte_flow_item_meta),
1506                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1507         return ret;
1508 }
1509
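/*
 * Editorial usage sketch (not part of the upstream driver): a META item as
 * checked above. A NULL mask falls back to rte_flow_item_meta_mask; an
 * all-zero mask would be rejected.
 */
static __rte_unused void
example_build_meta_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_meta spec = { .data = 0xcafe };

	*item = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_META,
		.spec = &spec,
	};
}
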
1510 /**
1511  * Validate TAG item.
1512  *
1513  * @param[in] dev
1514  *   Pointer to the rte_eth_dev structure.
1515  * @param[in] item
1516  *   Item specification.
1517  * @param[in] attr
1518  *   Attributes of flow that includes this item.
1519  * @param[out] error
1520  *   Pointer to error structure.
1521  *
1522  * @return
1523  *   0 on success, a negative errno value otherwise and rte_errno is set.
1524  */
1525 static int
1526 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1527                           const struct rte_flow_item *item,
1528                           const struct rte_flow_attr *attr __rte_unused,
1529                           struct rte_flow_error *error)
1530 {
1531         const struct rte_flow_item_tag *spec = item->spec;
1532         const struct rte_flow_item_tag *mask = item->mask;
1533         const struct rte_flow_item_tag nic_mask = {
1534                 .data = RTE_BE32(UINT32_MAX),
1535                 .index = 0xff,
1536         };
1537         int ret;
1538
1539         if (!mlx5_flow_ext_mreg_supported(dev))
1540                 return rte_flow_error_set(error, ENOTSUP,
1541                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1542                                           "extensive metadata register"
1543                                           " isn't supported");
1544         if (!spec)
1545                 return rte_flow_error_set(error, EINVAL,
1546                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1547                                           item->spec,
1548                                           "data cannot be empty");
1549         if (!mask)
1550                 mask = &rte_flow_item_tag_mask;
1551         if (!mask->data)
1552                 return rte_flow_error_set(error, EINVAL,
1553                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1554                                         "mask cannot be zero");
1555
1556         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1557                                         (const uint8_t *)&nic_mask,
1558                                         sizeof(struct rte_flow_item_tag),
1559                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1560         if (ret < 0)
1561                 return ret;
1562         if (mask->index != 0xff)
1563                 return rte_flow_error_set(error, EINVAL,
1564                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1565                                           "partial mask for tag index"
1566                                           " is not supported");
1567         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1568         if (ret < 0)
1569                 return ret;
1570         MLX5_ASSERT(ret != REG_NON);
1571         return 0;
1572 }
1573
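/*
 * Editorial usage sketch (not part of the upstream driver): a TAG item that
 * passes the checks above. Note the mandatory full mask on the index; the
 * index itself is translated to a register by mlx5_flow_get_reg_id().
 */
static __rte_unused void
example_build_tag_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_tag spec = {
		.data = 0xbeef,
		.index = 1,
	};
	static const struct rte_flow_item_tag mask = {
		.data = UINT32_MAX,
		.index = 0xff, /* Partial index masks are rejected. */
	};

	*item = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_TAG,
		.spec = &spec,
		.mask = &mask,
	};
}
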
1574 /**
1575  * Validate vport (PORT_ID) item.
1576  *
1577  * @param[in] dev
1578  *   Pointer to the rte_eth_dev structure.
1579  * @param[in] item
1580  *   Item specification.
1581  * @param[in] attr
1582  *   Attributes of flow that includes this item.
1583  * @param[in] item_flags
1584  *   Bit-fields that holds the items detected until now.
1585  * @param[out] error
1586  *   Pointer to error structure.
1587  *
1588  * @return
1589  *   0 on success, a negative errno value otherwise and rte_errno is set.
1590  */
1591 static int
1592 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1593                               const struct rte_flow_item *item,
1594                               const struct rte_flow_attr *attr,
1595                               uint64_t item_flags,
1596                               struct rte_flow_error *error)
1597 {
1598         const struct rte_flow_item_port_id *spec = item->spec;
1599         const struct rte_flow_item_port_id *mask = item->mask;
1600         const struct rte_flow_item_port_id switch_mask = {
1601                         .id = 0xffffffff,
1602         };
1603         struct mlx5_priv *esw_priv;
1604         struct mlx5_priv *dev_priv;
1605         int ret;
1606
1607         if (!attr->transfer)
1608                 return rte_flow_error_set(error, EINVAL,
1609                                           RTE_FLOW_ERROR_TYPE_ITEM,
1610                                           NULL,
1611                                           "match on port id is valid only"
1612                                           " when transfer flag is enabled");
1613         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1614                 return rte_flow_error_set(error, ENOTSUP,
1615                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1616                                           "multiple source ports are not"
1617                                           " supported");
1618         if (!mask)
1619                 mask = &switch_mask;
1620         if (mask->id != 0xffffffff)
1621                 return rte_flow_error_set(error, ENOTSUP,
1622                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1623                                            mask,
1624                                            "no support for partial mask on"
1625                                            " \"id\" field");
1626         ret = mlx5_flow_item_acceptable
1627                                 (item, (const uint8_t *)mask,
1628                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1629                                  sizeof(struct rte_flow_item_port_id),
1630                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1631         if (ret)
1632                 return ret;
1633         if (!spec)
1634                 return 0;
1635         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1636         if (!esw_priv)
1637                 return rte_flow_error_set(error, rte_errno,
1638                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1639                                           "failed to obtain E-Switch info for"
1640                                           " port");
1641         dev_priv = mlx5_dev_to_eswitch_info(dev);
1642         if (!dev_priv)
1643                 return rte_flow_error_set(error, rte_errno,
1644                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1645                                           NULL,
1646                                           "failed to obtain E-Switch info");
1647         if (esw_priv->domain_id != dev_priv->domain_id)
1648                 return rte_flow_error_set(error, EINVAL,
1649                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1650                                           "cannot match on a port from a"
1651                                           " different E-Switch");
1652         return 0;
1653 }
1654
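/*
 * Editorial usage sketch (not part of the upstream driver): a PORT_ID item.
 * It is valid only in transfer (E-Switch) rules, and only a full "id" mask
 * is accepted, so the mask is simply left NULL here.
 */
static __rte_unused void
example_build_port_id_item(struct rte_flow_item *item, uint32_t dpdk_port_id)
{
	/* Static storage keeps the sketch self-contained; a real caller
	 * must keep the spec alive for the lifetime of the flow rule. */
	static struct rte_flow_item_port_id spec;

	spec.id = dpdk_port_id;
	*item = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
		.spec = &spec,
	};
}
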
1655 /**
1656  * Validate VLAN item.
1657  *
1658  * @param[in] item
1659  *   Item specification.
1660  * @param[in] item_flags
1661  *   Bit-fields that holds the items detected until now.
1662  * @param[in] dev
1663  *   Ethernet device flow is being created on.
1664  * @param[out] error
1665  *   Pointer to error structure.
1666  *
1667  * @return
1668  *   0 on success, a negative errno value otherwise and rte_errno is set.
1669  */
1670 static int
1671 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1672                            uint64_t item_flags,
1673                            struct rte_eth_dev *dev,
1674                            struct rte_flow_error *error)
1675 {
1676         const struct rte_flow_item_vlan *mask = item->mask;
1677         const struct rte_flow_item_vlan nic_mask = {
1678                 .tci = RTE_BE16(UINT16_MAX),
1679                 .inner_type = RTE_BE16(UINT16_MAX),
1680                 .has_more_vlan = 1,
1681         };
1682         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1683         int ret;
1684         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1685                                         MLX5_FLOW_LAYER_INNER_L4) :
1686                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1687                                         MLX5_FLOW_LAYER_OUTER_L4);
1688         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1689                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1690
1691         if (item_flags & vlanm)
1692                 return rte_flow_error_set(error, EINVAL,
1693                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1694                                           "multiple VLAN layers not supported");
1695         else if ((item_flags & l34m) != 0)
1696                 return rte_flow_error_set(error, EINVAL,
1697                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1698                                           "VLAN cannot follow L3/L4 layer");
1699         if (!mask)
1700                 mask = &rte_flow_item_vlan_mask;
1701         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1702                                         (const uint8_t *)&nic_mask,
1703                                         sizeof(struct rte_flow_item_vlan),
1704                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1705         if (ret)
1706                 return ret;
1707         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1708                 struct mlx5_priv *priv = dev->data->dev_private;
1709
1710                 if (priv->vmwa_context) {
1711                         /*
1712                          * A non-NULL context means a virtual machine with
1713                          * SR-IOV is enabled, so a VLAN interface has to be
1714                          * created to make the hypervisor set up the E-Switch
1715                          * vport context correctly. We avoid creating multiple
1716                          * VLAN interfaces, so we cannot support a VLAN tag mask.
1717                          */
1718                         return rte_flow_error_set(error, EINVAL,
1719                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1720                                                   item,
1721                                                   "VLAN tag mask is not"
1722                                                   " supported in virtual"
1723                                                   " environment");
1724                 }
1725         }
1726         return 0;
1727 }
1728
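/*
 * Editorial usage sketch (not part of the upstream driver): a VLAN item
 * matching on VID only. The 0x0fff TCI mask is also the one mask that stays
 * valid under the SR-IOV/VM workaround described above.
 */
static __rte_unused void
example_build_vlan_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_vlan spec = {
		.tci = RTE_BE16(0x0123), /* PCP 0, DEI 0, VID 0x123. */
	};
	static const struct rte_flow_item_vlan mask = {
		.tci = RTE_BE16(0x0fff), /* VID bits only. */
	};

	*item = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.spec = &spec,
		.mask = &mask,
	};
}
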
1729 /*
1730  * GTP flags are contained in 1 byte of the format:
1731  * -------------------------------------------
1732  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1733  * |-----------------------------------------|
1734  * | value | Version | PT | Res | E | S | PN |
1735  * -------------------------------------------
1736  *
1737  * Matching is supported only for GTP flags E, S, PN.
1738  */
1739 #define MLX5_GTP_FLAGS_MASK     0x07
1740
1741 /**
1742  * Validate GTP item.
1743  *
1744  * @param[in] dev
1745  *   Pointer to the rte_eth_dev structure.
1746  * @param[in] item
1747  *   Item specification.
1748  * @param[in] item_flags
1749  *   Bit-fields that holds the items detected until now.
1750  * @param[out] error
1751  *   Pointer to error structure.
1752  *
1753  * @return
1754  *   0 on success, a negative errno value otherwise and rte_errno is set.
1755  */
1756 static int
1757 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1758                           const struct rte_flow_item *item,
1759                           uint64_t item_flags,
1760                           struct rte_flow_error *error)
1761 {
1762         struct mlx5_priv *priv = dev->data->dev_private;
1763         const struct rte_flow_item_gtp *spec = item->spec;
1764         const struct rte_flow_item_gtp *mask = item->mask;
1765         const struct rte_flow_item_gtp nic_mask = {
1766                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1767                 .msg_type = 0xff,
1768                 .teid = RTE_BE32(0xffffffff),
1769         };
1770
1771         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1772                 return rte_flow_error_set(error, ENOTSUP,
1773                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1774                                           "GTP support is not enabled");
1775         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1776                 return rte_flow_error_set(error, ENOTSUP,
1777                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1778                                           "multiple tunnel layers not"
1779                                           " supported");
1780         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1781                 return rte_flow_error_set(error, EINVAL,
1782                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1783                                           "no outer UDP layer found");
1784         if (!mask)
1785                 mask = &rte_flow_item_gtp_mask;
1786         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1787                 return rte_flow_error_set(error, ENOTSUP,
1788                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1789                                           "Match is supported for GTP"
1790                                           " flags only");
1791         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1792                                          (const uint8_t *)&nic_mask,
1793                                          sizeof(struct rte_flow_item_gtp),
1794                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1795 }
1796
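/*
 * Editorial usage sketch (not part of the upstream driver): a GTP item
 * matching the TEID and the E flag. Flag bits outside MLX5_GTP_FLAGS_MASK
 * (version, PT, reserved) would be rejected by the check above.
 */
static __rte_unused void
example_build_gtp_item(struct rte_flow_item *item, rte_be32_t teid)
{
	static struct rte_flow_item_gtp spec;
	static const struct rte_flow_item_gtp mask = {
		.v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
		.teid = RTE_BE32(UINT32_MAX),
	};

	spec = (struct rte_flow_item_gtp){
		.v_pt_rsv_flags = 0x04, /* E (extension header) flag. */
		.teid = teid,
	};
	*item = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_GTP,
		.spec = &spec,
		.mask = &mask,
	};
}
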
1797 /**
1798  * Validate IPV4 item.
1799  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1800  * add specific validation of fragment_offset field,
1801  * add specific validation of the fragment_offset field.
1802  * @param[in] item
1803  *   Item specification.
1804  * @param[in] item_flags
1805  *   Bit-fields that holds the items detected until now.
1806  * @param[out] error
1807  *   Pointer to error structure.
1808  *
1809  * @return
1810  *   0 on success, a negative errno value otherwise and rte_errno is set.
1811  */
1812 static int
1813 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1814                            uint64_t item_flags,
1815                            uint64_t last_item,
1816                            uint16_t ether_type,
1817                            struct rte_flow_error *error)
1818 {
1819         int ret;
1820         const struct rte_flow_item_ipv4 *spec = item->spec;
1821         const struct rte_flow_item_ipv4 *last = item->last;
1822         const struct rte_flow_item_ipv4 *mask = item->mask;
1823         rte_be16_t fragment_offset_spec = 0;
1824         rte_be16_t fragment_offset_last = 0;
1825         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1826                 .hdr = {
1827                         .src_addr = RTE_BE32(0xffffffff),
1828                         .dst_addr = RTE_BE32(0xffffffff),
1829                         .type_of_service = 0xff,
1830                         .fragment_offset = RTE_BE16(0xffff),
1831                         .next_proto_id = 0xff,
1832                         .time_to_live = 0xff,
1833                 },
1834         };
1835
1836         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1837                                            ether_type, &nic_ipv4_mask,
1838                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1839         if (ret < 0)
1840                 return ret;
1841         if (spec && mask)
1842                 fragment_offset_spec = spec->hdr.fragment_offset &
1843                                        mask->hdr.fragment_offset;
1844         if (!fragment_offset_spec)
1845                 return 0;
1846         /*
1847          * Spec and mask are valid; enforce a full mask to make sure the
1848          * complete value is matched correctly.
1849          */
1850         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1851                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1852                 return rte_flow_error_set(error, EINVAL,
1853                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1854                                           item, "must use full mask for"
1855                                           " fragment_offset");
1856         /*
1857          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1858          * indicating this is the first fragment of a fragmented packet.
1859          * This is not yet supported in MLX5; return an appropriate error.
1860          */
1861         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1862                 return rte_flow_error_set(error, ENOTSUP,
1863                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1864                                           "match on first fragment not "
1865                                           "supported");
1866         if (fragment_offset_spec && !last)
1867                 return rte_flow_error_set(error, ENOTSUP,
1868                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1869                                           "specified value not supported");
1870         /* spec and last are valid, validate the specified range. */
1871         fragment_offset_last = last->hdr.fragment_offset &
1872                                mask->hdr.fragment_offset;
1873         /*
1874          * Match on fragment_offset spec 0x2001 and last 0x3fff
1875          * means MF is 1 and frag-offset is > 0.
1876          * This packet is the 2nd or a later fragment, but not the last one.
1877          * This is not yet supported in MLX5; return an appropriate
1878          * error message.
1879          */
1880         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1881             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1882                 return rte_flow_error_set(error, ENOTSUP,
1883                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1884                                           last, "match on following "
1885                                           "fragments not supported");
1886         /*
1887          * Match on fragment_offset spec 0x0001 and last 0x1fff
1888          * means MF is 0 and frag-offset is > 0.
1889          * This packet is the last fragment of a fragmented packet.
1890          * This is not yet supported in MLX5; return an appropriate
1891          * error message.
1892          */
1893         if (fragment_offset_spec == RTE_BE16(1) &&
1894             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1895                 return rte_flow_error_set(error, ENOTSUP,
1896                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1897                                           last, "match on last "
1898                                           "fragment not supported");
1899         /*
1900          * Match on fragment_offset spec 0x0001 and last 0x3fff
1901          * means MF and/or frag-offset is not 0.
1902          * This is a fragmented packet.
1903          * Other range values are invalid and rejected.
1904          */
1905         if (!(fragment_offset_spec == RTE_BE16(1) &&
1906               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1907                 return rte_flow_error_set(error, ENOTSUP,
1908                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1909                                           "specified range not supported");
1910         return 0;
1911 }
1912
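/*
 * Editorial usage sketch (not part of the upstream driver): the single
 * fragment_offset spec/last range the checks above accept, matching "any
 * fragment" (MF set and/or a non-zero fragment offset).
 */
static __rte_unused void
example_build_ipv4_any_frag_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_ipv4 spec = {
		.hdr.fragment_offset = RTE_BE16(1),
	};
	static const struct rte_flow_item_ipv4 last = {
		.hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK),
	};
	static const struct rte_flow_item_ipv4 mask = {
		.hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK),
	};

	*item = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = &spec,
		.last = &last,
		.mask = &mask,
	};
}
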
1913 /**
1914  * Validate IPV6 fragment extension item.
1915  *
1916  * @param[in] item
1917  *   Item specification.
1918  * @param[in] item_flags
1919  *   Bit-fields that holds the items detected until now.
1920  * @param[out] error
1921  *   Pointer to error structure.
1922  *
1923  * @return
1924  *   0 on success, a negative errno value otherwise and rte_errno is set.
1925  */
1926 static int
1927 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1928                                     uint64_t item_flags,
1929                                     struct rte_flow_error *error)
1930 {
1931         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1932         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1933         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1934         rte_be16_t frag_data_spec = 0;
1935         rte_be16_t frag_data_last = 0;
1936         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1937         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1938                                       MLX5_FLOW_LAYER_OUTER_L4;
1939         int ret = 0;
1940         struct rte_flow_item_ipv6_frag_ext nic_mask = {
1941                 .hdr = {
1942                         .next_header = 0xff,
1943                         .frag_data = RTE_BE16(0xffff),
1944                 },
1945         };
1946
1947         if (item_flags & l4m)
1948                 return rte_flow_error_set(error, EINVAL,
1949                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1950                                           "ipv6 fragment extension item cannot "
1951                                           "follow L4 item.");
1952         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1953             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1954                 return rte_flow_error_set(error, EINVAL,
1955                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1956                                           "ipv6 fragment extension item must "
1957                                           "follow ipv6 item");
1958         if (spec && mask)
1959                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1960         if (!frag_data_spec)
1961                 return 0;
1962         /*
1963          * Spec and mask are valid; enforce a full mask to make sure the
1964          * complete value is matched correctly.
1965          */
1966         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1967                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1968                 return rte_flow_error_set(error, EINVAL,
1969                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1970                                           item, "must use full mask for"
1971                                           " frag_data");
1972         /*
1973          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0,
1974          * i.e. this is the first fragment of a fragmented packet.
1975          */
1976         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
1977                 return rte_flow_error_set(error, ENOTSUP,
1978                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1979                                           "match on first fragment not "
1980                                           "supported");
1981         if (frag_data_spec && !last)
1982                 return rte_flow_error_set(error, EINVAL,
1983                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1984                                           "specified value not supported");
1985         ret = mlx5_flow_item_acceptable
1986                                 (item, (const uint8_t *)mask,
1987                                  (const uint8_t *)&nic_mask,
1988                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
1989                                  MLX5_ITEM_RANGE_ACCEPTED, error);
1990         if (ret)
1991                 return ret;
1992         /* spec and last are valid, validate the specified range. */
1993         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
1994         /*
1995          * Match on frag_data spec 0x0009 and last 0xfff9
1996          * means M is 1 and frag-offset is > 0.
1997          * This packet is the 2nd or a later fragment, but not the last one.
1998          * This is not yet supported in MLX5; return an appropriate
1999          * error message.
2000          */
2001         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2002                                        RTE_IPV6_EHDR_MF_MASK) &&
2003             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2004                 return rte_flow_error_set(error, ENOTSUP,
2005                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2006                                           last, "match on following "
2007                                           "fragments not supported");
2008         /*
2009          * Match on frag_data spec 0x0008 and last 0xfff8
2010          * means M is 0 and frag-offset is > 0.
2011          * This packet is the last fragment of a fragmented packet.
2012          * This is not yet supported in MLX5; return an appropriate
2013          * error message.
2014          */
2015         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2016             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2017                 return rte_flow_error_set(error, ENOTSUP,
2018                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2019                                           last, "match on last "
2020                                           "fragment not supported");
2021         /* Other range values are invalid and rejected. */
2022         return rte_flow_error_set(error, EINVAL,
2023                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2024                                   "specified range not supported");
2025 }
2026
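/*
 * Editorial usage sketch (not part of the upstream driver): because every
 * frag_data spec/last combination is ultimately rejected above, the
 * supported use is matching the mere presence of the fragment header.
 */
static __rte_unused void
example_build_ipv6_frag_ext_item(struct rte_flow_item *item)
{
	*item = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
		/* No spec/mask: only header presence is matched. */
	};
}
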
2027 /**
2028  * Validate the pop VLAN action.
2029  *
2030  * @param[in] dev
2031  *   Pointer to the rte_eth_dev structure.
2032  * @param[in] action_flags
2033  *   Holds the actions detected until now.
2034  * @param[in] action
2035  *   Pointer to the pop vlan action.
2036  * @param[in] item_flags
2037  *   The items found in this flow rule.
2038  * @param[in] attr
2039  *   Pointer to flow attributes.
2040  * @param[out] error
2041  *   Pointer to error structure.
2042  *
2043  * @return
2044  *   0 on success, a negative errno value otherwise and rte_errno is set.
2045  */
2046 static int
2047 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2048                                  uint64_t action_flags,
2049                                  const struct rte_flow_action *action,
2050                                  uint64_t item_flags,
2051                                  const struct rte_flow_attr *attr,
2052                                  struct rte_flow_error *error)
2053 {
2054         const struct mlx5_priv *priv = dev->data->dev_private;
2055
2058         if (!priv->sh->pop_vlan_action)
2059                 return rte_flow_error_set(error, ENOTSUP,
2060                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2061                                           NULL,
2062                                           "pop vlan action is not supported");
2063         if (attr->egress)
2064                 return rte_flow_error_set(error, ENOTSUP,
2065                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2066                                           NULL,
2067                                           "pop vlan action not supported for "
2068                                           "egress");
2069         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2070                 return rte_flow_error_set(error, ENOTSUP,
2071                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2072                                           "no support for multiple VLAN "
2073                                           "actions");
2074         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2075         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2076             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2077                 return rte_flow_error_set(error, ENOTSUP,
2078                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2079                                           NULL,
2080                                           "cannot pop vlan after decap without "
2081                                           "match on inner vlan in the flow");
2082         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2083         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2084             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2085                 return rte_flow_error_set(error, ENOTSUP,
2086                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2087                                           NULL,
2088                                           "cannot pop vlan without a "
2089                                           "match on (outer) vlan in the flow");
2090         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2091                 return rte_flow_error_set(error, EINVAL,
2092                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2093                                           "wrong action order, port_id should "
2094                                           "be after pop VLAN action");
2095         if (!attr->transfer && priv->representor)
2096                 return rte_flow_error_set(error, ENOTSUP,
2097                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2098                                           "pop vlan action for VF representor "
2099                                           "not supported on NIC table");
2100         return 0;
2101 }
2102
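/*
 * Editorial usage sketch (not part of the upstream driver): OF_POP_VLAN
 * takes no configuration; the rule's pattern must match the VLAN being
 * popped (outer, or inner when preceded by a decap action).
 */
static __rte_unused void
example_build_pop_vlan_action(struct rte_flow_action *action)
{
	*action = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
	};
}
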
2103 /**
2104  * Get default VLAN info from the VLAN match item, if any.
2105  *
2106  * @param[in] items
2107  *   The list of item specifications.
2108  * @param[out] vlan
2109  *   Pointer to the VLAN header structure to fill in. Fields are
2110  *   updated only when the pattern supplies a full match mask for
2111  *   them. Unlike most helpers in this file, this function returns
2112  *   nothing.
2113  */
2114 static void
2115 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2116                                   struct rte_vlan_hdr *vlan)
2117 {
2118         const struct rte_flow_item_vlan nic_mask = {
2119                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2120                                 MLX5DV_FLOW_VLAN_VID_MASK),
2121                 .inner_type = RTE_BE16(0xffff),
2122         };
2123
2124         if (items == NULL)
2125                 return;
2126         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2127                 int type = items->type;
2128
2129                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2130                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2131                         break;
2132         }
2133         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2134                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2135                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2136
2137                 /* If VLAN item in pattern doesn't contain data, return here. */
2138                 if (!vlan_v)
2139                         return;
2140                 if (!vlan_m)
2141                         vlan_m = &nic_mask;
2142                 /* Only full match values are accepted */
2143                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2144                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2145                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2146                         vlan->vlan_tci |=
2147                                 rte_be_to_cpu_16(vlan_v->tci &
2148                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2149                 }
2150                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2151                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2152                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2153                         vlan->vlan_tci |=
2154                                 rte_be_to_cpu_16(vlan_v->tci &
2155                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2156                 }
2157                 if (vlan_m->inner_type == nic_mask.inner_type)
2158                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2159                                                            vlan_m->inner_type);
2160         }
2161 }
2162
2163 /**
2164  * Validate the push VLAN action.
2165  *
2166  * @param[in] dev
2167  *   Pointer to the rte_eth_dev structure.
2168  * @param[in] action_flags
2169  *   Holds the actions detected until now.
2170  * @param[in] vlan_m
2171  *   VLAN item mask from the flow pattern, or NULL if not present.
2172  * @param[in] action
2173  *   Pointer to the action structure.
2174  * @param[in] attr
2175  *   Pointer to flow attributes
2176  * @param[out] error
2177  *   Pointer to error structure.
2178  *
2179  * @return
2180  *   0 on success, a negative errno value otherwise and rte_errno is set.
2181  */
2182 static int
2183 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2184                                   uint64_t action_flags,
2185                                   const struct rte_flow_item_vlan *vlan_m,
2186                                   const struct rte_flow_action *action,
2187                                   const struct rte_flow_attr *attr,
2188                                   struct rte_flow_error *error)
2189 {
2190         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2191         const struct mlx5_priv *priv = dev->data->dev_private;
2192
2193         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2194             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2195                 return rte_flow_error_set(error, EINVAL,
2196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2197                                           "invalid vlan ethertype");
2198         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2199                 return rte_flow_error_set(error, EINVAL,
2200                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2201                                           "wrong action order, port_id should "
2202                                           "be after push VLAN");
2203         if (!attr->transfer && priv->representor)
2204                 return rte_flow_error_set(error, ENOTSUP,
2205                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2206                                           "push vlan action for VF representor "
2207                                           "not supported on NIC table");
2208         if (vlan_m &&
2209             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2210             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2211                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2212             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2213             !(mlx5_flow_find_action
2214                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2215                 return rte_flow_error_set(error, EINVAL,
2216                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2217                                           "not full match mask on VLAN PCP and "
2218                                           "there is no of_set_vlan_pcp action, "
2219                                           "push VLAN action cannot figure out "
2220                                           "PCP value");
2221         if (vlan_m &&
2222             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2223             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2224                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2225             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2226             !(mlx5_flow_find_action
2227                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2228                 return rte_flow_error_set(error, EINVAL,
2229                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2230                                           "not full match mask on VLAN VID and "
2231                                           "there is no of_set_vlan_vid action, "
2232                                           "push VLAN action cannot figure out "
2233                                           "VID value");
2235         return 0;
2236 }
2237
2238 /**
2239  * Validate the set VLAN PCP.
2240  *
2241  * @param[in] action_flags
2242  *   Holds the actions detected until now.
2243  * @param[in] actions
2244  *   Pointer to the list of actions remaining in the flow rule.
2245  * @param[out] error
2246  *   Pointer to error structure.
2247  *
2248  * @return
2249  *   0 on success, a negative errno value otherwise and rte_errno is set.
2250  */
2251 static int
2252 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2253                                      const struct rte_flow_action actions[],
2254                                      struct rte_flow_error *error)
2255 {
2256         const struct rte_flow_action *action = actions;
2257         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2258
2259         if (conf->vlan_pcp > 7)
2260                 return rte_flow_error_set(error, EINVAL,
2261                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2262                                           "VLAN PCP value is too big");
2263         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2264                 return rte_flow_error_set(error, ENOTSUP,
2265                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2266                                           "set VLAN PCP action must follow "
2267                                           "the push VLAN action");
2268         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2269                 return rte_flow_error_set(error, ENOTSUP,
2270                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2271                                           "Multiple VLAN PCP modification are "
2272                                           "not supported");
2273         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2274                 return rte_flow_error_set(error, EINVAL,
2275                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2276                                           "wrong action order, port_id should "
2277                                           "be after set VLAN PCP");
2278         return 0;
2279 }
2280
2281 /**
2282  * Validate the set VLAN VID.
2283  *
2284  * @param[in] item_flags
2285  *   Holds the items detected in this rule.
2286  * @param[in] action_flags
2287  *   Holds the actions detected until now.
2288  * @param[in] actions
2289  *   Pointer to the list of actions remaining in the flow rule.
2290  * @param[out] error
2291  *   Pointer to error structure.
2292  *
2293  * @return
2294  *   0 on success, a negative errno value otherwise and rte_errno is set.
2295  */
2296 static int
2297 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2298                                      uint64_t action_flags,
2299                                      const struct rte_flow_action actions[],
2300                                      struct rte_flow_error *error)
2301 {
2302         const struct rte_flow_action *action = actions;
2303         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2304
2305         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2306                 return rte_flow_error_set(error, EINVAL,
2307                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2308                                           "VLAN VID value is too big");
2309         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2310             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2311                 return rte_flow_error_set(error, ENOTSUP,
2312                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2313                                           "set VLAN VID action must follow push"
2314                                           " VLAN action or match on VLAN item");
2315         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2316                 return rte_flow_error_set(error, ENOTSUP,
2317                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2318                                           "Multiple VLAN VID modifications are "
2319                                           "not supported");
2320         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2321                 return rte_flow_error_set(error, EINVAL,
2322                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2323                                           "wrong action order, port_id should "
2324                                           "be after set VLAN VID");
2325         return 0;
2326 }
2327
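/*
 * Editorial usage sketch (not part of the upstream driver): a push-VLAN
 * action list satisfying the three validators above. Explicit PCP/VID
 * setters spare the PMD from inferring the values out of the pattern.
 */
static __rte_unused void
example_build_push_vlan_actions(struct rte_flow_action actions[4])
{
	static const struct rte_flow_action_of_push_vlan push = {
		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
	};
	static const struct rte_flow_action_of_set_vlan_pcp pcp = {
		.vlan_pcp = 3, /* Must be <= 7. */
	};
	static const struct rte_flow_action_of_set_vlan_vid vid = {
		.vlan_vid = RTE_BE16(0x123),
	};

	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push, };
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp, };
	actions[2] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid, };
	actions[3] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END, };
}
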
2328 /**
2329  * Validate the FLAG action.
2330  *
2331  * @param[in] dev
2332  *   Pointer to the rte_eth_dev structure.
2333  * @param[in] action_flags
2334  *   Holds the actions detected until now.
2335  * @param[in] attr
2336  *   Pointer to flow attributes
2337  * @param[out] error
2338  *   Pointer to error structure.
2339  *
2340  * @return
2341  *   0 on success, a negative errno value otherwise and rte_errno is set.
2342  */
2343 static int
2344 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2345                              uint64_t action_flags,
2346                              const struct rte_flow_attr *attr,
2347                              struct rte_flow_error *error)
2348 {
2349         struct mlx5_priv *priv = dev->data->dev_private;
2350         struct mlx5_dev_config *config = &priv->config;
2351         int ret;
2352
2353         /* Fall back if no extended metadata register support. */
2354         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2355                 return mlx5_flow_validate_action_flag(action_flags, attr,
2356                                                       error);
2357         /* Extensive metadata mode requires registers. */
2358         if (!mlx5_flow_ext_mreg_supported(dev))
2359                 return rte_flow_error_set(error, ENOTSUP,
2360                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2361                                           "no metadata registers "
2362                                           "to support flag action");
2363         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2364                 return rte_flow_error_set(error, ENOTSUP,
2365                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2366                                           "extended metadata register"
2367                                           " isn't available");
2368         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2369         if (ret < 0)
2370                 return ret;
2371         MLX5_ASSERT(ret > 0);
2372         if (action_flags & MLX5_FLOW_ACTION_MARK)
2373                 return rte_flow_error_set(error, EINVAL,
2374                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2375                                           "can't mark and flag in same flow");
2376         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2377                 return rte_flow_error_set(error, EINVAL,
2378                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2379                                           "can't have 2 flag"
2380                                           " actions in same flow");
2381         return 0;
2382 }
2383
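/*
 * Editorial usage sketch (not part of the upstream driver): FLAG takes no
 * configuration and cannot be combined with MARK in the same flow.
 */
static __rte_unused void
example_build_flag_action(struct rte_flow_action *action)
{
	*action = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_FLAG,
	};
}
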
2384 /**
2385  * Validate MARK action.
2386  *
2387  * @param[in] dev
2388  *   Pointer to the rte_eth_dev structure.
2389  * @param[in] action
2390  *   Pointer to action.
2391  * @param[in] action_flags
2392  *   Holds the actions detected until now.
2393  * @param[in] attr
2394  *   Pointer to flow attributes
2395  * @param[out] error
2396  *   Pointer to error structure.
2397  *
2398  * @return
2399  *   0 on success, a negative errno value otherwise and rte_errno is set.
2400  */
2401 static int
2402 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2403                              const struct rte_flow_action *action,
2404                              uint64_t action_flags,
2405                              const struct rte_flow_attr *attr,
2406                              struct rte_flow_error *error)
2407 {
2408         struct mlx5_priv *priv = dev->data->dev_private;
2409         struct mlx5_dev_config *config = &priv->config;
2410         const struct rte_flow_action_mark *mark = action->conf;
2411         int ret;
2412
2413         /* Fall back if no extended metadata register support. */
2414         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2415                 return mlx5_flow_validate_action_mark(action, action_flags,
2416                                                       attr, error);
2417         /* Extensive metadata mode requires registers. */
2418         if (!mlx5_flow_ext_mreg_supported(dev))
2419                 return rte_flow_error_set(error, ENOTSUP,
2420                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2421                                           "no metadata registers "
2422                                           "to support mark action");
2423         if (!priv->sh->dv_mark_mask)
2424                 return rte_flow_error_set(error, ENOTSUP,
2425                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2426                                           "extended metadata register"
2427                                           " isn't available");
2428         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2429         if (ret < 0)
2430                 return ret;
2431         MLX5_ASSERT(ret > 0);
2432         if (!mark)
2433                 return rte_flow_error_set(error, EINVAL,
2434                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2435                                           "configuration cannot be null");
2436         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2437                 return rte_flow_error_set(error, EINVAL,
2438                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2439                                           &mark->id,
2440                                           "mark id exceeds the limit");
2441         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2442                 return rte_flow_error_set(error, EINVAL,
2443                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2444                                           "can't flag and mark in same flow");
2445         if (action_flags & MLX5_FLOW_ACTION_MARK)
2446                 return rte_flow_error_set(error, EINVAL,
2447                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2448                                           "can't have 2 mark actions in same"
2449                                           " flow");
2450         return 0;
2451 }
2452
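/*
 * Editorial usage sketch (not part of the upstream driver): a MARK action.
 * FLAG and MARK are mutually exclusive within one flow, and the id is
 * bounded by MLX5_FLOW_MARK_MAX and the probed register mask.
 */
static __rte_unused void
example_build_mark_action(struct rte_flow_action actions[2])
{
	static const struct rte_flow_action_mark mark = { .id = 0x55 };

	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark, };
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END, };
}
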
2453 /**
2454  * Validate SET_META action.
2455  *
2456  * @param[in] dev
2457  *   Pointer to the rte_eth_dev structure.
2458  * @param[in] action
2459  *   Pointer to the action structure.
2460  * @param[in] action_flags
2461  *   Holds the actions detected until now.
2462  * @param[in] attr
2463  *   Pointer to flow attributes
2464  * @param[out] error
2465  *   Pointer to error structure.
2466  *
2467  * @return
2468  *   0 on success, a negative errno value otherwise and rte_errno is set.
2469  */
2470 static int
2471 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2472                                  const struct rte_flow_action *action,
2473                                  uint64_t action_flags __rte_unused,
2474                                  const struct rte_flow_attr *attr,
2475                                  struct rte_flow_error *error)
2476 {
2477         const struct rte_flow_action_set_meta *conf;
2478         uint32_t nic_mask = UINT32_MAX;
2479         int reg;
2480
2481         if (!mlx5_flow_ext_mreg_supported(dev))
2482                 return rte_flow_error_set(error, ENOTSUP,
2483                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2484                                           "extended metadata register"
2485                                           " isn't supported");
2486         reg = flow_dv_get_metadata_reg(dev, attr, error);
2487         if (reg < 0)
2488                 return reg;
2489         if (reg != REG_A && reg != REG_B) {
2490                 struct mlx5_priv *priv = dev->data->dev_private;
2491
2492                 nic_mask = priv->sh->dv_meta_mask;
2493         }
2494         if (!(action->conf))
2495                 return rte_flow_error_set(error, EINVAL,
2496                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2497                                           "configuration cannot be null");
2498         conf = (const struct rte_flow_action_set_meta *)action->conf;
2499         if (!conf->mask)
2500                 return rte_flow_error_set(error, EINVAL,
2501                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2502                                           "zero mask doesn't have any effect");
2503         if (conf->mask & ~nic_mask)
2504                 return rte_flow_error_set(error, EINVAL,
2505                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2506                                           "meta data must be within reg C0");
2507         return 0;
2508 }
2509
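/*
 * Editorial usage sketch (not part of the upstream driver): a SET_META
 * action. A zero mask is rejected above, and on configurations mapping
 * metadata to register C0 the mask must fit the probed register bits.
 */
static __rte_unused void
example_build_set_meta_action(struct rte_flow_action *action)
{
	static const struct rte_flow_action_set_meta conf = {
		.data = 0xcafe,
		.mask = 0xffff, /* Update only the low 16 bits. */
	};

	*action = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_SET_META, .conf = &conf, };
}
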
2510 /**
2511  * Validate SET_TAG action.
2512  *
2513  * @param[in] dev
2514  *   Pointer to the rte_eth_dev structure.
2515  * @param[in] action
2516  *   Pointer to the action structure.
2517  * @param[in] action_flags
2518  *   Holds the actions detected until now.
2519  * @param[in] attr
2520  *   Pointer to flow attributes
2521  * @param[out] error
2522  *   Pointer to error structure.
2523  *
2524  * @return
2525  *   0 on success, a negative errno value otherwise and rte_errno is set.
2526  */
2527 static int
2528 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2529                                 const struct rte_flow_action *action,
2530                                 uint64_t action_flags,
2531                                 const struct rte_flow_attr *attr,
2532                                 struct rte_flow_error *error)
2533 {
2534         const struct rte_flow_action_set_tag *conf;
2535         const uint64_t terminal_action_flags =
2536                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2537                 MLX5_FLOW_ACTION_RSS;
2538         int ret;
2539
2540         if (!mlx5_flow_ext_mreg_supported(dev))
2541                 return rte_flow_error_set(error, ENOTSUP,
2542                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2543                                           "extensive metadata register"
2544                                           " isn't supported");
2545         if (!(action->conf))
2546                 return rte_flow_error_set(error, EINVAL,
2547                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2548                                           "configuration cannot be null");
2549         conf = (const struct rte_flow_action_set_tag *)action->conf;
2550         if (!conf->mask)
2551                 return rte_flow_error_set(error, EINVAL,
2552                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2553                                           "zero mask doesn't have any effect");
2554         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2555         if (ret < 0)
2556                 return ret;
2557         if (!attr->transfer && attr->ingress &&
2558             (action_flags & terminal_action_flags))
2559                 return rte_flow_error_set(error, EINVAL,
2560                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2561                                           "set_tag has no effect"
2562                                           " with terminal actions");
2563         return 0;
2564 }
2565
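/*
 * Illustrative configuration (hypothetical, not part of this file) that
 * would pass the validation above; index selects the application tag
 * register resolved through mlx5_flow_get_reg_id(MLX5_APP_TAG, ...).
 *
 *	struct rte_flow_action_set_tag tag_conf = {
 *		.data = 0xbeef,
 *		.mask = 0xffff,
 *		.index = 0,
 *	};
 */
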
2566 /**
2567  * Validate count action.
2568  *
2569  * @param[in] dev
2570  *   Pointer to rte_eth_dev structure.
2571  * @param[out] error
2572  *   Pointer to error structure.
2573  *
2574  * @return
2575  *   0 on success, a negative errno value otherwise and rte_errno is set.
2576  */
2577 static int
2578 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2579                               struct rte_flow_error *error)
2580 {
2581         struct mlx5_priv *priv = dev->data->dev_private;
2582
2583         if (!priv->config.devx)
2584                 goto notsup_err;
2585 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2586         return 0;
2587 #endif
2588 notsup_err:
2589         return rte_flow_error_set
2590                       (error, ENOTSUP,
2591                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2592                        NULL,
2593                        "count action not supported");
2594 }
2595
2596 /**
2597  * Validate the L2 encap action.
2598  *
2599  * @param[in] dev
2600  *   Pointer to the rte_eth_dev structure.
2601  * @param[in] action_flags
2602  *   Holds the actions detected until now.
2603  * @param[in] action
2604  *   Pointer to the action structure.
2605  * @param[in] attr
2606  *   Pointer to flow attributes.
2607  * @param[out] error
2608  *   Pointer to error structure.
2609  *
2610  * @return
2611  *   0 on success, a negative errno value otherwise and rte_errno is set.
2612  */
2613 static int
2614 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2615                                  uint64_t action_flags,
2616                                  const struct rte_flow_action *action,
2617                                  const struct rte_flow_attr *attr,
2618                                  struct rte_flow_error *error)
2619 {
2620         const struct mlx5_priv *priv = dev->data->dev_private;
2621
2622         if (!(action->conf))
2623                 return rte_flow_error_set(error, EINVAL,
2624                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2625                                           "configuration cannot be null");
2626         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2627                 return rte_flow_error_set(error, EINVAL,
2628                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2629                                           "can only have a single encap action "
2630                                           "in a flow");
2631         if (!attr->transfer && priv->representor)
2632                 return rte_flow_error_set(error, ENOTSUP,
2633                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2634                                           "encap action for VF representor "
2635                                           "not supported on NIC table");
2636         return 0;
2637 }
2638
2639 /**
2640  * Validate a decap action.
2641  *
2642  * @param[in] dev
2643  *   Pointer to the rte_eth_dev structure.
2644  * @param[in] action_flags
2645  *   Holds the actions detected until now.
2646  * @param[in] attr
2647  *   Pointer to flow attributes.
2648  * @param[out] error
2649  *   Pointer to error structure.
2650  *
2651  * @return
2652  *   0 on success, a negative errno value otherwise and rte_errno is set.
2653  */
2654 static int
2655 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2656                               uint64_t action_flags,
2657                               const struct rte_flow_attr *attr,
2658                               struct rte_flow_error *error)
2659 {
2660         const struct mlx5_priv *priv = dev->data->dev_private;
2661
2662         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2663             !priv->config.decap_en)
2664                 return rte_flow_error_set(error, ENOTSUP,
2665                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2666                                           "decap is not enabled");
2667         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2668                 return rte_flow_error_set(error, ENOTSUP,
2669                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2670                                           action_flags &
2671                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2672                                           "have a single decap action" : "decap "
2673                                           "after encap is not supported");
2674         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2675                 return rte_flow_error_set(error, EINVAL,
2676                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2677                                           "can't have decap action after"
2678                                           " modify action");
2679         if (attr->egress)
2680                 return rte_flow_error_set(error, ENOTSUP,
2681                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2682                                           NULL,
2683                                           "decap action not supported for "
2684                                           "egress");
2685         if (!attr->transfer && priv->representor)
2686                 return rte_flow_error_set(error, ENOTSUP,
2687                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2688                                           "decap action for VF representor "
2689                                           "not supported on NIC table");
2690         return 0;
2691 }
2692
2693 static const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2694
2695 /**
2696  * Validate the raw encap and decap actions.
2697  *
2698  * @param[in] dev
2699  *   Pointer to the rte_eth_dev structure.
2700  * @param[in] decap
2701  *   Pointer to the decap action.
2702  * @param[in] encap
2703  *   Pointer to the encap action.
2704  * @param[in] attr
2705  *   Pointer to flow attributes.
2706  * @param[in, out] action_flags
2707  *   Holds the actions detected until now.
2708  * @param[out] actions_n
2709  *   Pointer to the number of actions counter.
2710  * @param[out] error
2711  *   Pointer to error structure.
2712  *
2713  * @return
2714  *   0 on success, a negative errno value otherwise and rte_errno is set.
2715  */
2716 static int
2717 flow_dv_validate_action_raw_encap_decap
2718         (struct rte_eth_dev *dev,
2719          const struct rte_flow_action_raw_decap *decap,
2720          const struct rte_flow_action_raw_encap *encap,
2721          const struct rte_flow_attr *attr, uint64_t *action_flags,
2722          int *actions_n, struct rte_flow_error *error)
2723 {
2724         const struct mlx5_priv *priv = dev->data->dev_private;
2725         int ret;
2726
2727         if (encap && (!encap->size || !encap->data))
2728                 return rte_flow_error_set(error, EINVAL,
2729                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2730                                           "raw encap data cannot be empty");
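        /*
         * Size decision matrix, with MLX5_ENCAPSULATION_DECISION_SIZE as
         * the L2/L3 reformat boundary:
         *   decap <= boundary < encap: single L3 encap reformat;
         *   encap <= boundary < decap: single L3 decap reformat;
         *   both above the boundary:   two plain L2 actions, decap + encap;
         *   both at or below:          rejected.
         */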
2731         if (decap && encap) {
2732                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2733                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2734                         /* L3 encap. */
2735                         decap = NULL;
2736                 else if (encap->size <=
2737                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2738                            decap->size >
2739                            MLX5_ENCAPSULATION_DECISION_SIZE)
2740                         /* L3 decap. */
2741                         encap = NULL;
2742                 else if (encap->size >
2743                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2744                            decap->size >
2745                            MLX5_ENCAPSULATION_DECISION_SIZE)
2746                         /* 2 L2 actions: encap and decap. */
2747                         ;
2748                 else
2749                         return rte_flow_error_set(error,
2750                                 ENOTSUP,
2751                                 RTE_FLOW_ERROR_TYPE_ACTION,
2752                                 NULL, "unsupported combination:"
2753                                 " raw decap and raw encap sizes"
2754                                 " are both too small");
2755         }
2756         if (decap) {
2757                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2758                                                     error);
2759                 if (ret < 0)
2760                         return ret;
2761                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2762                 ++(*actions_n);
2763         }
2764         if (encap) {
2765                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2766                         return rte_flow_error_set(error, ENOTSUP,
2767                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2768                                                   NULL,
2769                                                   "small raw encap size");
2770                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2771                         return rte_flow_error_set(error, EINVAL,
2772                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2773                                                   NULL,
2774                                                   "more than one encap action");
2775                 if (!attr->transfer && priv->representor)
2776                         return rte_flow_error_set
2777                                         (error, ENOTSUP,
2778                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2779                                          "encap action for VF representor "
2780                                          "not supported on NIC table");
2781                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2782                 ++(*actions_n);
2783         }
2784         return 0;
2785 }
2786
2787 /**
2788  * Match encap_decap resource.
2789  *
2790  * @param entry
2791  *   Pointer to exist resource entry object.
2792  * @param ctx
2793  *   Pointer to new encap_decap resource.
2794  *
2795  * @return
2796  *   0 on matching, -1 otherwise.
2797  */
2798 static int
2799 flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
2800 {
2801         struct mlx5_flow_dv_encap_decap_resource *resource;
2802         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2803
2804         resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx;
2805         cache_resource = container_of(entry,
2806                                       struct mlx5_flow_dv_encap_decap_resource,
2807                                       entry);
2808         if (resource->entry.key == cache_resource->entry.key &&
2809             resource->reformat_type == cache_resource->reformat_type &&
2810             resource->ft_type == cache_resource->ft_type &&
2811             resource->flags == cache_resource->flags &&
2812             resource->size == cache_resource->size &&
2813             !memcmp((const void *)resource->buf,
2814                     (const void *)cache_resource->buf,
2815                     resource->size))
2816                 return 0;
2817         return -1;
2818 }
2819
2820 /**
2821  * Find existing encap/decap resource or create and register a new one.
2822  *
2823  * @param[in, out] dev
2824  *   Pointer to rte_eth_dev structure.
2825  * @param[in, out] resource
2826  *   Pointer to encap/decap resource.
2827  * @param[in, out] dev_flow
2828  *   Pointer to the dev_flow.
2829  * @param[out] error
2830  *   Pointer to error structure.
2831  *
2832  * @return
2833  *   0 on success, a negative errno value otherwise and rte_errno is set.
2834  */
2835 static int
2836 flow_dv_encap_decap_resource_register
2837                         (struct rte_eth_dev *dev,
2838                          struct mlx5_flow_dv_encap_decap_resource *resource,
2839                          struct mlx5_flow *dev_flow,
2840                          struct rte_flow_error *error)
2841 {
2842         struct mlx5_priv *priv = dev->data->dev_private;
2843         struct mlx5_dev_ctx_shared *sh = priv->sh;
2844         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2845         struct mlx5dv_dr_domain *domain;
2846         struct mlx5_hlist_entry *entry;
2847         union mlx5_flow_encap_decap_key encap_decap_key = {
2848                 {
2849                         .ft_type = resource->ft_type,
2850                         .refmt_type = resource->reformat_type,
2851                         .buf_size = resource->size,
2852                         .table_level = !!dev_flow->dv.group,
2853                         .cksum = 0,
2854                 }
2855         };
2856         int ret;
2857
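        /*
         * Group 0 maps to the root table: flags 1 requests a root-level
         * action (MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL), 0 a table-level one.
         */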
2858         resource->flags = dev_flow->dv.group ? 0 : 1;
2859         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2860                 domain = sh->fdb_domain;
2861         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2862                 domain = sh->rx_domain;
2863         else
2864                 domain = sh->tx_domain;
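        /*
         * The 64-bit hash-list key folds type, size and table level
         * together with a checksum of the raw buffer; exact equality on
         * hash collisions is re-checked by
         * flow_dv_encap_decap_resource_match().
         */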
2865         encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2866                                                 resource->size, 0);
2867         resource->entry.key = encap_decap_key.v64;
2868         /* Lookup a matching resource from cache. */
2869         entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key,
2870                                      flow_dv_encap_decap_resource_match,
2871                                      (void *)resource);
2872         if (entry) {
2873                 cache_resource = container_of(entry,
2874                         struct mlx5_flow_dv_encap_decap_resource, entry);
2875                 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2876                         (void *)cache_resource,
2877                         __atomic_load_n(&cache_resource->refcnt,
2878                                         __ATOMIC_RELAXED));
2879                 __atomic_fetch_add(&cache_resource->refcnt, 1,
2880                                    __ATOMIC_RELAXED);
2881                 dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
2882                 dev_flow->dv.encap_decap = cache_resource;
2883                 return 0;
2884         }
2885         /* Register new encap/decap resource. */
2886         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2887                                        &dev_flow->handle->dvh.rix_encap_decap);
2888         if (!cache_resource)
2889                 return rte_flow_error_set(error, ENOMEM,
2890                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2891                                           "cannot allocate resource memory");
2892         *cache_resource = *resource;
2893         cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap;
2894         ret = mlx5_flow_os_create_flow_action_packet_reformat
2895                                         (sh->ctx, domain, cache_resource,
2896                                          &cache_resource->action);
2897         if (ret) {
2898                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
                                     cache_resource->idx);
2899                 return rte_flow_error_set(error, ENOMEM,
2900                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2901                                           NULL, "cannot create action");
2902         }
2903         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
2904         if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry,
2905                                  flow_dv_encap_decap_resource_match,
2906                                  (void *)cache_resource)) {
2907                 claim_zero(mlx5_flow_os_destroy_flow_action
2908                                                 (cache_resource->action));
2909                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2910                                 cache_resource->idx);
2911                 return rte_flow_error_set(error, EEXIST,
2912                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2913                                           NULL, "action exist");
2914         }
2915         dev_flow->dv.encap_decap = cache_resource;
2916         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2917                 (void *)cache_resource,
2918                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
2919         return 0;
2920 }
2921
2922 /**
2923  * Find existing table jump resource or create and register a new one.
2924  *
2925  * @param[in, out] dev
2926  *   Pointer to rte_eth_dev structure.
2927  * @param[in, out] tbl
2928  *   Pointer to flow table resource.
2929  * @param[in, out] dev_flow
2930  *   Pointer to the dev_flow.
2931  * @param[out] error
2932  *   Pointer to error structure.
2933  *
2934  * @return
2935  *   0 on success, a negative errno value otherwise and rte_errno is set.
2936  */
2937 static int
2938 flow_dv_jump_tbl_resource_register
2939                         (struct rte_eth_dev *dev,
2940                          struct mlx5_flow_tbl_resource *tbl,
2941                          struct mlx5_flow *dev_flow,
2942                          struct rte_flow_error *error)
2943 {
2944         struct mlx5_flow_tbl_data_entry *tbl_data =
2945                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2946         int cnt, ret;
2947
2948         MLX5_ASSERT(tbl);
2949         cnt = __atomic_load_n(&tbl_data->jump.refcnt, __ATOMIC_ACQUIRE);
2950         if (!cnt) {
2951                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
2952                                 (tbl->obj, &tbl_data->jump.action);
2953                 if (ret)
2954                         return rte_flow_error_set(error, ENOMEM,
2955                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2956                                         NULL, "cannot create jump action");
2957                 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2958                         (void *)&tbl_data->jump, cnt);
2959         } else {
2960                 /* The jump action already holds a table reference,
                      * drop the one taken by the caller's table lookup.
                      */
2961                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
2962                 MLX5_ASSERT(tbl_data->jump.action);
2963                 DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
2964                         (void *)&tbl_data->jump, cnt);
2965         }
2966         __atomic_fetch_add(&tbl_data->jump.refcnt, 1, __ATOMIC_RELEASE);
2967         dev_flow->handle->rix_jump = tbl_data->idx;
2968         dev_flow->dv.jump = &tbl_data->jump;
2969         return 0;
2970 }
2971
2972 /**
2973  * Find existing default miss resource or create and register a new one.
2974  *
2975  * @param[in, out] dev
2976  *   Pointer to rte_eth_dev structure.
2977  * @param[out] error
2978  *   Pointer to error structure.
2979  *
2980  * @return
2981  *   0 on success, a negative errno value otherwise and rte_errno is set.
2982  */
2983 static int
2984 flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
2985                 struct rte_flow_error *error)
2986 {
2987         struct mlx5_priv *priv = dev->data->dev_private;
2988         struct mlx5_dev_ctx_shared *sh = priv->sh;
2989         struct mlx5_flow_default_miss_resource *cache_resource =
2990                         &sh->default_miss;
2991         int cnt = __atomic_load_n(&cache_resource->refcnt, __ATOMIC_ACQUIRE);
2992
2993         if (!cnt) {
2994                 MLX5_ASSERT(!cache_resource->action);
2995                 cache_resource->action =
2996                         mlx5_glue->dr_create_flow_action_default_miss();
2997                 if (!cache_resource->action)
2998                         return rte_flow_error_set(error, ENOMEM,
2999                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3000                                         "cannot create default miss action");
3001                 DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
3002                                 (void *)cache_resource->action, cnt);
3003         }
3004         __atomic_fetch_add(&cache_resource->refcnt, 1, __ATOMIC_RELEASE);
3005         return 0;
3006 }
3007
3008 /**
3009  * Find existing table port ID resource or create and register a new one.
3010  *
3011  * @param[in, out] dev
3012  *   Pointer to rte_eth_dev structure.
3013  * @param[in, out] resource
3014  *   Pointer to port ID action resource.
3015  * @param[in, out] dev_flow
3016  *   Pointer to the dev_flow.
3017  * @param[out] error
3018  *   Pointer to error structure.
3019  *
3020  * @return
3021  *   0 on success, a negative errno value otherwise and rte_errno is set.
3022  */
3023 static int
3024 flow_dv_port_id_action_resource_register
3025                         (struct rte_eth_dev *dev,
3026                          struct mlx5_flow_dv_port_id_action_resource *resource,
3027                          struct mlx5_flow *dev_flow,
3028                          struct rte_flow_error *error)
3029 {
3030         struct mlx5_priv *priv = dev->data->dev_private;
3031         struct mlx5_dev_ctx_shared *sh = priv->sh;
3032         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
3033         uint32_t idx = 0;
3034         int ret;
3035
3036         /* Lookup a matching resource from cache. */
3037         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
3038                       idx, cache_resource, next) {
3039                 if (resource->port_id == cache_resource->port_id) {
3040                         DRV_LOG(DEBUG, "port id action resource resource %p: "
3041                                 "refcnt %d++",
3042                                 (void *)cache_resource,
3043                                 __atomic_load_n(&cache_resource->refcnt,
3044                                                 __ATOMIC_RELAXED));
3045                         __atomic_fetch_add(&cache_resource->refcnt, 1,
3046                                            __ATOMIC_RELAXED);
3047                         dev_flow->handle->rix_port_id_action = idx;
3048                         dev_flow->dv.port_id_action = cache_resource;
3049                         return 0;
3050                 }
3051         }
3052         /* Register new port id action resource. */
3053         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
3054                                        &dev_flow->handle->rix_port_id_action);
3055         if (!cache_resource)
3056                 return rte_flow_error_set(error, ENOMEM,
3057                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3058                                           "cannot allocate resource memory");
3059         *cache_resource = *resource;
3060         ret = mlx5_flow_os_create_flow_action_dest_port
3061                                 (priv->sh->fdb_domain, resource->port_id,
3062                                  &cache_resource->action);
3063         if (ret) {
3064                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID],
                                     dev_flow->handle->rix_port_id_action);
3065                 return rte_flow_error_set(error, ENOMEM,
3066                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3067                                           NULL, "cannot create action");
3068         }
3069         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
3070         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
3071                      dev_flow->handle->rix_port_id_action, cache_resource,
3072                      next);
3073         dev_flow->dv.port_id_action = cache_resource;
3074         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
3075                 (void *)cache_resource,
3076                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
3077         return 0;
3078 }
3079
3080 /**
3081  * Find existing push vlan resource or create and register a new one.
3082  *
3083  * @param[in, out] dev
3084  *   Pointer to rte_eth_dev structure.
3085  * @param[in, out] resource
3086  *   Pointer to push VLAN action resource.
3087  * @param[in, out] dev_flow
3088  *   Pointer to the dev_flow.
3089  * @param[out] error
3090  *   Pointer to error structure.
3091  *
3092  * @return
3093  *   0 on success, a negative errno value otherwise and rte_errno is set.
3094  */
3095 static int
3096 flow_dv_push_vlan_action_resource_register
3097                        (struct rte_eth_dev *dev,
3098                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3099                         struct mlx5_flow *dev_flow,
3100                         struct rte_flow_error *error)
3101 {
3102         struct mlx5_priv *priv = dev->data->dev_private;
3103         struct mlx5_dev_ctx_shared *sh = priv->sh;
3104         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
3105         struct mlx5dv_dr_domain *domain;
3106         uint32_t idx = 0;
3107         int ret;
3108
3109         /* Lookup a matching resource from cache. */
3110         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
3111                       sh->push_vlan_action_list, idx, cache_resource, next) {
3112                 if (resource->vlan_tag == cache_resource->vlan_tag &&
3113                     resource->ft_type == cache_resource->ft_type) {
3114                         DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
3115                                 "refcnt %d++",
3116                                 (void *)cache_resource,
3117                                 __atomic_load_n(&cache_resource->refcnt,
3118                                                 __ATOMIC_RELAXED));
3119                         __atomic_fetch_add(&cache_resource->refcnt, 1,
3120                                            __ATOMIC_RELAXED);
3121                         dev_flow->handle->dvh.rix_push_vlan = idx;
3122                         dev_flow->dv.push_vlan_res = cache_resource;
3123                         return 0;
3124                 }
3125         }
3126         /* Register new push_vlan action resource. */
3127         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
3128                                        &dev_flow->handle->dvh.rix_push_vlan);
3129         if (!cache_resource)
3130                 return rte_flow_error_set(error, ENOMEM,
3131                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3132                                           "cannot allocate resource memory");
3133         *cache_resource = *resource;
3134         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3135                 domain = sh->fdb_domain;
3136         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3137                 domain = sh->rx_domain;
3138         else
3139                 domain = sh->tx_domain;
3140         ret = mlx5_flow_os_create_flow_action_push_vlan
3141                                         (domain, resource->vlan_tag,
3142                                          &cache_resource->action);
3143         if (ret) {
3144                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
                                     dev_flow->handle->dvh.rix_push_vlan);
3145                 return rte_flow_error_set(error, ENOMEM,
3146                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3147                                           NULL, "cannot create action");
3148         }
3149         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
3150         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
3151                      &sh->push_vlan_action_list,
3152                      dev_flow->handle->dvh.rix_push_vlan,
3153                      cache_resource, next);
3154         dev_flow->dv.push_vlan_res = cache_resource;
3155         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
3156                 (void *)cache_resource,
3157                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
3158         return 0;
3159 }

3160 /**
3161  * Get the header size of the specified rte_flow_item_type.
3162  *
3163  * @param[in] item_type
3164  *   Tested rte_flow_item_type.
3165  *
3166  * @return
3167  *   Size of the item type header, 0 if void or irrelevant.
3168  */
3169 static size_t
3170 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3171 {
3172         size_t retval;
3173
3174         switch (item_type) {
3175         case RTE_FLOW_ITEM_TYPE_ETH:
3176                 retval = sizeof(struct rte_ether_hdr);
3177                 break;
3178         case RTE_FLOW_ITEM_TYPE_VLAN:
3179                 retval = sizeof(struct rte_vlan_hdr);
3180                 break;
3181         case RTE_FLOW_ITEM_TYPE_IPV4:
3182                 retval = sizeof(struct rte_ipv4_hdr);
3183                 break;
3184         case RTE_FLOW_ITEM_TYPE_IPV6:
3185                 retval = sizeof(struct rte_ipv6_hdr);
3186                 break;
3187         case RTE_FLOW_ITEM_TYPE_UDP:
3188                 retval = sizeof(struct rte_udp_hdr);
3189                 break;
3190         case RTE_FLOW_ITEM_TYPE_TCP:
3191                 retval = sizeof(struct rte_tcp_hdr);
3192                 break;
3193         case RTE_FLOW_ITEM_TYPE_VXLAN:
3194         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3195                 retval = sizeof(struct rte_vxlan_hdr);
3196                 break;
3197         case RTE_FLOW_ITEM_TYPE_GRE:
3198         case RTE_FLOW_ITEM_TYPE_NVGRE:
3199                 retval = sizeof(struct rte_gre_hdr);
3200                 break;
3201         case RTE_FLOW_ITEM_TYPE_MPLS:
3202                 retval = sizeof(struct rte_mpls_hdr);
3203                 break;
3204         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3205         default:
3206                 retval = 0;
3207                 break;
3208         }
3209         return retval;
3210 }
3211
3212 #define MLX5_ENCAP_IPV4_VERSION         0x40
3213 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3214 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3215 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3216 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3217 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3218 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3219
3220 /**
3221  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
3222  *
3223  * @param[in] items
3224  *   Pointer to rte_flow_item objects list.
3225  * @param[out] buf
3226  *   Pointer to the output buffer.
3227  * @param[out] size
3228  *   Pointer to the output buffer size.
3229  * @param[out] error
3230  *   Pointer to the error structure.
3231  *
3232  * @return
3233  *   0 on success, a negative errno value otherwise and rte_errno is set.
3234  */
3235 static int
3236 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3237                            size_t *size, struct rte_flow_error *error)
3238 {
3239         struct rte_ether_hdr *eth = NULL;
3240         struct rte_vlan_hdr *vlan = NULL;
3241         struct rte_ipv4_hdr *ipv4 = NULL;
3242         struct rte_ipv6_hdr *ipv6 = NULL;
3243         struct rte_udp_hdr *udp = NULL;
3244         struct rte_vxlan_hdr *vxlan = NULL;
3245         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3246         struct rte_gre_hdr *gre = NULL;
3247         size_t len;
3248         size_t temp_size = 0;
3249
3250         if (!items)
3251                 return rte_flow_error_set(error, EINVAL,
3252                                           RTE_FLOW_ERROR_TYPE_ACTION,
3253                                           NULL, "invalid empty data");
3254         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3255                 len = flow_dv_get_item_hdr_len(items->type);
3256                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3257                         return rte_flow_error_set(error, EINVAL,
3258                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3259                                                   (void *)items->type,
3260                                                   "items total size is too big"
3261                                                   " for encap action");
3262                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3263                 switch (items->type) {
3264                 case RTE_FLOW_ITEM_TYPE_ETH:
3265                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3266                         break;
3267                 case RTE_FLOW_ITEM_TYPE_VLAN:
3268                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3269                         if (!eth)
3270                                 return rte_flow_error_set(error, EINVAL,
3271                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3272                                                 (void *)items->type,
3273                                                 "eth header not found");
3274                         if (!eth->ether_type)
3275                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3276                         break;
3277                 case RTE_FLOW_ITEM_TYPE_IPV4:
3278                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3279                         if (!vlan && !eth)
3280                                 return rte_flow_error_set(error, EINVAL,
3281                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3282                                                 (void *)items->type,
3283                                                 "neither eth nor vlan"
3284                                                 " header found");
3285                         if (vlan && !vlan->eth_proto)
3286                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3287                         else if (eth && !eth->ether_type)
3288                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3289                         if (!ipv4->version_ihl)
3290                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3291                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3292                         if (!ipv4->time_to_live)
3293                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3294                         break;
3295                 case RTE_FLOW_ITEM_TYPE_IPV6:
3296                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3297                         if (!vlan && !eth)
3298                                 return rte_flow_error_set(error, EINVAL,
3299                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3300                                                 (void *)items->type,
3301                                                 "neither eth nor vlan"
3302                                                 " header found");
3303                         if (vlan && !vlan->eth_proto)
3304                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3305                         else if (eth && !eth->ether_type)
3306                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3307                         if (!ipv6->vtc_flow)
3308                                 ipv6->vtc_flow =
3309                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3310                         if (!ipv6->hop_limits)
3311                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3312                         break;
3313                 case RTE_FLOW_ITEM_TYPE_UDP:
3314                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3315                         if (!ipv4 && !ipv6)
3316                                 return rte_flow_error_set(error, EINVAL,
3317                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3318                                                 (void *)items->type,
3319                                                 "ip header not found");
3320                         if (ipv4 && !ipv4->next_proto_id)
3321                                 ipv4->next_proto_id = IPPROTO_UDP;
3322                         else if (ipv6 && !ipv6->proto)
3323                                 ipv6->proto = IPPROTO_UDP;
3324                         break;
3325                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3326                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3327                         if (!udp)
3328                                 return rte_flow_error_set(error, EINVAL,
3329                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3330                                                 (void *)items->type,
3331                                                 "udp header not found");
3332                         if (!udp->dst_port)
3333                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3334                         if (!vxlan->vx_flags)
3335                                 vxlan->vx_flags =
3336                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3337                         break;
3338                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3339                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3340                         if (!udp)
3341                                 return rte_flow_error_set(error, EINVAL,
3342                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3343                                                 (void *)items->type,
3344                                                 "udp header not found");
3345                         if (!vxlan_gpe->proto)
3346                                 return rte_flow_error_set(error, EINVAL,
3347                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3348                                                 (void *)items->type,
3349                                                 "next protocol not found");
3350                         if (!udp->dst_port)
3351                                 udp->dst_port =
3352                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3353                         if (!vxlan_gpe->vx_flags)
3354                                 vxlan_gpe->vx_flags =
3355                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3356                         break;
3357                 case RTE_FLOW_ITEM_TYPE_GRE:
3358                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3359                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3360                         if (!gre->proto)
3361                                 return rte_flow_error_set(error, EINVAL,
3362                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3363                                                 (void *)items->type,
3364                                                 "next protocol not found");
3365                         if (!ipv4 && !ipv6)
3366                                 return rte_flow_error_set(error, EINVAL,
3367                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3368                                                 (void *)items->type,
3369                                                 "ip header not found");
3370                         if (ipv4 && !ipv4->next_proto_id)
3371                                 ipv4->next_proto_id = IPPROTO_GRE;
3372                         else if (ipv6 && !ipv6->proto)
3373                                 ipv6->proto = IPPROTO_GRE;
3374                         break;
3375                 case RTE_FLOW_ITEM_TYPE_VOID:
3376                         break;
3377                 default:
3378                         return rte_flow_error_set(error, EINVAL,
3379                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3380                                                   (void *)items->type,
3381                                                   "unsupported item type");
3382                         break;
3383                 }
3384                 temp_size += len;
3385         }
3386         *size = temp_size;
3387         return 0;
3388 }
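
/*
 * Illustrative use (hypothetical, outside this file): building the raw
 * reformat buffer for an ETH/IPv4/UDP/VXLAN encapsulation, where
 * eth_spec, ipv4_spec, udp_spec and vxlan_spec are assumed item specs.
 * Unset fields (EtherType, IP version/TTL, UDP destination port, VXLAN
 * flags) are filled with the defaults defined above.
 *
 *	uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *	size_t size;
 *	const struct rte_flow_item defs[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	ret = flow_dv_convert_encap_data(defs, buf, &size, &error);
 */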
3389
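/**
 * Zero the UDP checksum of an IPv6 encapsulation header, if present.
 *
 * @param[in] data
 *   Pointer to the raw encapsulation buffer, starting with the Ethernet
 *   header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */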
3390 static int
3391 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3392 {
3393         struct rte_ether_hdr *eth = NULL;
3394         struct rte_vlan_hdr *vlan = NULL;
3395         struct rte_ipv6_hdr *ipv6 = NULL;
3396         struct rte_udp_hdr *udp = NULL;
3397         char *next_hdr;
3398         uint16_t proto;
3399
3400         eth = (struct rte_ether_hdr *)data;
3401         next_hdr = (char *)(eth + 1);
3402         proto = rte_be_to_cpu_16(eth->ether_type);
3403
3404         /* VLAN skipping */
3405         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3406                 vlan = (struct rte_vlan_hdr *)next_hdr;
3407                 proto = rte_be_to_cpu_16(vlan->eth_proto);
3408                 next_hdr += sizeof(struct rte_vlan_hdr);
3409         }
3410
3411         /* HW calculates IPv4 csum, no need to proceed. */
3412         if (proto == RTE_ETHER_TYPE_IPV4)
3413                 return 0;
3414
3415         /* Non IPv4/IPv6 header, not supported. */
3416         if (proto != RTE_ETHER_TYPE_IPV6) {
3417                 return rte_flow_error_set(error, ENOTSUP,
3418                                           RTE_FLOW_ERROR_TYPE_ACTION,
3419                                           NULL, "Cannot offload non IPv4/IPv6");
3420         }
3421
3422         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3423
3424         /* Ignore non-UDP payload. */
3425         if (ipv6->proto != IPPROTO_UDP)
3426                 return 0;
3427
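        /*
         * The template checksum cannot be precomputed for the pushed
         * header; a zero UDP checksum means "no checksum" and is
         * permitted for tunnels over IPv6 (RFC 6935).
         */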
3428         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3429         udp->dgram_cksum = 0;
3430
3431         return 0;
3432 }
3433
3434 /**
3435  * Convert L2 encap action to DV specification.
3436  *
3437  * @param[in] dev
3438  *   Pointer to rte_eth_dev structure.
3439  * @param[in] action
3440  *   Pointer to action structure.
3441  * @param[in, out] dev_flow
3442  *   Pointer to the mlx5_flow.
3443  * @param[in] transfer
3444  *   Mark if the flow is E-Switch flow.
3445  * @param[out] error
3446  *   Pointer to the error structure.
3447  *
3448  * @return
3449  *   0 on success, a negative errno value otherwise and rte_errno is set.
3450  */
3451 static int
3452 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3453                                const struct rte_flow_action *action,
3454                                struct mlx5_flow *dev_flow,
3455                                uint8_t transfer,
3456                                struct rte_flow_error *error)
3457 {
3458         const struct rte_flow_item *encap_data;
3459         const struct rte_flow_action_raw_encap *raw_encap_data;
3460         struct mlx5_flow_dv_encap_decap_resource res = {
3461                 .reformat_type =
3462                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3463                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3464                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3465         };
3466
3467         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3468                 raw_encap_data =
3469                         (const struct rte_flow_action_raw_encap *)action->conf;
3470                 res.size = raw_encap_data->size;
3471                 memcpy(res.buf, raw_encap_data->data, res.size);
3472         } else {
3473                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3474                         encap_data =
3475                                 ((const struct rte_flow_action_vxlan_encap *)
3476                                                 action->conf)->definition;
3477                 else
3478                         encap_data =
3479                                 ((const struct rte_flow_action_nvgre_encap *)
3480                                                 action->conf)->definition;
3481                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3482                                                &res.size, error))
3483                         return -rte_errno;
3484         }
3485         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3486                 return -rte_errno;
3487         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3488                 return rte_flow_error_set(error, EINVAL,
3489                                           RTE_FLOW_ERROR_TYPE_ACTION,
3490                                           NULL, "can't create L2 encap action");
3491         return 0;
3492 }
3493
3494 /**
3495  * Convert L2 decap action to DV specification.
3496  *
3497  * @param[in] dev
3498  *   Pointer to rte_eth_dev structure.
3499  * @param[in, out] dev_flow
3500  *   Pointer to the mlx5_flow.
3501  * @param[in] transfer
3502  *   Mark if the flow is E-Switch flow.
3503  * @param[out] error
3504  *   Pointer to the error structure.
3505  *
3506  * @return
3507  *   0 on success, a negative errno value otherwise and rte_errno is set.
3508  */
3509 static int
3510 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3511                                struct mlx5_flow *dev_flow,
3512                                uint8_t transfer,
3513                                struct rte_flow_error *error)
3514 {
3515         struct mlx5_flow_dv_encap_decap_resource res = {
3516                 .size = 0,
3517                 .reformat_type =
3518                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3519                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3520                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3521         };
3522
3523         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3524                 return rte_flow_error_set(error, EINVAL,
3525                                           RTE_FLOW_ERROR_TYPE_ACTION,
3526                                           NULL, "can't create L2 decap action");
3527         return 0;
3528 }
3529
3530 /**
3531  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3532  *
3533  * @param[in] dev
3534  *   Pointer to rte_eth_dev structure.
3535  * @param[in] action
3536  *   Pointer to action structure.
3537  * @param[in, out] dev_flow
3538  *   Pointer to the mlx5_flow.
3539  * @param[in] attr
3540  *   Pointer to the flow attributes.
3541  * @param[out] error
3542  *   Pointer to the error structure.
3543  *
3544  * @return
3545  *   0 on success, a negative errno value otherwise and rte_errno is set.
3546  */
3547 static int
3548 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3549                                 const struct rte_flow_action *action,
3550                                 struct mlx5_flow *dev_flow,
3551                                 const struct rte_flow_attr *attr,
3552                                 struct rte_flow_error *error)
3553 {
3554         const struct rte_flow_action_raw_encap *encap_data;
3555         struct mlx5_flow_dv_encap_decap_resource res;
3556
3557         memset(&res, 0, sizeof(res));
3558         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3559         res.size = encap_data->size;
3560         memcpy(res.buf, encap_data->data, res.size);
3561         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3562                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3563                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3564         if (attr->transfer)
3565                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3566         else
3567                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3568                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3569         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3570                 return rte_flow_error_set(error, EINVAL,
3571                                           RTE_FLOW_ERROR_TYPE_ACTION,
3572                                           NULL, "can't create encap action");
3573         return 0;
3574 }
3575
3576 /**
3577  * Create action push VLAN.
3578  *
3579  * @param[in] dev
3580  *   Pointer to rte_eth_dev structure.
3581  * @param[in] attr
3582  *   Pointer to the flow attributes.
3583  * @param[in] vlan
3584  *   Pointer to the vlan to push to the Ethernet header.
3585  * @param[in, out] dev_flow
3586  *   Pointer to the mlx5_flow.
3587  * @param[out] error
3588  *   Pointer to the error structure.
3589  *
3590  * @return
3591  *   0 on success, a negative errno value otherwise and rte_errno is set.
3592  */
3593 static int
3594 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3595                                 const struct rte_flow_attr *attr,
3596                                 const struct rte_vlan_hdr *vlan,
3597                                 struct mlx5_flow *dev_flow,
3598                                 struct rte_flow_error *error)
3599 {
3600         struct mlx5_flow_dv_push_vlan_action_resource res;
3601
3602         memset(&res, 0, sizeof(res));
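        /*
         * The push VLAN action value carries the TPID (EtherType) in the
         * upper 16 bits and the TCI in the lower 16, in network order.
         */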
3603         res.vlan_tag =
3604                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3605                                  vlan->vlan_tci);
3606         if (attr->transfer)
3607                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3608         else
3609                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3610                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3611         return flow_dv_push_vlan_action_resource_register
3612                                             (dev, &res, dev_flow, error);
3613 }
3614
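/*
 * Non-zero while validating a flow whose sample action mirrors packets on
 * the E-Switch (FDB); set elsewhere in this file and used below to reject
 * action orderings the mirror cannot support.
 */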
3615 static int fdb_mirror;
3616
3617 /**
3618  * Validate the modify-header actions.
3619  *
3620  * @param[in] action_flags
3621  *   Holds the actions detected until now.
3622  * @param[in] action
3623  *   Pointer to the modify action.
3624  * @param[out] error
3625  *   Pointer to error structure.
3626  *
3627  * @return
3628  *   0 on success, a negative errno value otherwise and rte_errno is set.
3629  */
3630 static int
3631 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3632                                    const struct rte_flow_action *action,
3633                                    struct rte_flow_error *error)
3634 {
3635         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3636                 return rte_flow_error_set(error, EINVAL,
3637                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3638                                           NULL, "action configuration not set");
3639         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3640                 return rte_flow_error_set(error, EINVAL,
3641                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3642                                           "can't have encap action before"
3643                                           " modify action");
3644         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3645                 return rte_flow_error_set(error, EINVAL,
3646                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3647                                           "can't support sample action before"
3648                                           " modify action for E-Switch"
3649                                           " mirroring");
3650         return 0;
3651 }
3652
3653 /**
3654  * Validate the modify-header MAC address actions.
3655  *
3656  * @param[in] action_flags
3657  *   Holds the actions detected until now.
3658  * @param[in] action
3659  *   Pointer to the modify action.
3660  * @param[in] item_flags
3661  *   Holds the items detected.
3662  * @param[out] error
3663  *   Pointer to error structure.
3664  *
3665  * @return
3666  *   0 on success, a negative errno value otherwise and rte_errno is set.
3667  */
3668 static int
3669 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3670                                    const struct rte_flow_action *action,
3671                                    const uint64_t item_flags,
3672                                    struct rte_flow_error *error)
3673 {
3674         int ret = 0;
3675
3676         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3677         if (!ret) {
3678                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3679                         return rte_flow_error_set(error, EINVAL,
3680                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3681                                                   NULL,
3682                                                   "no L2 item in pattern");
3683         }
3684         return ret;
3685 }
3686
3687 /**
3688  * Validate the modify-header IPv4 address actions.
3689  *
3690  * @param[in] action_flags
3691  *   Holds the actions detected until now.
3692  * @param[in] action
3693  *   Pointer to the modify action.
3694  * @param[in] item_flags
3695  *   Holds the items detected.
3696  * @param[out] error
3697  *   Pointer to error structure.
3698  *
3699  * @return
3700  *   0 on success, a negative errno value otherwise and rte_errno is set.
3701  */
3702 static int
3703 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3704                                     const struct rte_flow_action *action,
3705                                     const uint64_t item_flags,
3706                                     struct rte_flow_error *error)
3707 {
3708         int ret = 0;
3709         uint64_t layer;
3710
3711         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3712         if (!ret) {
3713                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3714                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3715                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3716                 if (!(item_flags & layer))
3717                         return rte_flow_error_set(error, EINVAL,
3718                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3719                                                   NULL,
3720                                                   "no ipv4 item in pattern");
3721         }
3722         return ret;
3723 }
3724
3725 /**
3726  * Validate the modify-header IPv6 address actions.
3727  *
3728  * @param[in] action_flags
3729  *   Holds the actions detected until now.
3730  * @param[in] action
3731  *   Pointer to the modify action.
3732  * @param[in] item_flags
3733  *   Holds the items detected.
3734  * @param[out] error
3735  *   Pointer to error structure.
3736  *
3737  * @return
3738  *   0 on success, a negative errno value otherwise and rte_errno is set.
3739  */
3740 static int
3741 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3742                                     const struct rte_flow_action *action,
3743                                     const uint64_t item_flags,
3744                                     struct rte_flow_error *error)
3745 {
3746         int ret = 0;
3747         uint64_t layer;
3748
3749         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3750         if (!ret) {
3751                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3752                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3753                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3754                 if (!(item_flags & layer))
3755                         return rte_flow_error_set(error, EINVAL,
3756                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3757                                                   NULL,
3758                                                   "no ipv6 item in pattern");
3759         }
3760         return ret;
3761 }
3762
3763 /**
3764  * Validate the modify-header TP actions.
3765  *
3766  * @param[in] action_flags
3767  *   Holds the actions detected until now.
3768  * @param[in] action
3769  *   Pointer to the modify action.
3770  * @param[in] item_flags
3771  *   Holds the items detected.
3772  * @param[out] error
3773  *   Pointer to error structure.
3774  *
3775  * @return
3776  *   0 on success, a negative errno value otherwise and rte_errno is set.
3777  */
3778 static int
3779 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3780                                   const struct rte_flow_action *action,
3781                                   const uint64_t item_flags,
3782                                   struct rte_flow_error *error)
3783 {
3784         int ret = 0;
3785         uint64_t layer;
3786
3787         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3788         if (!ret) {
3789                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3790                                  MLX5_FLOW_LAYER_INNER_L4 :
3791                                  MLX5_FLOW_LAYER_OUTER_L4;
3792                 if (!(item_flags & layer))
3793                         return rte_flow_error_set(error, EINVAL,
3794                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3795                                                   NULL, "no transport layer "
3796                                                   "in pattern");
3797         }
3798         return ret;
3799 }
3800
3801 /**
3802  * Validate the modify-header actions of increment/decrement
3803  * TCP Sequence-number.
3804  *
3805  * @param[in] action_flags
3806  *   Holds the actions detected until now.
3807  * @param[in] action
3808  *   Pointer to the modify action.
3809  * @param[in] item_flags
3810  *   Holds the items detected.
3811  * @param[out] error
3812  *   Pointer to error structure.
3813  *
3814  * @return
3815  *   0 on success, a negative errno value otherwise and rte_errno is set.
3816  */
3817 static int
3818 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3819                                        const struct rte_flow_action *action,
3820                                        const uint64_t item_flags,
3821                                        struct rte_flow_error *error)
3822 {
3823         int ret = 0;
3824         uint64_t layer;
3825
3826         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3827         if (!ret) {
3828                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3829                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3830                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3831                 if (!(item_flags & layer))
3832                         return rte_flow_error_set(error, EINVAL,
3833                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3834                                                   NULL, "no TCP item in"
3835                                                   " pattern");
3836                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3837                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3838                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3839                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3840                         return rte_flow_error_set(error, EINVAL,
3841                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3842                                                   NULL,
3843                                                   "cannot decrease and increase"
3844                                                   " TCP sequence number"
3845                                                   " at the same time");
3846         }
3847         return ret;
3848 }
3849
3850 /**
3851  * Validate the modify-header actions of increment/decrement
3852  * TCP Acknowledgment number.
3853  *
3854  * @param[in] action_flags
3855  *   Holds the actions detected until now.
3856  * @param[in] action
3857  *   Pointer to the modify action.
3858  * @param[in] item_flags
3859  *   Holds the items detected.
3860  * @param[out] error
3861  *   Pointer to error structure.
3862  *
3863  * @return
3864  *   0 on success, a negative errno value otherwise and rte_errno is set.
3865  */
3866 static int
3867 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3868                                        const struct rte_flow_action *action,
3869                                        const uint64_t item_flags,
3870                                        struct rte_flow_error *error)
3871 {
3872         int ret = 0;
3873         uint64_t layer;
3874
3875         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3876         if (!ret) {
3877                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3878                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3879                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3880                 if (!(item_flags & layer))
3881                         return rte_flow_error_set(error, EINVAL,
3882                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3883                                                   NULL, "no TCP item in"
3884                                                   " pattern");
3885                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3886                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3887                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3888                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3889                         return rte_flow_error_set(error, EINVAL,
3890                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3891                                                   NULL,
3892                                                   "cannot decrease and increase"
3893                                                   " TCP acknowledgment number"
3894                                                   " at the same time");
3895         }
3896         return ret;
3897 }
3898
3899 /**
3900  * Validate the modify-header TTL actions.
3901  *
3902  * @param[in] action_flags
3903  *   Holds the actions detected until now.
3904  * @param[in] action
3905  *   Pointer to the modify action.
3906  * @param[in] item_flags
3907  *   Holds the items detected.
3908  * @param[out] error
3909  *   Pointer to error structure.
3910  *
3911  * @return
3912  *   0 on success, a negative errno value otherwise and rte_errno is set.
3913  */
3914 static int
3915 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3916                                    const struct rte_flow_action *action,
3917                                    const uint64_t item_flags,
3918                                    struct rte_flow_error *error)
3919 {
3920         int ret = 0;
3921         uint64_t layer;
3922
3923         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3924         if (!ret) {
3925                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3926                                  MLX5_FLOW_LAYER_INNER_L3 :
3927                                  MLX5_FLOW_LAYER_OUTER_L3;
3928                 if (!(item_flags & layer))
3929                         return rte_flow_error_set(error, EINVAL,
3930                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3931                                                   NULL,
3932                                                   "no IP protocol in pattern");
3933         }
3934         return ret;
3935 }
3936
3937 /**
3938  * Validate jump action.
3939  *
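 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, NULL if the rule has none.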
3940  * @param[in] action
3941  *   Pointer to the jump action.
3942  * @param[in] action_flags
3943  *   Holds the actions detected until now.
3944  * @param[in] attributes
3945  *   Pointer to flow attributes.
3946  * @param[in] external
3947  *   Action belongs to a flow rule created by a request external to the PMD.
3948  * @param[out] error
3949  *   Pointer to error structure.
3950  *
3951  * @return
3952  *   0 on success, a negative errno value otherwise and rte_errno is set.
3953  */
3954 static int
3955 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3956                              const struct mlx5_flow_tunnel *tunnel,
3957                              const struct rte_flow_action *action,
3958                              uint64_t action_flags,
3959                              const struct rte_flow_attr *attributes,
3960                              bool external, struct rte_flow_error *error)
3961 {
3962         uint32_t target_group, table;
3963         int ret = 0;
3964         struct flow_grp_info grp_info = {
3965                 .external = !!external,
3966                 .transfer = !!attributes->transfer,
3967                 .fdb_def_rule = 1,
3968                 .std_tbl_fix = 0
3969         };
3970         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3971                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3972                 return rte_flow_error_set(error, EINVAL,
3973                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3974                                           "can't have 2 fate actions in"
3975                                           " the same flow");
3976         if (action_flags & MLX5_FLOW_ACTION_METER)
3977                 return rte_flow_error_set(error, ENOTSUP,
3978                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3979                                           "jump with meter not supported");
3980         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3981                 return rte_flow_error_set(error, EINVAL,
3982                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3983                                           "E-Switch mirroring can't support"
3984                                           " Sample action and jump action in"
3985                                           " the same flow now");
3986         if (!action->conf)
3987                 return rte_flow_error_set(error, EINVAL,
3988                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3989                                           NULL, "action configuration not set");
3990         target_group =
3991                 ((const struct rte_flow_action_jump *)action->conf)->group;
3992         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3993                                        grp_info, error);
3994         if (ret)
3995                 return ret;
3996         if (attributes->group == target_group &&
3997             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3998                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3999                 return rte_flow_error_set(error, EINVAL,
4000                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4001                                           "target group must be other than"
4002                                           " the current flow group");
4003         return 0;
4004 }
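/*
 * Editor's illustration (not part of the original file): a minimal,
 * hedged sketch of a JUMP action configuration that the validation
 * above accepts, assuming the rule itself is created in group 0 and
 * no tunnel offload is involved.
 */
#if 0 /* illustrative only */
	static const struct rte_flow_action_jump jump_conf = {
		.group = 1, /* must differ from attr->group (checked above) */
	};
	const struct rte_flow_action jump_action = {
		.type = RTE_FLOW_ACTION_TYPE_JUMP,
		.conf = &jump_conf,
	};
#endif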
4005
4006 /**
4007  * Validate the port_id action.
4008  *
4009  * @param[in] dev
4010  *   Pointer to rte_eth_dev structure.
4011  * @param[in] action_flags
4012  *   Bit-fields that holds the actions detected until now.
4013  * @param[in] action
4014  *   Port_id RTE action structure.
4015  * @param[in] attr
4016  *   Attributes of flow that includes this action.
4017  * @param[out] error
4018  *   Pointer to error structure.
4019  *
4020  * @return
4021  *   0 on success, a negative errno value otherwise and rte_errno is set.
4022  */
4023 static int
4024 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4025                                 uint64_t action_flags,
4026                                 const struct rte_flow_action *action,
4027                                 const struct rte_flow_attr *attr,
4028                                 struct rte_flow_error *error)
4029 {
4030         const struct rte_flow_action_port_id *port_id;
4031         struct mlx5_priv *act_priv;
4032         struct mlx5_priv *dev_priv;
4033         uint16_t port;
4034
4035         if (!attr->transfer)
4036                 return rte_flow_error_set(error, ENOTSUP,
4037                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4038                                           NULL,
4039                                           "port id action is valid in transfer"
4040                                           " mode only");
4041         if (!action || !action->conf)
4042                 return rte_flow_error_set(error, ENOTSUP,
4043                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4044                                           NULL,
4045                                           "port id action parameters must be"
4046                                           " specified");
4047         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4048                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4049                 return rte_flow_error_set(error, EINVAL,
4050                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4051                                           "can have only one fate action in"
4052                                           " a flow");
4053         dev_priv = mlx5_dev_to_eswitch_info(dev);
4054         if (!dev_priv)
4055                 return rte_flow_error_set(error, rte_errno,
4056                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4057                                           NULL,
4058                                           "failed to obtain E-Switch info");
4059         port_id = action->conf;
4060         port = port_id->original ? dev->data->port_id : port_id->id;
4061         act_priv = mlx5_port_to_eswitch_info(port, false);
4062         if (!act_priv)
4063                 return rte_flow_error_set
4064                                 (error, rte_errno,
4065                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4066                                  "failed to obtain E-Switch port id for port");
4067         if (act_priv->domain_id != dev_priv->domain_id)
4068                 return rte_flow_error_set
4069                                 (error, EINVAL,
4070                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4071                                  "port does not belong to"
4072                                  " E-Switch being configured");
4073         return 0;
4074 }
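/*
 * Editor's illustration (not part of the original file): a hedged sketch
 * of a PORT_ID action configuration that passes the checks above; the
 * flow attributes must have transfer = 1, and port 1 is a hypothetical
 * representor on the same E-Switch domain.
 */
#if 0 /* illustrative only */
	static const struct rte_flow_action_port_id port_conf = {
		.original = 0,	/* redirect to .id, not the incoming port */
		.id = 1,	/* hypothetical destination port */
	};
	const struct rte_flow_action port_action = {
		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
		.conf = &port_conf,
	};
#endif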
4075
4076 /**
4077  * Get the maximum number of modify header actions.
4078  *
4079  * @param dev
4080  *   Pointer to rte_eth_dev structure.
4081  * @param flags
4082  *   Flags bits to check if root level.
4083  *
4084  * @return
4085  *   Max number of modify header actions device can support.
4086  */
4087 static inline unsigned int
4088 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4089                               uint64_t flags)
4090 {
4091         /*
4092          * There's no way to directly query the max capacity from FW.
4093          * The maximal value on root table should be assumed to be supported.
4094          */
4095         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4096                 return MLX5_MAX_MODIFY_NUM;
4097         else
4098                 return MLX5_ROOT_TBL_MODIFY_NUM;
4099 }
4100
4101 /**
4102  * Validate the meter action.
4103  *
4104  * @param[in] dev
4105  *   Pointer to rte_eth_dev structure.
4106  * @param[in] action_flags
4107  *   Bit-fields that holds the actions detected until now.
4108  * @param[in] action
4109  *   Pointer to the meter action.
4110  * @param[in] attr
4111  *   Attributes of flow that includes this action.
4112  * @param[out] error
4113  *   Pointer to error structure.
4114  *
4115  * @return
4116  *   0 on success, a negative errno value otherwise and rte_errno is set.
4117  */
4118 static int
4119 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4120                                 uint64_t action_flags,
4121                                 const struct rte_flow_action *action,
4122                                 const struct rte_flow_attr *attr,
4123                                 struct rte_flow_error *error)
4124 {
4125         struct mlx5_priv *priv = dev->data->dev_private;
4126         const struct rte_flow_action_meter *am = action->conf;
4127         struct mlx5_flow_meter *fm;
4128
4129         if (!am)
4130                 return rte_flow_error_set(error, EINVAL,
4131                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4132                                           "meter action conf is NULL");
4133
4134         if (action_flags & MLX5_FLOW_ACTION_METER)
4135                 return rte_flow_error_set(error, ENOTSUP,
4136                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4137                                           "meter chaining not supported");
4138         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4139                 return rte_flow_error_set(error, ENOTSUP,
4140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4141                                           "meter with jump not supported");
4142         if (!priv->mtr_en)
4143                 return rte_flow_error_set(error, ENOTSUP,
4144                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4145                                           NULL,
4146                                           "meter action not supported");
4147         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4148         if (!fm)
4149                 return rte_flow_error_set(error, EINVAL,
4150                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4151                                           "Meter not found");
4152         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4153               (!fm->ingress && !attr->ingress && attr->egress) ||
4154               (!fm->egress && !attr->egress && attr->ingress))))
4155                 return rte_flow_error_set(error, EINVAL,
4156                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4157                                           "Flow attributes are either invalid "
4158                                           "or have a conflict with current "
4159                                           "meter attributes");
4160         return 0;
4161 }
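/*
 * Editor's illustration (not part of the original file): a hedged sketch
 * of a METER action referencing a meter created beforehand through the
 * rte_mtr API; the id must be resolvable by mlx5_flow_meter_find().
 */
#if 0 /* illustrative only */
	static const struct rte_flow_action_meter meter_conf = {
		.mtr_id = 1,	/* hypothetical pre-created meter id */
	};
	const struct rte_flow_action meter_action = {
		.type = RTE_FLOW_ACTION_TYPE_METER,
		.conf = &meter_conf,
	};
#endif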
4162
4163 /**
4164  * Validate the age action.
4165  *
4166  * @param[in] action_flags
4167  *   Holds the actions detected until now.
4168  * @param[in] action
4169  *   Pointer to the age action.
4170  * @param[in] dev
4171  *   Pointer to the Ethernet device structure.
4172  * @param[out] error
4173  *   Pointer to error structure.
4174  *
4175  * @return
4176  *   0 on success, a negative errno value otherwise and rte_errno is set.
4177  */
4178 static int
4179 flow_dv_validate_action_age(uint64_t action_flags,
4180                             const struct rte_flow_action *action,
4181                             struct rte_eth_dev *dev,
4182                             struct rte_flow_error *error)
4183 {
4184         struct mlx5_priv *priv = dev->data->dev_private;
4185         const struct rte_flow_action_age *age = action->conf;
4186
4187         if (!priv->config.devx || priv->sh->cmng.counter_fallback)
4188                 return rte_flow_error_set(error, ENOTSUP,
4189                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4190                                           NULL,
4191                                           "age action not supported");
4192         if (!(action->conf))
4193                 return rte_flow_error_set(error, EINVAL,
4194                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4195                                           "configuration cannot be null");
4196         if (!(age->timeout))
4197                 return rte_flow_error_set(error, EINVAL,
4198                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4199                                           "invalid timeout value 0");
4200         if (action_flags & MLX5_FLOW_ACTION_AGE)
4201                 return rte_flow_error_set(error, EINVAL,
4202                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4203                                           "duplicate age actions set");
4204         return 0;
4205 }
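/*
 * Editor's illustration (not part of the original file): a hedged sketch
 * of an AGE action satisfying the checks above (non-zero timeout, at
 * most one AGE action per flow); it also requires DevX counters without
 * the fallback mode.
 */
#if 0 /* illustrative only */
	static const struct rte_flow_action_age age_conf = {
		.timeout = 10,	/* in seconds, zero is rejected above */
		.context = NULL, /* optional cookie reported on aging */
	};
	const struct rte_flow_action age_action = {
		.type = RTE_FLOW_ACTION_TYPE_AGE,
		.conf = &age_conf,
	};
#endif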
4206
4207 /**
4208  * Validate the modify-header IPv4 DSCP actions.
4209  *
4210  * @param[in] action_flags
4211  *   Holds the actions detected until now.
4212  * @param[in] action
4213  *   Pointer to the modify action.
4214  * @param[in] item_flags
4215  *   Holds the items detected.
4216  * @param[out] error
4217  *   Pointer to error structure.
4218  *
4219  * @return
4220  *   0 on success, a negative errno value otherwise and rte_errno is set.
4221  */
4222 static int
4223 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4224                                          const struct rte_flow_action *action,
4225                                          const uint64_t item_flags,
4226                                          struct rte_flow_error *error)
4227 {
4228         int ret = 0;
4229
4230         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4231         if (!ret) {
4232                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4233                         return rte_flow_error_set(error, EINVAL,
4234                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4235                                                   NULL,
4236                                                   "no ipv4 item in pattern");
4237         }
4238         return ret;
4239 }
4240
4241 /**
4242  * Validate the modify-header IPv6 DSCP actions.
4243  *
4244  * @param[in] action_flags
4245  *   Holds the actions detected until now.
4246  * @param[in] action
4247  *   Pointer to the modify action.
4248  * @param[in] item_flags
4249  *   Holds the items detected.
4250  * @param[out] error
4251  *   Pointer to error structure.
4252  *
4253  * @return
4254  *   0 on success, a negative errno value otherwise and rte_errno is set.
4255  */
4256 static int
4257 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4258                                          const struct rte_flow_action *action,
4259                                          const uint64_t item_flags,
4260                                          struct rte_flow_error *error)
4261 {
4262         int ret = 0;
4263
4264         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4265         if (!ret) {
4266                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4267                         return rte_flow_error_set(error, EINVAL,
4268                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4269                                                   NULL,
4270                                                   "no ipv6 item in pattern");
4271         }
4272         return ret;
4273 }
4274
4275 /**
4276  * Match modify-header resource.
4277  *
4278  * @param entry
4279  *   Pointer to exist resource entry object.
4280  * @param ctx
4281  *   Pointer to new modify-header resource.
4282  *
4283  * @return
4284  *   0 on matching, -1 otherwise.
4285  */
4286 static int
4287 flow_dv_modify_hdr_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
4288 {
4289         struct mlx5_flow_dv_modify_hdr_resource *resource;
4290         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
4291         uint32_t actions_len;
4292
4293         resource = (struct mlx5_flow_dv_modify_hdr_resource *)ctx;
4294         cache_resource = container_of(entry,
4295                                       struct mlx5_flow_dv_modify_hdr_resource,
4296                                       entry);
4297         actions_len = resource->actions_num * sizeof(resource->actions[0]);
4298         if (resource->entry.key == cache_resource->entry.key &&
4299             resource->ft_type == cache_resource->ft_type &&
4300             resource->actions_num == cache_resource->actions_num &&
4301             resource->flags == cache_resource->flags &&
4302             !memcmp((const void *)resource->actions,
4303                     (const void *)cache_resource->actions,
4304                     actions_len))
4305                 return 0;
4306         return -1;
4307 }
4308
4309 /**
4310  * Validate the sample action.
4311  *
4312  * @param[in] action_flags
4313  *   Holds the actions detected until now.
4314  * @param[in] action
4315  *   Pointer to the sample action.
4316  * @param[in] dev
4317  *   Pointer to the Ethernet device structure.
4318  * @param[in] attr
4319  *   Attributes of flow that includes this action.
4320  * @param[out] error
4321  *   Pointer to error structure.
4322  *
4323  * @return
4324  *   0 on success, a negative errno value otherwise and rte_errno is set.
4325  */
4326 static int
4327 flow_dv_validate_action_sample(uint64_t action_flags,
4328                                const struct rte_flow_action *action,
4329                                struct rte_eth_dev *dev,
4330                                const struct rte_flow_attr *attr,
4331                                struct rte_flow_error *error)
4332 {
4333         struct mlx5_priv *priv = dev->data->dev_private;
4334         struct mlx5_dev_config *dev_conf = &priv->config;
4335         const struct rte_flow_action_sample *sample = action->conf;
4336         const struct rte_flow_action *act;
4337         uint64_t sub_action_flags = 0;
4338         uint16_t queue_index = 0xFFFF;
4339         int actions_n = 0;
4340         int ret;
4341         fdb_mirror = 0;
4342
4343         if (!sample)
4344                 return rte_flow_error_set(error, EINVAL,
4345                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4346                                           "configuration cannot be NULL");
4347         if (sample->ratio == 0)
4348                 return rte_flow_error_set(error, EINVAL,
4349                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4350                                           "ratio value must be at least 1");
4351         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4352                 return rte_flow_error_set(error, ENOTSUP,
4353                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4354                                           NULL,
4355                                           "sample action not supported");
4356         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4357                 return rte_flow_error_set(error, EINVAL,
4358                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4359                                           "Multiple sample actions not "
4360                                           "supported");
4361         if (action_flags & MLX5_FLOW_ACTION_METER)
4362                 return rte_flow_error_set(error, EINVAL,
4363                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4364                                           "wrong action order, meter should "
4365                                           "be after sample action");
4366         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4367                 return rte_flow_error_set(error, EINVAL,
4368                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4369                                           "wrong action order, jump should "
4370                                           "be after sample action");
4371         act = sample->actions;
4372         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4373                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4374                         return rte_flow_error_set(error, ENOTSUP,
4375                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4376                                                   act, "too many actions");
4377                 switch (act->type) {
4378                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4379                         ret = mlx5_flow_validate_action_queue(act,
4380                                                               sub_action_flags,
4381                                                               dev,
4382                                                               attr, error);
4383                         if (ret < 0)
4384                                 return ret;
4385                         queue_index = ((const struct rte_flow_action_queue *)
4386                                                         (act->conf))->index;
4387                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4388                         ++actions_n;
4389                         break;
4390                 case RTE_FLOW_ACTION_TYPE_MARK:
4391                         ret = flow_dv_validate_action_mark(dev, act,
4392                                                            sub_action_flags,
4393                                                            attr, error);
4394                         if (ret < 0)
4395                                 return ret;
4396                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4397                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4398                                                 MLX5_FLOW_ACTION_MARK_EXT;
4399                         else
4400                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4401                         ++actions_n;
4402                         break;
4403                 case RTE_FLOW_ACTION_TYPE_COUNT:
4404                         ret = flow_dv_validate_action_count(dev, error);
4405                         if (ret < 0)
4406                                 return ret;
4407                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4408                         ++actions_n;
4409                         break;
4410                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4411                         ret = flow_dv_validate_action_port_id(dev,
4412                                                               sub_action_flags,
4413                                                               act,
4414                                                               attr,
4415                                                               error);
4416                         if (ret)
4417                                 return ret;
4418                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4419                         ++actions_n;
4420                         break;
4421                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4422                         ret = flow_dv_validate_action_raw_encap_decap
4423                                 (dev, NULL, act->conf, attr, &sub_action_flags,
4424                                  &actions_n, error);
4425                         if (ret < 0)
4426                                 return ret;
4427                         ++actions_n;
4428                         break;
4429                 default:
4430                         return rte_flow_error_set(error, ENOTSUP,
4431                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4432                                                   NULL,
4433                                                   "optional action not "
4434                                                   "supported");
4435                 }
4436         }
4437         if (attr->ingress && !attr->transfer) {
4438                 if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
4439                         return rte_flow_error_set(error, EINVAL,
4440                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4441                                                   NULL,
4442                                                   "Ingress must have a dest "
4443                                                   "QUEUE for Sample");
4444         } else if (attr->egress && !attr->transfer) {
4445                 return rte_flow_error_set(error, ENOTSUP,
4446                                           RTE_FLOW_ERROR_TYPE_ACTION,
4447                                           NULL,
4448                                           "Sample only supports Ingress "
4449                                           "or E-Switch");
4450         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4451                 MLX5_ASSERT(attr->transfer);
4452                 if (sample->ratio > 1)
4453                         return rte_flow_error_set(error, ENOTSUP,
4454                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4455                                                   NULL,
4456                                                   "E-Switch doesn't support "
4457                                                   "any optional action "
4458                                                   "for sampling");
4459                 fdb_mirror = 1;
4460                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4461                         return rte_flow_error_set(error, ENOTSUP,
4462                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4463                                                   NULL,
4464                                                   "unsupported action QUEUE");
4465                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4466                         return rte_flow_error_set(error, EINVAL,
4467                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4468                                                   NULL,
4469                                                   "E-Switch must have a dest "
4470                                                   "port for mirroring");
4471         }
4472         /* Continue validation for Xcap actions. */
4473         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4474             (queue_index == 0xFFFF ||
4475              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4476                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4477                      MLX5_FLOW_XCAP_ACTIONS)
4478                         return rte_flow_error_set(error, ENOTSUP,
4479                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4480                                                   NULL, "encap and decap "
4481                                                   "combination isn't "
4482                                                   "supported");
4483                 if (!attr->transfer && attr->ingress && (sub_action_flags &
4484                                                         MLX5_FLOW_ACTION_ENCAP))
4485                         return rte_flow_error_set(error, ENOTSUP,
4486                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4487                                                   NULL, "encap is not supported"
4488                                                   " for ingress traffic");
4489         }
4490         return 0;
4491 }
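/*
 * Editor's illustration (not part of the original file): a hedged sketch
 * of an ingress SAMPLE action accepted by the validation above; ingress
 * sampling requires a destination QUEUE among the sub-actions, and a
 * ratio of 2 means roughly every second packet is sampled.
 */
#if 0 /* illustrative only */
	static const struct rte_flow_action_queue sample_queue = {
		.index = 0,	/* hypothetical Rx queue */
	};
	static const struct rte_flow_action sample_sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &sample_queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	static const struct rte_flow_action_sample sample_conf = {
		.ratio = 2,
		.actions = sample_sub_actions,
	};
#endif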
4492
4493 /**
4494  * Find existing modify-header resource or create and register a new one.
4495  *
4496  * @param[in, out] dev
4497  *   Pointer to rte_eth_dev structure.
4498  * @param[in, out] resource
4499  *   Pointer to modify-header resource.
4500  * @param[in, out] dev_flow
4501  *   Pointer to the dev_flow.
4502  * @param[out] error
4503  *   Pointer to error structure.
4504  *
4505  * @return
4506  *   0 on success, otherwise -errno and errno is set.
4507  */
4508 static int
4509 flow_dv_modify_hdr_resource_register
4510                         (struct rte_eth_dev *dev,
4511                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4512                          struct mlx5_flow *dev_flow,
4513                          struct rte_flow_error *error)
4514 {
4515         struct mlx5_priv *priv = dev->data->dev_private;
4516         struct mlx5_dev_ctx_shared *sh = priv->sh;
4517         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
4518         struct mlx5dv_dr_domain *ns;
4519         uint32_t actions_len;
4520         struct mlx5_hlist_entry *entry;
4521         union mlx5_flow_modify_hdr_key hdr_mod_key = {
4522                 {
4523                         .ft_type = resource->ft_type,
4524                         .actions_num = resource->actions_num,
4525                         .group = dev_flow->dv.group,
4526                         .cksum = 0,
4527                 }
4528         };
4529         int ret;
4530
4531         resource->flags = dev_flow->dv.group ? 0 :
4532                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4533         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4534                                     resource->flags))
4535                 return rte_flow_error_set(error, EOVERFLOW,
4536                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4537                                           "too many modify header items");
4538         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4539                 ns = sh->fdb_domain;
4540         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4541                 ns = sh->tx_domain;
4542         else
4543                 ns = sh->rx_domain;
4544         /* Lookup a matching resource from cache. */
4545         actions_len = resource->actions_num * sizeof(resource->actions[0]);
4546         hdr_mod_key.cksum = __rte_raw_cksum(resource->actions, actions_len, 0);
4547         resource->entry.key = hdr_mod_key.v64;
4548         entry = mlx5_hlist_lookup_ex(sh->modify_cmds, resource->entry.key,
4549                                      flow_dv_modify_hdr_resource_match,
4550                                      (void *)resource);
4551         if (entry) {
4552                 cache_resource = container_of(entry,
4553                                         struct mlx5_flow_dv_modify_hdr_resource,
4554                                         entry);
4555                 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
4556                         (void *)cache_resource,
4557                         __atomic_load_n(&cache_resource->refcnt,
4558                                         __ATOMIC_RELAXED));
4559                 __atomic_fetch_add(&cache_resource->refcnt, 1,
4560                                    __ATOMIC_RELAXED);
4561                 dev_flow->handle->dvh.modify_hdr = cache_resource;
4562                 return 0;
4564         }
4565         /* Register new modify-header resource. */
4566         cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
4567                                     sizeof(*cache_resource) + actions_len, 0,
4568                                     SOCKET_ID_ANY);
4569         if (!cache_resource)
4570                 return rte_flow_error_set(error, ENOMEM,
4571                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4572                                           "cannot allocate resource memory");
4573         *cache_resource = *resource;
4574         rte_memcpy(cache_resource->actions, resource->actions, actions_len);
4575         ret = mlx5_flow_os_create_flow_action_modify_header
4576                                         (sh->ctx, ns, cache_resource,
4577                                          actions_len, &cache_resource->action);
4578         if (ret) {
4579                 mlx5_free(cache_resource);
4580                 return rte_flow_error_set(error, ENOMEM,
4581                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4582                                           NULL, "cannot create action");
4583         }
4584         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
4585         if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry,
4586                                  flow_dv_modify_hdr_resource_match,
4587                                  (void *)cache_resource)) {
4588                 claim_zero(mlx5_flow_os_destroy_flow_action
4589                                                 (cache_resource->action));
4590                 mlx5_free(cache_resource);
4591                 return rte_flow_error_set(error, EEXIST,
4592                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4593                                           NULL, "action already exists");
4594         }
4595         dev_flow->handle->dvh.modify_hdr = cache_resource;
4596         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
4597                 (void *)cache_resource,
4598                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
4599         return 0;
4600 }
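/*
 * Editor's note (not part of the original file): the 64-bit hash key
 * built above folds the table type, the action count, the group and a
 * raw checksum of the action bytes into a single word; the exact bit
 * layout lives in union mlx5_flow_modify_hdr_key (mlx5_flow.h, not
 * shown here). Conceptually:
 *
 *	key = { ft_type, actions_num, group,
 *		cksum = __rte_raw_cksum(actions, actions_len, 0) };
 *	entry = hash_lookup(sh->modify_cmds, key.v64);
 *	// checksum collisions are resolved by the full memcmp() in
 *	// flow_dv_modify_hdr_resource_match()
 */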
4601
4602 /**
4603  * Get DV flow counter by index.
4604  *
4605  * @param[in] dev
4606  *   Pointer to the Ethernet device structure.
4607  * @param[in] idx
4608  *   mlx5 flow counter index in the container.
4609  * @param[out] ppool
4610  *   mlx5 flow counter pool in the container.
4611  *
4612  * @return
4613  *   Pointer to the counter, NULL otherwise.
4614  */
4615 static struct mlx5_flow_counter *
4616 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4617                            uint32_t idx,
4618                            struct mlx5_flow_counter_pool **ppool)
4619 {
4620         struct mlx5_priv *priv = dev->data->dev_private;
4621         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4622         struct mlx5_flow_counter_pool *pool;
4623
4624         /* Decrease to original index and clear shared bit. */
4625         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4626         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4627         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4628         MLX5_ASSERT(pool);
4629         if (ppool)
4630                 *ppool = pool;
4631         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4632 }
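/*
 * Editor's illustration (not part of the original file): a hedged
 * round-trip sketch of the 1-based counter index encoding decoded
 * above, assuming MLX5_MAKE_CNT_IDX() composes
 * pool_index * MLX5_COUNTERS_PER_POOL + offset + 1.
 */
#if 0 /* illustrative only */
	uint32_t idx = MLX5_MAKE_CNT_IDX(pool->index, offset);
	/* Decode, mirroring flow_dv_counter_get_by_idx(): */
	uint32_t raw = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);

	MLX5_ASSERT(raw / MLX5_COUNTERS_PER_POOL == pool->index);
	MLX5_ASSERT(raw % MLX5_COUNTERS_PER_POOL == offset);
#endif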
4633
4634 /**
4635  * Check the devx counter belongs to the pool.
4636  *
4637  * @param[in] pool
4638  *   Pointer to the counter pool.
4639  * @param[in] id
4640  *   The counter devx ID.
4641  *
4642  * @return
4643  *   True if counter belongs to the pool, false otherwise.
4644  */
4645 static bool
4646 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4647 {
4648         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4649                    MLX5_COUNTERS_PER_POOL;
4650
4651         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4652                 return true;
4653         return false;
4654 }
4655
4656 /**
4657  * Get a pool by devx counter ID.
4658  *
4659  * @param[in] cmng
4660  *   Pointer to the counter management.
4661  * @param[in] id
4662  *   The counter devx ID.
4663  *
4664  * @return
4665  *   The counter pool pointer if it exists, NULL otherwise.
4666  */
4667 static struct mlx5_flow_counter_pool *
4668 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4669 {
4670         uint32_t i;
4671         struct mlx5_flow_counter_pool *pool = NULL;
4672
4673         rte_spinlock_lock(&cmng->pool_update_sl);
4674         /* Check last used pool. */
4675         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4676             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4677                 pool = cmng->pools[cmng->last_pool_idx];
4678                 goto out;
4679         }
4680         /* ID out of range means no suitable pool in the container. */
4681         if (id > cmng->max_id || id < cmng->min_id)
4682                 goto out;
4683         /*
4684          * Find the pool from the end of the container, since counter IDs
4685          * are mostly sequentially increasing and the last pool is usually
4686          * the needed one.
4687          */
4688         i = cmng->n_valid;
4689         while (i--) {
4690                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4691
4692                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4693                         pool = pool_tmp;
4694                         break;
4695                 }
4696         }
4697 out:
4698         rte_spinlock_unlock(&cmng->pool_update_sl);
4699         return pool;
4700 }
4701
4702 /**
4703  * Resize a counter container.
4704  *
4705  * @param[in] dev
4706  *   Pointer to the Ethernet device structure.
4707  *
4708  * @return
4709  *   0 on success, otherwise negative errno value and rte_errno is set.
4710  */
4711 static int
4712 flow_dv_container_resize(struct rte_eth_dev *dev)
4713 {
4714         struct mlx5_priv *priv = dev->data->dev_private;
4715         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4716         void *old_pools = cmng->pools;
4717         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4718         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4719         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4720
4721         if (!pools) {
4722                 rte_errno = ENOMEM;
4723                 return -ENOMEM;
4724         }
4725         if (old_pools)
4726                 memcpy(pools, old_pools, cmng->n *
4727                                        sizeof(struct mlx5_flow_counter_pool *));
4728         cmng->n = resize;
4729         cmng->pools = pools;
4730         if (old_pools)
4731                 mlx5_free(old_pools);
4732         return 0;
4733 }
4734
4735 /**
4736  * Query a devx flow counter.
4737  *
4738  * @param[in] dev
4739  *   Pointer to the Ethernet device structure.
4740  * @param[in] counter
4741  *   Index to the flow counter.
4742  * @param[out] pkts
4743  *   The statistics value of packets.
4744  * @param[out] bytes
4745  *   The statistics value of bytes.
4746  *
4747  * @return
4748  *   0 on success, otherwise a negative errno value and rte_errno is set.
4749  */
4750 static inline int
4751 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4752                      uint64_t *bytes)
4753 {
4754         struct mlx5_priv *priv = dev->data->dev_private;
4755         struct mlx5_flow_counter_pool *pool = NULL;
4756         struct mlx5_flow_counter *cnt;
4757         int offset;
4758
4759         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4760         MLX5_ASSERT(pool);
4761         if (priv->sh->cmng.counter_fallback)
4762                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4763                                         0, pkts, bytes, 0, NULL, NULL, 0);
4764         rte_spinlock_lock(&pool->sl);
4765         if (!pool->raw) {
4766                 *pkts = 0;
4767                 *bytes = 0;
4768         } else {
4769                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4770                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4771                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4772         }
4773         rte_spinlock_unlock(&pool->sl);
4774         return 0;
4775 }
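/*
 * Editor's illustration (not part of the original file): a hedged sketch
 * of reading statistics through the helper above, where @counter is an
 * index previously returned by flow_dv_counter_alloc().
 */
#if 0 /* illustrative only */
	uint64_t pkts = 0, bytes = 0;

	if (!_flow_dv_query_count(dev, counter, &pkts, &bytes))
		DRV_LOG(DEBUG, "counter %u: %" PRIu64 " pkts, %" PRIu64
			" bytes", counter, pkts, bytes);
#endif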
4776
4777 /**
4778  * Create and initialize a new counter pool.
4779  *
4780  * @param[in] dev
4781  *   Pointer to the Ethernet device structure.
4782  * @param[in] dcs
4783  *   The devX counter handle.
4784  * @param[in] age
4785  *   Whether the pool is for counters allocated for aging.
4786  *
4787  * @return
4788  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4791  */
4792 static struct mlx5_flow_counter_pool *
4793 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4794                     uint32_t age)
4795 {
4796         struct mlx5_priv *priv = dev->data->dev_private;
4797         struct mlx5_flow_counter_pool *pool;
4798         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4799         bool fallback = priv->sh->cmng.counter_fallback;
4800         uint32_t size = sizeof(*pool);
4801
4802         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4803         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4804         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4805         if (!pool) {
4806                 rte_errno = ENOMEM;
4807                 return NULL;
4808         }
4809         pool->raw = NULL;
4810         pool->is_aged = !!age;
4811         pool->query_gen = 0;
4812         pool->min_dcs = dcs;
4813         rte_spinlock_init(&pool->sl);
4814         rte_spinlock_init(&pool->csl);
4815         TAILQ_INIT(&pool->counters[0]);
4816         TAILQ_INIT(&pool->counters[1]);
4817         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4818         rte_spinlock_lock(&cmng->pool_update_sl);
4819         pool->index = cmng->n_valid;
4820         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4821                 mlx5_free(pool);
4822                 rte_spinlock_unlock(&cmng->pool_update_sl);
4823                 return NULL;
4824         }
4825         cmng->pools[pool->index] = pool;
4826         cmng->n_valid++;
4827         if (unlikely(fallback)) {
4828                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4829
4830                 if (base < cmng->min_id)
4831                         cmng->min_id = base;
4832                 if (base > cmng->max_id)
4833                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4834                 cmng->last_pool_idx = pool->index;
4835         }
4836         rte_spinlock_unlock(&cmng->pool_update_sl);
4837         return pool;
4838 }
4839
4840 /**
4841  * Prepare a new counter and/or a new counter pool.
4842  *
4843  * @param[in] dev
4844  *   Pointer to the Ethernet device structure.
4845  * @param[out] cnt_free
4846  *   Where to put the pointer of a new counter.
4847  * @param[in] age
4848  *   Whether the pool is for counters allocated for aging.
4849  *
4850  * @return
4851  *   The counter pool pointer and @p cnt_free is set on success,
4852  *   NULL otherwise and rte_errno is set.
4853  */
4854 static struct mlx5_flow_counter_pool *
4855 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4856                              struct mlx5_flow_counter **cnt_free,
4857                              uint32_t age)
4858 {
4859         struct mlx5_priv *priv = dev->data->dev_private;
4860         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4861         struct mlx5_flow_counter_pool *pool;
4862         struct mlx5_counters tmp_tq;
4863         struct mlx5_devx_obj *dcs = NULL;
4864         struct mlx5_flow_counter *cnt;
4865         enum mlx5_counter_type cnt_type =
4866                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4867         bool fallback = priv->sh->cmng.counter_fallback;
4868         uint32_t i;
4869
4870         if (fallback) {
4871                 /* bulk_bitmap must be 0 for single counter allocation. */
4872                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4873                 if (!dcs)
4874                         return NULL;
4875                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4876                 if (!pool) {
4877                         pool = flow_dv_pool_create(dev, dcs, age);
4878                         if (!pool) {
4879                                 mlx5_devx_cmd_destroy(dcs);
4880                                 return NULL;
4881                         }
4882                 }
4883                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4884                 cnt = MLX5_POOL_GET_CNT(pool, i);
4885                 cnt->pool = pool;
4886                 cnt->dcs_when_free = dcs;
4887                 *cnt_free = cnt;
4888                 return pool;
4889         }
4890         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4891         if (!dcs) {
4892                 rte_errno = ENODATA;
4893                 return NULL;
4894         }
4895         pool = flow_dv_pool_create(dev, dcs, age);
4896         if (!pool) {
4897                 mlx5_devx_cmd_destroy(dcs);
4898                 return NULL;
4899         }
4900         TAILQ_INIT(&tmp_tq);
4901         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4902                 cnt = MLX5_POOL_GET_CNT(pool, i);
4903                 cnt->pool = pool;
4904                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4905         }
4906         rte_spinlock_lock(&cmng->csl[cnt_type]);
4907         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4908         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4909         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4910         (*cnt_free)->pool = pool;
4911         return pool;
4912 }
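/*
 * Editor's note (not part of the original file): in the non-fallback
 * path above a single DevX call with bulk bitmap 0x4 presumably yields
 * a full pool of MLX5_COUNTERS_PER_POOL counters. Counter 0 is returned
 * directly through @p cnt_free while counters 1..N-1 are concatenated
 * onto the per-type free list, so subsequent allocations are plain
 * spinlock-protected list pops with no firmware round-trip.
 */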
4913
4914 /**
4915  * Allocate a flow counter.
4916  *
4917  * @param[in] dev
4918  *   Pointer to the Ethernet device structure.
4919  * @param[in] age
4920  *   Whether the counter was allocated for aging.
4921  *
4922  * @return
4923  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4924  */
4925 static uint32_t
4926 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4927 {
4928         struct mlx5_priv *priv = dev->data->dev_private;
4929         struct mlx5_flow_counter_pool *pool = NULL;
4930         struct mlx5_flow_counter *cnt_free = NULL;
4931         bool fallback = priv->sh->cmng.counter_fallback;
4932         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4933         enum mlx5_counter_type cnt_type =
4934                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4935         uint32_t cnt_idx;
4936
4937         if (!priv->config.devx) {
4938                 rte_errno = ENOTSUP;
4939                 return 0;
4940         }
4941         /* Get free counters from container. */
4942         rte_spinlock_lock(&cmng->csl[cnt_type]);
4943         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
4944         if (cnt_free)
4945                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
4946         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4947         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
4948                 goto err;
4949         pool = cnt_free->pool;
4950         if (fallback)
4951                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
4952         /* Create a DV counter action only in the first time usage. */
4953         if (!cnt_free->action) {
4954                 uint16_t offset;
4955                 struct mlx5_devx_obj *dcs;
4956                 int ret;
4957
4958                 if (!fallback) {
4959                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4960                         dcs = pool->min_dcs;
4961                 } else {
4962                         offset = 0;
4963                         dcs = cnt_free->dcs_when_free;
4964                 }
4965                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4966                                                             &cnt_free->action);
4967                 if (ret) {
4968                         rte_errno = errno;
4969                         goto err;
4970                 }
4971         }
4972         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4973                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4974         /* Update the counter reset values. */
4975         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4976                                  &cnt_free->bytes))
4977                 goto err;
4978         if (!fallback && !priv->sh->cmng.query_thread_on)
4979                 /* Start the asynchronous batch query by the host thread. */
4980                 mlx5_set_query_alarm(priv->sh);
4981         return cnt_idx;
4982 err:
4983         if (cnt_free) {
4984                 cnt_free->pool = pool;
4985                 if (fallback)
4986                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
4987                 rte_spinlock_lock(&cmng->csl[cnt_type]);
4988                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
4989                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
4990         }
4991         return 0;
4992 }
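/*
 * Index encoding sketch (illustrative, not part of the driver):
 * MLX5_MAKE_CNT_IDX folds the pool index and the in-pool offset into a
 * single 1-based value, keeping 0 free to signal failure. Assuming 512
 * counters per pool, pool 3 / slot 7 would encode as
 * 3 * 512 + 7 + 1 = 1544.
 *
 * @code
 *     uint32_t cnt_idx = MLX5_MAKE_CNT_IDX(3, 7);
 * @endcode
 */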
4993
4994 /**
4995  * Allocate a shared flow counter.
4996  *
4997  * @param[in] ctx
4998  *   Pointer to the shared counter configuration.
4999  * @param[out] data
5000  *   Pointer to save the allocated counter index.
5001  *
5002  * @return
5003  *   0 on success, a negative errno value otherwise and rte_errno is set.
5004  */
5006 static int32_t
5007 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5008 {
5009         struct mlx5_shared_counter_conf *conf = ctx;
5010         struct rte_eth_dev *dev = conf->dev;
5011         struct mlx5_flow_counter *cnt;
5012
5013         data->dword = flow_dv_counter_alloc(dev, 0);
             /* Zero index means the base counter allocation failed. */
             if (!data->dword)
                     return -rte_errno;
5014         data->dword |= MLX5_CNT_SHARED_OFFSET;
5015         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5016         cnt->shared_info.id = conf->id;
5017         return 0;
5018 }
5019
5020 /**
5021  * Get a shared flow counter.
5022  *
5023  * @param[in] dev
5024  *   Pointer to the Ethernet device structure.
5025  * @param[in] id
5026  *   Counter identifier.
5027  *
5028  * @return
5029  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5030  */
5031 static uint32_t
5032 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5033 {
5034         struct mlx5_priv *priv = dev->data->dev_private;
5035         struct mlx5_shared_counter_conf conf = {
5036                 .dev = dev,
5037                 .id = id,
5038         };
5039         union mlx5_l3t_data data = {
5040                 .dword = 0,
5041         };
5042
5043         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5044                                flow_dv_counter_alloc_shared_cb, &conf);
5045         return data.dword;
5046 }
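/*
 * Usage sketch (illustrative, not part of the driver): two lookups with
 * the same shared id resolve to the same counter index, and the returned
 * index always carries the MLX5_CNT_SHARED_OFFSET bit so that
 * IS_SHARED_CNT() can recognize it at release time.
 *
 * @code
 *     uint32_t a = flow_dv_counter_get_shared(dev, 42);
 *     uint32_t b = flow_dv_counter_get_shared(dev, 42);
 *
 *     MLX5_ASSERT(a == b && IS_SHARED_CNT(a));
 * @endcode
 */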
5047
5048 /**
5049  * Get age param from counter index.
5050  *
5051  * @param[in] dev
5052  *   Pointer to the Ethernet device structure.
5053  * @param[in] counter
5054  *   Index to the counter handle.
5055  *
5056  * @return
5057  *   The aging parameter specified for the counter index.
5058  */
5059 static struct mlx5_age_param *
5060 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5061                                 uint32_t counter)
5062 {
5063         struct mlx5_flow_counter *cnt;
5064         struct mlx5_flow_counter_pool *pool = NULL;
5065
5066         flow_dv_counter_get_by_idx(dev, counter, &pool);
5067         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5068         cnt = MLX5_POOL_GET_CNT(pool, counter);
5069         return MLX5_CNT_TO_AGE(cnt);
5070 }
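/*
 * Decode sketch (illustrative): counter indices are 1-based, so the
 * in-pool slot above is (counter - 1) % MLX5_COUNTERS_PER_POOL.
 * Assuming 512 counters per pool, index 513 decodes to slot 0 of the
 * second pool.
 */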
5071
5072 /**
5073  * Remove a flow counter from the aged counter list.
5074  *
5075  * @param[in] dev
5076  *   Pointer to the Ethernet device structure.
5077  * @param[in] counter
5078  *   Index to the counter handle.
5079  * @param[in] cnt
5080  *   Pointer to the counter handle.
5081  */
5082 static void
5083 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5084                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5085 {
5086         struct mlx5_age_info *age_info;
5087         struct mlx5_age_param *age_param;
5088         struct mlx5_priv *priv = dev->data->dev_private;
5089         uint16_t expected = AGE_CANDIDATE;
5090
5091         age_info = GET_PORT_AGE_INFO(priv);
5092         age_param = flow_dv_counter_idx_get_age(dev, counter);
5093         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5094                                          AGE_FREE, false, __ATOMIC_RELAXED,
5095                                          __ATOMIC_RELAXED)) {
5096                 /*
5097                  * The lock is needed even on age timeout, since the
5098                  * counter may still be under processing.
5099                  */
5100                 rte_spinlock_lock(&age_info->aged_sl);
5101                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5102                 rte_spinlock_unlock(&age_info->aged_sl);
5103                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5104         }
5105 }
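/*
 * State sketch (illustrative): a counter under aging normally moves
 * AGE_CANDIDATE -> AGE_TMOUT (by the aging check) -> AGE_FREE (here).
 * The compare-and-swap above succeeds only while the state is still
 * AGE_CANDIDATE; in any other state the counter is assumed to sit on
 * the aged list and is unlinked under the lock first.
 */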
5106
5107 /**
5108  * Release a flow counter.
5109  *
5110  * @param[in] dev
5111  *   Pointer to the Ethernet device structure.
5112  * @param[in] counter
5113  *   Index to the counter handle.
5114  */
5115 static void
5116 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
5117 {
5118         struct mlx5_priv *priv = dev->data->dev_private;
5119         struct mlx5_flow_counter_pool *pool = NULL;
5120         struct mlx5_flow_counter *cnt;
5121         enum mlx5_counter_type cnt_type;
5122
5123         if (!counter)
5124                 return;
5125         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5126         MLX5_ASSERT(pool);
5127         if (IS_SHARED_CNT(counter) &&
5128             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5129                 return;
5130         if (pool->is_aged)
5131                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5132         cnt->pool = pool;
5133         /*
5134          * Put the counter back to a list to be updated, in non-fallback
5135          * mode. Two lists are used alternately: while one is being queried,
5136          * freed counters are added to the other one, selected by the pool
5137          * query_gen value. After the query finishes, that list is
5138          * concatenated to the global container counter list. The lists are
5139          * switched when a query starts, so no lock is needed here: the
5140          * query callback and this release function always operate on
5141          * different lists.
5142          */
5143         if (!priv->sh->cmng.counter_fallback) {
5144                 rte_spinlock_lock(&pool->csl);
5145                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5146                 rte_spinlock_unlock(&pool->csl);
5147         } else {
5148                 cnt->dcs_when_free = cnt->dcs_when_active;
5149                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5150                                            MLX5_COUNTER_TYPE_ORIGIN;
5151                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5152                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5153                                   cnt, next);
5154                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5155         }
5156 }
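/*
 * Usage sketch (illustrative, error handling simplified): an index
 * obtained from flow_dv_counter_alloc() is handed back with
 * flow_dv_counter_release(); index 0 means the allocation failed and
 * rte_errno is set.
 *
 * @code
 *     uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);
 *
 *     if (!cnt_idx)
 *             return -rte_errno;
 *     ... attach the counter to a flow rule, query hits/bytes ...
 *     flow_dv_counter_release(dev, cnt_idx);
 * @endcode
 */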
5157
5158 /**
5159  * Verify the @p attributes will be correctly understood by the NIC and store
5160  * them in the @p flow if everything is correct.
5161  *
5162  * @param[in] dev
5163  *   Pointer to the dev struct.
      * @param[in] tunnel
      *   Pointer to the tunnel offload descriptor, or NULL.
5164  * @param[in] attributes
5165  *   Pointer to the flow attributes.
5166  * @param[in] grp_info
5167  *   Flow group translation info.
5168  * @param[out] error
5169  *   Pointer to error structure.
5170  *
5171  * @return
5172  *   - 0 on success and non-root table.
5173  *   - 1 on success and root table.
5174  *   - a negative errno value otherwise and rte_errno is set.
5175  */
5176 static int
5177 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5178                             const struct mlx5_flow_tunnel *tunnel,
5179                             const struct rte_flow_attr *attributes,
5180                             struct flow_grp_info grp_info,
5181                             struct rte_flow_error *error)
5182 {
5183         struct mlx5_priv *priv = dev->data->dev_private;
5184         uint32_t priority_max = priv->config.flow_prio - 1;
5185         int ret = 0;
5186
5187 #ifndef HAVE_MLX5DV_DR
5188         RTE_SET_USED(tunnel);
5189         RTE_SET_USED(grp_info);
5190         if (attributes->group)
5191                 return rte_flow_error_set(error, ENOTSUP,
5192                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5193                                           NULL,
5194                                           "groups are not supported");
5195 #else
5196         uint32_t table = 0;
5197
5198         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5199                                        grp_info, error);
5200         if (ret)
5201                 return ret;
5202         if (!table)
5203                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5204 #endif
5205         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5206             attributes->priority >= priority_max)
5207                 return rte_flow_error_set(error, ENOTSUP,
5208                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5209                                           NULL,
5210                                           "priority out of range");
5211         if (attributes->transfer) {
5212                 if (!priv->config.dv_esw_en)
5213                         return rte_flow_error_set
5214                                 (error, ENOTSUP,
5215                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5216                                  "E-Switch dr is not supported");
5217                 if (!(priv->representor || priv->master))
5218                         return rte_flow_error_set
5219                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5220                                  NULL, "E-Switch configuration can only be"
5221                                  " done by a master or a representor device");
5222                 if (attributes->egress)
5223                         return rte_flow_error_set
5224                                 (error, ENOTSUP,
5225                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5226                                  "egress is not supported");
5227         }
5228         if (!(attributes->egress ^ attributes->ingress))
5229                 return rte_flow_error_set(error, ENOTSUP,
5230                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5231                                           "must specify exactly one of "
5232                                           "ingress or egress");
5233         return ret;
5234 }
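/*
 * Attribute sketch (illustrative): a minimal ingress rule on a non-root
 * group; after the group is translated to a table id, the function above
 * returns 0 (non-root table).
 *
 * @code
 *     struct rte_flow_attr attr = {
 *         .group = 1,
 *         .priority = 0,
 *         .ingress = 1,
 *     };
 * @endcode
 */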
5235
5236 /**
5237  * Internal validation function. For validating both actions and items.
5238  *
5239  * @param[in] dev
5240  *   Pointer to the rte_eth_dev structure.
5241  * @param[in] attr
5242  *   Pointer to the flow attributes.
5243  * @param[in] items
5244  *   Pointer to the list of items.
5245  * @param[in] actions
5246  *   Pointer to the list of actions.
5247  * @param[in] external
5248  *   This flow rule is created by a request external to the PMD.
5249  * @param[in] hairpin
5250  *   Number of hairpin TX actions, 0 means classic flow.
5251  * @param[out] error
5252  *   Pointer to the error structure.
5253  *
5254  * @return
5255  *   0 on success, a negative errno value otherwise and rte_errno is set.
5256  */
5257 static int
5258 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5259                  const struct rte_flow_item items[],
5260                  const struct rte_flow_action actions[],
5261                  bool external, int hairpin, struct rte_flow_error *error)
5262 {
5263         int ret;
5264         uint64_t action_flags = 0;
5265         uint64_t item_flags = 0;
5266         uint64_t last_item = 0;
5267         uint8_t next_protocol = 0xff;
5268         uint16_t ether_type = 0;
5269         int actions_n = 0;
5270         uint8_t item_ipv6_proto = 0;
5271         const struct rte_flow_item *gre_item = NULL;
5272         const struct rte_flow_action_raw_decap *decap;
5273         const struct rte_flow_action_raw_encap *encap;
5274         const struct rte_flow_action_rss *rss;
5275         const struct rte_flow_item_tcp nic_tcp_mask = {
5276                 .hdr = {
5277                         .tcp_flags = 0xFF,
5278                         .src_port = RTE_BE16(UINT16_MAX),
5279                         .dst_port = RTE_BE16(UINT16_MAX),
5280                 }
5281         };
5282         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5283                 .hdr = {
5284                         .src_addr =
5285                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5286                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5287                         .dst_addr =
5288                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5289                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5290                         .vtc_flow = RTE_BE32(0xffffffff),
5291                         .proto = 0xff,
5292                         .hop_limits = 0xff,
5293                 },
5294                 .has_frag_ext = 1,
5295         };
5296         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5297                 .hdr = {
5298                         .common = {
5299                                 .u32 =
5300                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5301                                         .type = 0xFF,
5302                                         }).u32),
5303                         },
5304                         .dummy[0] = 0xffffffff,
5305                 },
5306         };
5307         struct mlx5_priv *priv = dev->data->dev_private;
5308         struct mlx5_dev_config *dev_conf = &priv->config;
5309         uint16_t queue_index = 0xFFFF;
5310         const struct rte_flow_item_vlan *vlan_m = NULL;
5311         int16_t rw_act_num = 0;
5312         uint64_t is_root;
5313         const struct mlx5_flow_tunnel *tunnel;
5314         struct flow_grp_info grp_info = {
5315                 .external = !!external,
5316                 .transfer = !!attr->transfer,
5317                 .fdb_def_rule = !!priv->fdb_def_rule,
5318         };
5319         const struct rte_eth_hairpin_conf *conf;
5320
5321         if (items == NULL)
5322                 return -1;
5323         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5324                 tunnel = flow_items_to_tunnel(items);
5325                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5326                                 MLX5_FLOW_ACTION_DECAP;
5327         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5328                 tunnel = flow_actions_to_tunnel(actions);
5329                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5330         } else {
5331                 tunnel = NULL;
5332         }
5333         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5334                                 (dev, tunnel, attr, items, actions);
5335         ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
5336         if (ret < 0)
5337                 return ret;
5338         is_root = (uint64_t)ret;
5339         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5340                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5341                 int type = items->type;
5342
5343                 if (!mlx5_flow_os_item_supported(type))
5344                         return rte_flow_error_set(error, ENOTSUP,
5345                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5346                                                   NULL, "item not supported");
5347                 switch (type) {
5348                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5349                         if (items[0].type != (typeof(items[0].type))
5350                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5351                                 return rte_flow_error_set
5352                                                 (error, EINVAL,
5353                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5354                                                 NULL, "MLX5 private items "
5355                                                 "must be the first");
5356                         break;
5357                 case RTE_FLOW_ITEM_TYPE_VOID:
5358                         break;
5359                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5360                         ret = flow_dv_validate_item_port_id
5361                                         (dev, items, attr, item_flags, error);
5362                         if (ret < 0)
5363                                 return ret;
5364                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5365                         break;
5366                 case RTE_FLOW_ITEM_TYPE_ETH:
5367                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5368                                                           true, error);
5369                         if (ret < 0)
5370                                 return ret;
5371                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5372                                              MLX5_FLOW_LAYER_OUTER_L2;
5373                         if (items->mask != NULL && items->spec != NULL) {
5374                                 ether_type =
5375                                         ((const struct rte_flow_item_eth *)
5376                                          items->spec)->type;
5377                                 ether_type &=
5378                                         ((const struct rte_flow_item_eth *)
5379                                          items->mask)->type;
5380                                 ether_type = rte_be_to_cpu_16(ether_type);
5381                         } else {
5382                                 ether_type = 0;
5383                         }
5384                         break;
5385                 case RTE_FLOW_ITEM_TYPE_VLAN:
5386                         ret = flow_dv_validate_item_vlan(items, item_flags,
5387                                                          dev, error);
5388                         if (ret < 0)
5389                                 return ret;
5390                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5391                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5392                         if (items->mask != NULL && items->spec != NULL) {
5393                                 ether_type =
5394                                         ((const struct rte_flow_item_vlan *)
5395                                          items->spec)->inner_type;
5396                                 ether_type &=
5397                                         ((const struct rte_flow_item_vlan *)
5398                                          items->mask)->inner_type;
5399                                 ether_type = rte_be_to_cpu_16(ether_type);
5400                         } else {
5401                                 ether_type = 0;
5402                         }
5403                         /* Store outer VLAN mask for of_push_vlan action. */
5404                         if (!tunnel)
5405                                 vlan_m = items->mask;
5406                         break;
5407                 case RTE_FLOW_ITEM_TYPE_IPV4:
5408                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5409                                                   &item_flags, &tunnel);
5410                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5411                                                          last_item, ether_type,
5412                                                          error);
5413                         if (ret < 0)
5414                                 return ret;
5415                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5416                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5417                         if (items->mask != NULL &&
5418                             ((const struct rte_flow_item_ipv4 *)
5419                              items->mask)->hdr.next_proto_id) {
5420                                 next_protocol =
5421                                         ((const struct rte_flow_item_ipv4 *)
5422                                          (items->spec))->hdr.next_proto_id;
5423                                 next_protocol &=
5424                                         ((const struct rte_flow_item_ipv4 *)
5425                                          (items->mask))->hdr.next_proto_id;
5426                         } else {
5427                                 /* Reset for inner layer. */
5428                                 next_protocol = 0xff;
5429                         }
5430                         break;
5431                 case RTE_FLOW_ITEM_TYPE_IPV6:
5432                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5433                                                   &item_flags, &tunnel);
5434                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5435                                                            last_item,
5436                                                            ether_type,
5437                                                            &nic_ipv6_mask,
5438                                                            error);
5439                         if (ret < 0)
5440                                 return ret;
5441                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5442                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5443                         if (items->mask != NULL &&
5444                             ((const struct rte_flow_item_ipv6 *)
5445                              items->mask)->hdr.proto) {
5446                                 item_ipv6_proto =
5447                                         ((const struct rte_flow_item_ipv6 *)
5448                                          items->spec)->hdr.proto;
5449                                 next_protocol =
5450                                         ((const struct rte_flow_item_ipv6 *)
5451                                          items->spec)->hdr.proto;
5452                                 next_protocol &=
5453                                         ((const struct rte_flow_item_ipv6 *)
5454                                          items->mask)->hdr.proto;
5455                         } else {
5456                                 /* Reset for inner layer. */
5457                                 next_protocol = 0xff;
5458                         }
5459                         break;
5460                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5461                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5462                                                                   item_flags,
5463                                                                   error);
5464                         if (ret < 0)
5465                                 return ret;
5466                         last_item = tunnel ?
5467                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5468                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5469                         if (items->mask != NULL &&
5470                             ((const struct rte_flow_item_ipv6_frag_ext *)
5471                              items->mask)->hdr.next_header) {
5472                                 next_protocol =
5473                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5474                                  items->spec)->hdr.next_header;
5475                                 next_protocol &=
5476                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5477                                  items->mask)->hdr.next_header;
5478                         } else {
5479                                 /* Reset for inner layer. */
5480                                 next_protocol = 0xff;
5481                         }
5482                         break;
5483                 case RTE_FLOW_ITEM_TYPE_TCP:
5484                         ret = mlx5_flow_validate_item_tcp
5485                                                 (items, item_flags,
5486                                                  next_protocol,
5487                                                  &nic_tcp_mask,
5488                                                  error);
5489                         if (ret < 0)
5490                                 return ret;
5491                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5492                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5493                         break;
5494                 case RTE_FLOW_ITEM_TYPE_UDP:
5495                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5496                                                           next_protocol,
5497                                                           error);
5498                         if (ret < 0)
5499                                 return ret;
5500                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5501                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5502                         break;
5503                 case RTE_FLOW_ITEM_TYPE_GRE:
5504                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5505                                                           next_protocol, error);
5506                         if (ret < 0)
5507                                 return ret;
5508                         gre_item = items;
5509                         last_item = MLX5_FLOW_LAYER_GRE;
5510                         break;
5511                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5512                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5513                                                             next_protocol,
5514                                                             error);
5515                         if (ret < 0)
5516                                 return ret;
5517                         last_item = MLX5_FLOW_LAYER_NVGRE;
5518                         break;
5519                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5520                         ret = mlx5_flow_validate_item_gre_key
5521                                 (items, item_flags, gre_item, error);
5522                         if (ret < 0)
5523                                 return ret;
5524                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5525                         break;
5526                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5527                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5528                                                             error);
5529                         if (ret < 0)
5530                                 return ret;
5531                         last_item = MLX5_FLOW_LAYER_VXLAN;
5532                         break;
5533                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5534                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5535                                                                 item_flags, dev,
5536                                                                 error);
5537                         if (ret < 0)
5538                                 return ret;
5539                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5540                         break;
5541                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5542                         ret = mlx5_flow_validate_item_geneve(items,
5543                                                              item_flags, dev,
5544                                                              error);
5545                         if (ret < 0)
5546                                 return ret;
5547                         last_item = MLX5_FLOW_LAYER_GENEVE;
5548                         break;
5549                 case RTE_FLOW_ITEM_TYPE_MPLS:
5550                         ret = mlx5_flow_validate_item_mpls(dev, items,
5551                                                            item_flags,
5552                                                            last_item, error);
5553                         if (ret < 0)
5554                                 return ret;
5555                         last_item = MLX5_FLOW_LAYER_MPLS;
5556                         break;
5558                 case RTE_FLOW_ITEM_TYPE_MARK:
5559                         ret = flow_dv_validate_item_mark(dev, items, attr,
5560                                                          error);
5561                         if (ret < 0)
5562                                 return ret;
5563                         last_item = MLX5_FLOW_ITEM_MARK;
5564                         break;
5565                 case RTE_FLOW_ITEM_TYPE_META:
5566                         ret = flow_dv_validate_item_meta(dev, items, attr,
5567                                                          error);
5568                         if (ret < 0)
5569                                 return ret;
5570                         last_item = MLX5_FLOW_ITEM_METADATA;
5571                         break;
5572                 case RTE_FLOW_ITEM_TYPE_ICMP:
5573                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5574                                                            next_protocol,
5575                                                            error);
5576                         if (ret < 0)
5577                                 return ret;
5578                         last_item = MLX5_FLOW_LAYER_ICMP;
5579                         break;
5580                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5581                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5582                                                             next_protocol,
5583                                                             error);
5584                         if (ret < 0)
5585                                 return ret;
5586                         item_ipv6_proto = IPPROTO_ICMPV6;
5587                         last_item = MLX5_FLOW_LAYER_ICMP6;
5588                         break;
5589                 case RTE_FLOW_ITEM_TYPE_TAG:
5590                         ret = flow_dv_validate_item_tag(dev, items,
5591                                                         attr, error);
5592                         if (ret < 0)
5593                                 return ret;
5594                         last_item = MLX5_FLOW_ITEM_TAG;
5595                         break;
5596                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5597                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5598                         break;
5599                 case RTE_FLOW_ITEM_TYPE_GTP:
5600                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5601                                                         error);
5602                         if (ret < 0)
5603                                 return ret;
5604                         last_item = MLX5_FLOW_LAYER_GTP;
5605                         break;
5606                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5607                         /* Capacity will be checked in the translate stage. */
5608                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5609                                                             last_item,
5610                                                             ether_type,
5611                                                             &nic_ecpri_mask,
5612                                                             error);
5613                         if (ret < 0)
5614                                 return ret;
5615                         last_item = MLX5_FLOW_LAYER_ECPRI;
5616                         break;
5617                 default:
5618                         return rte_flow_error_set(error, ENOTSUP,
5619                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5620                                                   NULL, "item not supported");
5621                 }
5622                 item_flags |= last_item;
5623         }
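        /*
         * Pattern sketch (illustrative): the item loop above walks chains
         * such as the one below, accumulating item_flags so that layers
         * after the VXLAN item are validated as inner ones:
         *
         * @code
         *     const struct rte_flow_item items[] = {
         *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
         *         { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
         *         { .type = RTE_FLOW_ITEM_TYPE_UDP },
         *         { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
         *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
         *         { .type = RTE_FLOW_ITEM_TYPE_END },
         *     };
         * @endcode
         */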
5624         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5625                 int type = actions->type;
5626
5627                 if (!mlx5_flow_os_action_supported(type))
5628                         return rte_flow_error_set(error, ENOTSUP,
5629                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5630                                                   actions,
5631                                                   "action not supported");
5632                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5633                         return rte_flow_error_set(error, ENOTSUP,
5634                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5635                                                   actions, "too many actions");
5636                 switch (type) {
5637                 case RTE_FLOW_ACTION_TYPE_VOID:
5638                         break;
5639                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5640                         ret = flow_dv_validate_action_port_id(dev,
5641                                                               action_flags,
5642                                                               actions,
5643                                                               attr,
5644                                                               error);
5645                         if (ret)
5646                                 return ret;
5647                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5648                         ++actions_n;
5649                         break;
5650                 case RTE_FLOW_ACTION_TYPE_FLAG:
5651                         ret = flow_dv_validate_action_flag(dev, action_flags,
5652                                                            attr, error);
5653                         if (ret < 0)
5654                                 return ret;
5655                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5656                                 /* Count all modify-header actions as one. */
5657                                 if (!(action_flags &
5658                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5659                                         ++actions_n;
5660                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5661                                                 MLX5_FLOW_ACTION_MARK_EXT;
5662                         } else {
5663                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5664                                 ++actions_n;
5665                         }
5666                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5667                         break;
5668                 case RTE_FLOW_ACTION_TYPE_MARK:
5669                         ret = flow_dv_validate_action_mark(dev, actions,
5670                                                            action_flags,
5671                                                            attr, error);
5672                         if (ret < 0)
5673                                 return ret;
5674                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5675                                 /* Count all modify-header actions as one. */
5676                                 if (!(action_flags &
5677                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5678                                         ++actions_n;
5679                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5680                                                 MLX5_FLOW_ACTION_MARK_EXT;
5681                         } else {
5682                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5683                                 ++actions_n;
5684                         }
5685                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5686                         break;
5687                 case RTE_FLOW_ACTION_TYPE_SET_META:
5688                         ret = flow_dv_validate_action_set_meta(dev, actions,
5689                                                                action_flags,
5690                                                                attr, error);
5691                         if (ret < 0)
5692                                 return ret;
5693                         /* Count all modify-header actions as one action. */
5694                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5695                                 ++actions_n;
5696                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5697                         rw_act_num += MLX5_ACT_NUM_SET_META;
5698                         break;
5699                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5700                         ret = flow_dv_validate_action_set_tag(dev, actions,
5701                                                               action_flags,
5702                                                               attr, error);
5703                         if (ret < 0)
5704                                 return ret;
5705                         /* Count all modify-header actions as one action. */
5706                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5707                                 ++actions_n;
5708                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5709                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5710                         break;
5711                 case RTE_FLOW_ACTION_TYPE_DROP:
5712                         ret = mlx5_flow_validate_action_drop(action_flags,
5713                                                              attr, error);
5714                         if (ret < 0)
5715                                 return ret;
5716                         action_flags |= MLX5_FLOW_ACTION_DROP;
5717                         ++actions_n;
5718                         break;
5719                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5720                         ret = mlx5_flow_validate_action_queue(actions,
5721                                                               action_flags, dev,
5722                                                               attr, error);
5723                         if (ret < 0)
5724                                 return ret;
5725                         queue_index = ((const struct rte_flow_action_queue *)
5726                                                         (actions->conf))->index;
5727                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5728                         ++actions_n;
5729                         break;
5730                 case RTE_FLOW_ACTION_TYPE_RSS:
5731                         rss = actions->conf;
5732                         ret = mlx5_flow_validate_action_rss(actions,
5733                                                             action_flags, dev,
5734                                                             attr, item_flags,
5735                                                             error);
5736                         if (ret < 0)
5737                                 return ret;
5738                         if (rss != NULL && rss->queue_num)
5739                                 queue_index = rss->queue[0];
5740                         action_flags |= MLX5_FLOW_ACTION_RSS;
5741                         ++actions_n;
5742                         break;
5743                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5744                         ret = mlx5_flow_validate_action_default_miss
5745                                                 (action_flags, attr, error);
5747                         if (ret < 0)
5748                                 return ret;
5749                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5750                         ++actions_n;
5751                         break;
5752                 case RTE_FLOW_ACTION_TYPE_COUNT:
5753                         ret = flow_dv_validate_action_count(dev, error);
5754                         if (ret < 0)
5755                                 return ret;
5756                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5757                         ++actions_n;
5758                         break;
5759                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5760                         if (flow_dv_validate_action_pop_vlan(dev,
5761                                                              action_flags,
5762                                                              actions,
5763                                                              item_flags, attr,
5764                                                              error))
5765                                 return -rte_errno;
5766                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5767                         ++actions_n;
5768                         break;
5769                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5770                         ret = flow_dv_validate_action_push_vlan(dev,
5771                                                                 action_flags,
5772                                                                 vlan_m,
5773                                                                 actions, attr,
5774                                                                 error);
5775                         if (ret < 0)
5776                                 return ret;
5777                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5778                         ++actions_n;
5779                         break;
5780                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5781                         ret = flow_dv_validate_action_set_vlan_pcp
5782                                                 (action_flags, actions, error);
5783                         if (ret < 0)
5784                                 return ret;
5785                         /* PCP is counted together with the push_vlan command. */
5786                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5787                         break;
5788                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5789                         ret = flow_dv_validate_action_set_vlan_vid
5790                                                 (item_flags, action_flags,
5791                                                  actions, error);
5792                         if (ret < 0)
5793                                 return ret;
5794                         /* VID is counted together with the push_vlan command. */
5795                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5796                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5797                         break;
5798                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5799                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5800                         ret = flow_dv_validate_action_l2_encap(dev,
5801                                                                action_flags,
5802                                                                actions, attr,
5803                                                                error);
5804                         if (ret < 0)
5805                                 return ret;
5806                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5807                         ++actions_n;
5808                         break;
5809                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5810                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5811                         ret = flow_dv_validate_action_decap(dev, action_flags,
5812                                                             attr, error);
5813                         if (ret < 0)
5814                                 return ret;
5815                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5816                         ++actions_n;
5817                         break;
5818                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5819                         ret = flow_dv_validate_action_raw_encap_decap
5820                                 (dev, NULL, actions->conf, attr, &action_flags,
5821                                  &actions_n, error);
5822                         if (ret < 0)
5823                                 return ret;
5824                         break;
5825                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5826                         decap = actions->conf;
5827                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5828                                 ;
5829                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5830                                 encap = NULL;
5831                                 actions--;
5832                         } else {
5833                                 encap = actions->conf;
5834                         }
5835                         ret = flow_dv_validate_action_raw_encap_decap
5836                                            (dev,
5837                                             decap ? decap : &empty_decap, encap,
5838                                             attr, &action_flags, &actions_n,
5839                                             error);
5840                         if (ret < 0)
5841                                 return ret;
5842                         break;
5843                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5844                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5845                         ret = flow_dv_validate_action_modify_mac(action_flags,
5846                                                                  actions,
5847                                                                  item_flags,
5848                                                                  error);
5849                         if (ret < 0)
5850                                 return ret;
5851                         /* Count all modify-header actions as one action. */
5852                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5853                                 ++actions_n;
5854                         action_flags |= actions->type ==
5855                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5856                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5857                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5858                         /*
5859                          * Even though the source and destination MAC
5860                          * addresses overlap in the 4B-aligned header, the
5861                          * convert function handles them separately, so 4 SW
5862                          * actions are created in total: 2 actions are added
5863                          * per address, no matter how many bytes are set.
5864                          */
5865                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5866                         break;
5867                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5868                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5869                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5870                                                                   actions,
5871                                                                   item_flags,
5872                                                                   error);
5873                         if (ret < 0)
5874                                 return ret;
5875                         /* Count all modify-header actions as one action. */
5876                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5877                                 ++actions_n;
5878                         action_flags |= actions->type ==
5879                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5880                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5881                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5882                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5883                         break;
5884                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5885                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5886                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5887                                                                   actions,
5888                                                                   item_flags,
5889                                                                   error);
5890                         if (ret < 0)
5891                                 return ret;
5892                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5893                                 return rte_flow_error_set(error, ENOTSUP,
5894                                         RTE_FLOW_ERROR_TYPE_ACTION,
5895                                         actions,
5896                                         "Can't change header "
5897                                         "with ICMPv6 proto");
5898                         /* Count all modify-header actions as one action. */
5899                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5900                                 ++actions_n;
5901                         action_flags |= actions->type ==
5902                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5903                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5904                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5905                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5906                         break;
5907                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5908                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5909                         ret = flow_dv_validate_action_modify_tp(action_flags,
5910                                                                 actions,
5911                                                                 item_flags,
5912                                                                 error);
5913                         if (ret < 0)
5914                                 return ret;
5915                         /* Count all modify-header actions as one action. */
5916                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5917                                 ++actions_n;
5918                         action_flags |= actions->type ==
5919                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5920                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5921                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5922                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5923                         break;
5924                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5925                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5926                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5927                                                                  actions,
5928                                                                  item_flags,
5929                                                                  error);
5930                         if (ret < 0)
5931                                 return ret;
5932                         /* Count all modify-header actions as one action. */
5933                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5934                                 ++actions_n;
5935                         action_flags |= actions->type ==
5936                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5937                                                 MLX5_FLOW_ACTION_SET_TTL :
5938                                                 MLX5_FLOW_ACTION_DEC_TTL;
5939                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5940                         break;
5941                 case RTE_FLOW_ACTION_TYPE_JUMP:
5942                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5943                                                            action_flags,
5944                                                            attr, external,
5945                                                            error);
5946                         if (ret)
5947                                 return ret;
5948                         ++actions_n;
5949                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5950                         break;
5951                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5952                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5953                         ret = flow_dv_validate_action_modify_tcp_seq
5954                                                                 (action_flags,
5955                                                                  actions,
5956                                                                  item_flags,
5957                                                                  error);
5958                         if (ret < 0)
5959                                 return ret;
5960                         /* Count all modify-header actions as one action. */
5961                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5962                                 ++actions_n;
5963                         action_flags |= actions->type ==
5964                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5965                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5966                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5967                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5968                         break;
5969                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5970                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5971                         ret = flow_dv_validate_action_modify_tcp_ack
5972                                                                 (action_flags,
5973                                                                  actions,
5974                                                                  item_flags,
5975                                                                  error);
5976                         if (ret < 0)
5977                                 return ret;
5978                         /* Count all modify-header actions as one action. */
5979                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5980                                 ++actions_n;
5981                         action_flags |= actions->type ==
5982                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5983                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5984                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5985                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5986                         break;
5987                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5988                         break;
5989                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5990                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5991                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5992                         break;
5993                 case RTE_FLOW_ACTION_TYPE_METER:
5994                         ret = mlx5_flow_validate_action_meter(dev,
5995                                                               action_flags,
5996                                                               actions, attr,
5997                                                               error);
5998                         if (ret < 0)
5999                                 return ret;
6000                         action_flags |= MLX5_FLOW_ACTION_METER;
6001                         ++actions_n;
6002                         /* Meter action will add one more TAG action. */
6003                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6004                         break;
6005                 case RTE_FLOW_ACTION_TYPE_AGE:
6006                         ret = flow_dv_validate_action_age(action_flags,
6007                                                           actions, dev,
6008                                                           error);
6009                         if (ret < 0)
6010                                 return ret;
6011                         action_flags |= MLX5_FLOW_ACTION_AGE;
6012                         ++actions_n;
6013                         break;
6014                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
6015                         ret = flow_dv_validate_action_modify_ipv4_dscp
6016                                                          (action_flags,
6017                                                           actions,
6018                                                           item_flags,
6019                                                           error);
6020                         if (ret < 0)
6021                                 return ret;
6022                         /* Count all modify-header actions as one action. */
6023                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6024                                 ++actions_n;
6025                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
6026                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6027                         break;
6028                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
6029                         ret = flow_dv_validate_action_modify_ipv6_dscp
6030                                                                 (action_flags,
6031                                                                  actions,
6032                                                                  item_flags,
6033                                                                  error);
6034                         if (ret < 0)
6035                                 return ret;
6036                         /* Count all modify-header actions as one action. */
6037                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6038                                 ++actions_n;
6039                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6040                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6041                         break;
6042                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6043                         ret = flow_dv_validate_action_sample(action_flags,
6044                                                              actions, dev,
6045                                                              attr, error);
6046                         if (ret < 0)
6047                                 return ret;
6048                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6049                         ++actions_n;
6050                         break;
6051                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6052                         if (actions[0].type != (typeof(actions[0].type))
6053                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6054                                 return rte_flow_error_set
6055                                                 (error, EINVAL,
6056                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6057                                                 NULL, "MLX5 private action "
6058                                                 "must be the first");
6059
6060                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6061                         break;
6062                 default:
6063                         return rte_flow_error_set(error, ENOTSUP,
6064                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6065                                                   actions,
6066                                                   "action not supported");
6067                 }
6068         }
6069         /*
6070          * Validate actions in flow rules:
6071          * - Explicit decap action is prohibited by the tunnel offload API.
6072          * - Drop action in tunnel steer rule is prohibited by the API.
6073          * - Application cannot use MARK action because its value can mask
6074          *   the tunnel default miss notification.
6075          * - JUMP in tunnel match rule is not supported by the current PMD
6076          *   implementation.
6077          * - TAG & META are reserved for future use.
6078          */
6079         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6080                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6081                                             MLX5_FLOW_ACTION_MARK     |
6082                                             MLX5_FLOW_ACTION_SET_TAG  |
6083                                             MLX5_FLOW_ACTION_SET_META |
6084                                             MLX5_FLOW_ACTION_DROP;
6085
6086                 if (action_flags & bad_actions_mask)
6087                         return rte_flow_error_set
6088                                         (error, EINVAL,
6089                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6090                                         "Invalid RTE action in tunnel "
6091                                         "set decap rule");
6092                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6093                         return rte_flow_error_set
6094                                         (error, EINVAL,
6095                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6096                                         "tunnel set decap rule must terminate "
6097                                         "with JUMP");
6098                 if (!attr->ingress)
6099                         return rte_flow_error_set
6100                                         (error, EINVAL,
6101                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6102                                         "tunnel flows for ingress traffic only");
6103         }
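        /*
         * Illustrative sketch (application side, not driver code): a minimal
         * action list that satisfies the tunnel "set" checks above, i.e. no
         * explicit DECAP/MARK/TAG/META/DROP, terminated by JUMP and used
         * with an ingress attribute. The target group number 1 is an
         * arbitrary assumption for illustration.
         *
         *     struct rte_flow_action_jump jump = { .group = 1 };
         *     struct rte_flow_action actions[] = {
         *             { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
         *             { .type = RTE_FLOW_ACTION_TYPE_END },
         *     };
         */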
6104         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6105                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6106                                             MLX5_FLOW_ACTION_MARK    |
6107                                             MLX5_FLOW_ACTION_SET_TAG |
6108                                             MLX5_FLOW_ACTION_SET_META;
6109
6110                 if (action_flags & bad_actions_mask)
6111                         return rte_flow_error_set
6112                                         (error, EINVAL,
6113                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6114                                         "Invalid RTE action in tunnel "
6115                                         "set match rule");
6116         }
6117         /*
6118          * Validate the drop action mutual exclusion with other actions.
6119          * Drop action is mutually-exclusive with any other action, except for
6120          * Count action.
6121          */
6122         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6123             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6124                 return rte_flow_error_set(error, EINVAL,
6125                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6126                                           "Drop action is mutually-exclusive "
6127                                           "with any other action, except for "
6128                                           "Count action");
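        /*
         * For example, the only multi-action list accepted by the check
         * above is COUNT followed by DROP (a sketch, application side):
         *
         *     struct rte_flow_action actions[] = {
         *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
         *             { .type = RTE_FLOW_ACTION_TYPE_DROP },
         *             { .type = RTE_FLOW_ACTION_TYPE_END },
         *     };
         */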
6129         /* E-Switch has a few restrictions on using items and actions. */
6130         if (attr->transfer) {
6131                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6132                     action_flags & MLX5_FLOW_ACTION_FLAG)
6133                         return rte_flow_error_set(error, ENOTSUP,
6134                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6135                                                   NULL,
6136                                                   "unsupported action FLAG");
6137                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6138                     action_flags & MLX5_FLOW_ACTION_MARK)
6139                         return rte_flow_error_set(error, ENOTSUP,
6140                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6141                                                   NULL,
6142                                                   "unsupported action MARK");
6143                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6144                         return rte_flow_error_set(error, ENOTSUP,
6145                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6146                                                   NULL,
6147                                                   "unsupported action QUEUE");
6148                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6149                         return rte_flow_error_set(error, ENOTSUP,
6150                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6151                                                   NULL,
6152                                                   "unsupported action RSS");
6153                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6154                         return rte_flow_error_set(error, EINVAL,
6155                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6156                                                   actions,
6157                                                   "no fate action is found");
6158         } else {
6159                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6160                         return rte_flow_error_set(error, EINVAL,
6161                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6162                                                   actions,
6163                                                   "no fate action is found");
6164         }
6165         /*
6166          * Continue validation for Xcap and VLAN actions.
6167          * If hairpin works in explicit TX rule mode, there is no action
6168          * splitting and the validation of a hairpin ingress flow is the
6169          * same as for other standard flows.
6170          */
6171         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6172                              MLX5_FLOW_VLAN_ACTIONS)) &&
6173             (queue_index == 0xFFFF ||
6174              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6175              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6176              conf->tx_explicit != 0))) {
6177                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6178                     MLX5_FLOW_XCAP_ACTIONS)
6179                         return rte_flow_error_set(error, ENOTSUP,
6180                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6181                                                   NULL, "encap and decap "
6182                                                   "combination is not supported");
6183                 if (!attr->transfer && attr->ingress) {
6184                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6185                                 return rte_flow_error_set
6186                                                 (error, ENOTSUP,
6187                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6188                                                  NULL, "encap is not supported"
6189                                                  " for ingress traffic");
6190                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6191                                 return rte_flow_error_set
6192                                                 (error, ENOTSUP,
6193                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6194                                                  NULL, "push VLAN action not "
6195                                                  "supported for ingress");
6196                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6197                                         MLX5_FLOW_VLAN_ACTIONS)
6198                                 return rte_flow_error_set
6199                                                 (error, ENOTSUP,
6200                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6201                                                  NULL, "no support for "
6202                                                  "multiple VLAN actions");
6203                 }
6204         }
6205         /*
6206          * A hairpin flow adds one more TAG action in TX implicit mode.
6207          * In TX explicit mode, there is no hairpin flow ID.
6208          */
6209         if (hairpin > 0)
6210                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6211         /* Extra metadata enabled: one more TAG action will be added. */
6212         if (dev_conf->dv_flow_en &&
6213             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6214             mlx5_flow_ext_mreg_supported(dev))
6215                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
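        /*
         * Worked example of this accounting: one DSCP rewrite
         * (MLX5_ACT_NUM_SET_DSCP) plus a meter (one implicit TAG), on a
         * hairpin queue in TX implicit mode (one more TAG) with extended
         * metadata enabled (one more TAG), must together fit the limit
         * returned by flow_dv_modify_hdr_action_max() checked below.
         */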
6216         if ((uint32_t)rw_act_num >
6217                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6218                 return rte_flow_error_set(error, ENOTSUP,
6219                                           RTE_FLOW_ERROR_TYPE_ACTION,
6220                                           NULL, "too many header modify"
6221                                           " actions to support");
6222         }
6223         return 0;
6224 }
6225
6226 /**
6227  * Internal preparation function. Allocates the DV flow structure;
6228  * its size is constant.
6229  *
6230  * @param[in] dev
6231  *   Pointer to the rte_eth_dev structure.
6232  * @param[in] attr
6233  *   Pointer to the flow attributes.
6234  * @param[in] items
6235  *   Pointer to the list of items.
6236  * @param[in] actions
6237  *   Pointer to the list of actions.
6238  * @param[out] error
6239  *   Pointer to the error structure.
6240  *
6241  * @return
6242  *   Pointer to mlx5_flow object on success,
6243  *   otherwise NULL and rte_errno is set.
6244  */
6245 static struct mlx5_flow *
6246 flow_dv_prepare(struct rte_eth_dev *dev,
6247                 const struct rte_flow_attr *attr __rte_unused,
6248                 const struct rte_flow_item items[] __rte_unused,
6249                 const struct rte_flow_action actions[] __rte_unused,
6250                 struct rte_flow_error *error)
6251 {
6252         uint32_t handle_idx = 0;
6253         struct mlx5_flow *dev_flow;
6254         struct mlx5_flow_handle *dev_handle;
6255         struct mlx5_priv *priv = dev->data->dev_private;
6256         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6257
6258         MLX5_ASSERT(wks);
6259         /* Guard against overflowing the flow array and corrupting memory. */
6260         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6261                 rte_flow_error_set(error, ENOSPC,
6262                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6263                                    "no free temporary device flow");
6264                 return NULL;
6265         }
6266         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6267                                    &handle_idx);
6268         if (!dev_handle) {
6269                 rte_flow_error_set(error, ENOMEM,
6270                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6271                                    "not enough memory to create flow handle");
6272                 return NULL;
6273         }
6274         MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6275         dev_flow = &wks->flows[wks->flow_idx++];
6276         dev_flow->handle = dev_handle;
6277         dev_flow->handle_idx = handle_idx;
6278         /*
6279          * Some old rdma-core releases check the length of the matching
6280          * parameter up front, so the length without the misc4 param must
6281          * be used by default. If the flow uses misc4, the length is
6282          * adjusted accordingly. Each param member is naturally aligned on
6283          * a 64B boundary.
6284          */
6285         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6286                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
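        /*
         * With each match set being a 64B section, the default size used
         * here is MLX5_ST_SZ_BYTES(fte_match_param) minus the trailing 64B
         * misc4 section; it is enlarged later only if the flow actually
         * matches on misc4 fields.
         */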
6287         /*
6288          * The matching value must be cleared to 0 before use. In the past
6289          * it was cleared automatically by the rte_*alloc API. The time
6290          * consumed is almost the same as before.
6291          */
6292         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6293         dev_flow->ingress = attr->ingress;
6294         dev_flow->dv.transfer = attr->transfer;
6295         return dev_flow;
6296 }
6297
6298 #ifdef RTE_LIBRTE_MLX5_DEBUG
6299 /**
6300  * Sanity check for match mask and value. Similar to check_valid_spec() in
6301  * the kernel driver. Fails if an unmasked bit is set in the value.
6302  *
6303  * @param match_mask
6304  *   pointer to match mask buffer.
6305  * @param match_value
6306  *   pointer to match value buffer.
6307  *
6308  * @return
6309  *   0 if valid, -EINVAL otherwise.
6310  */
6311 static int
6312 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6313 {
6314         uint8_t *m = match_mask;
6315         uint8_t *v = match_value;
6316         unsigned int i;
6317
6318         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6319                 if (v[i] & ~m[i]) {
6320                         DRV_LOG(ERR,
6321                                 "match_value differs from match_criteria"
6322                                 " %p[%u] != %p[%u]",
6323                                 match_value, i, match_mask, i);
6324                         return -EINVAL;
6325                 }
6326         }
6327         return 0;
6328 }
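/*
 * Example: flow_dv_check_valid_spec() fails for a mask byte of 0x0f with
 * a value byte of 0x1f, since value bit 0x10 is set outside the mask.
 */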
6329 #endif
6330
6331 /**
6332  * Add match of ip_version.
6333  *
6334  * @param[in] group
6335  *   Flow group.
6336  * @param[in] headers_v
6337  *   Values header pointer.
6338  * @param[in] headers_m
6339  *   Masks header pointer.
6340  * @param[in] ip_version
6341  *   The IP version to set.
6342  */
6343 static inline void
6344 flow_dv_set_match_ip_version(uint32_t group,
6345                              void *headers_v,
6346                              void *headers_m,
6347                              uint8_t ip_version)
6348 {
6349         if (group == 0)
6350                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6351         else
6352                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6353                          ip_version);
6354         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6355         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6356         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6357 }
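/*
 * E.g. for IPv4 in the root table (group 0), flow_dv_set_match_ip_version()
 * programs the full 0xf mask with value 4, while in non-root tables the
 * mask mirrors the value; in both cases the ethertype match is cleared,
 * since ip_version supersedes it.
 */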
6358
6359 /**
6360  * Add Ethernet item to matcher and to the value.
6361  *
6362  * @param[in, out] matcher
6363  *   Flow matcher.
6364  * @param[in, out] key
6365  *   Flow matcher value.
6366  * @param[in] item
6367  *   Flow pattern to translate.
6368  * @param[in] inner
6369  *   Item is inner pattern.
6370  * @param[in] group
 *   The group to insert the rule.
 */
6371 static void
6372 flow_dv_translate_item_eth(void *matcher, void *key,
6373                            const struct rte_flow_item *item, int inner,
6374                            uint32_t group)
6375 {
6376         const struct rte_flow_item_eth *eth_m = item->mask;
6377         const struct rte_flow_item_eth *eth_v = item->spec;
6378         const struct rte_flow_item_eth nic_mask = {
6379                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6380                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6381                 .type = RTE_BE16(0xffff),
6382                 .has_vlan = 0,
6383         };
6384         void *hdrs_m;
6385         void *hdrs_v;
6386         char *l24_v;
6387         unsigned int i;
6388
6389         if (!eth_v)
6390                 return;
6391         if (!eth_m)
6392                 eth_m = &nic_mask;
6393         if (inner) {
6394                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6395                                          inner_headers);
6396                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6397         } else {
6398                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6399                                          outer_headers);
6400                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6401         }
6402         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6403                &eth_m->dst, sizeof(eth_m->dst));
6404         /* The value must be in the range of the mask. */
6405         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6406         for (i = 0; i < sizeof(eth_m->dst); ++i)
6407                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6408         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6409                &eth_m->src, sizeof(eth_m->src));
6410         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6411         /* The value must be in the range of the mask. */
6412         for (i = 0; i < sizeof(eth_m->src); ++i)
6413                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6414         /*
6415          * HW supports match on one Ethertype, the Ethertype following the last
6416          * VLAN tag of the packet (see PRM).
6417          * Set match on ethertype only if ETH header is not followed by VLAN.
6418          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6419          * ethertype, and use ip_version field instead.
6420          * eCPRI over Ether layer will use type value 0xAEFE.
6421          */
6422         if (eth_m->type == 0xFFFF) {
6423                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
6424                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6425                 switch (eth_v->type) {
6426                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6427                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6428                         return;
6429                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6430                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6431                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6432                         return;
6433                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6434                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6435                         return;
6436                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6437                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6438                         return;
6439                 default:
6440                         break;
6441                 }
6442         }
6443         if (eth_m->has_vlan) {
6444                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6445                 if (eth_v->has_vlan) {
6446                         /*
6447                          * Here, when also has_more_vlan field in VLAN item is
6448                          * not set, only single-tagged packets will be matched.
6449                          */
6450                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6451                         return;
6452                 }
6453         }
6454         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6455                  rte_be_to_cpu_16(eth_m->type));
6456         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6457         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6458 }
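/*
 * E.g. an Ethernet item with .type = RTE_BE16(RTE_ETHER_TYPE_IPV4) and a
 * full 0xffff type mask never reaches the final ethertype write in
 * flow_dv_translate_item_eth(): it is diverted to
 * flow_dv_set_match_ip_version() instead, as HW is optimized to match on
 * ip_version rather than ethertype for IPv4/IPv6.
 */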
6459
6460 /**
6461  * Add VLAN item to matcher and to the value.
6462  *
6463  * @param[in, out] dev_flow
6464  *   Flow descriptor.
6465  * @param[in, out] matcher
6466  *   Flow matcher.
6467  * @param[in, out] key
6468  *   Flow matcher value.
6469  * @param[in] item
6470  *   Flow pattern to translate.
6471  * @param[in] inner
6472  *   Item is inner pattern.
6473  * @param[in] group
 *   The group to insert the rule.
 */
6474 static void
6475 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6476                             void *matcher, void *key,
6477                             const struct rte_flow_item *item,
6478                             int inner, uint32_t group)
6479 {
6480         const struct rte_flow_item_vlan *vlan_m = item->mask;
6481         const struct rte_flow_item_vlan *vlan_v = item->spec;
6482         void *hdrs_m;
6483         void *hdrs_v;
6484         uint16_t tci_m;
6485         uint16_t tci_v;
6486
6487         if (inner) {
6488                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6489                                          inner_headers);
6490                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6491         } else {
6492                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6493                                          outer_headers);
6494                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6495                 /*
6496                  * This is a workaround; masks are not supported
6497                  * and were pre-validated.
6498                  */
6499                 if (vlan_v)
6500                         dev_flow->handle->vf_vlan.tag =
6501                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6502         }
6503         /*
6504          * When VLAN item exists in flow, mark packet as tagged,
6505          * even if TCI is not specified.
6506          */
6507         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6508                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6509                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6510         }
6511         if (!vlan_v)
6512                 return;
6513         if (!vlan_m)
6514                 vlan_m = &rte_flow_item_vlan_mask;
6515         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6516         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6517         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6518         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6519         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6520         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6521         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6522         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6523         /*
6524          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6525          * ethertype, and use ip_version field instead.
6526          */
6527         if (vlan_m->inner_type == 0xFFFF) {
6528                 switch (vlan_v->inner_type) {
6529                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6530                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6531                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6532                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6533                         return;
6534                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6535                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6536                         return;
6537                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6538                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6539                         return;
6540                 default:
6541                         break;
6542                 }
6543         }
6544         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6545                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6546                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6547                 /* Only one vlan_tag bit can be set. */
6548                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6549                 return;
6550         }
6551         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6552                  rte_be_to_cpu_16(vlan_m->inner_type));
6553         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6554                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6555 }
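/*
 * Worked example of the TCI split in flow_dv_translate_item_vlan():
 * tci = 0xe00a gives first_prio = 0xe00a >> 13 = 7, first_cfi = 0 and
 * first_vid = 0xe00a & 0xfff = 10 (each MLX5_SET() truncates the shifted
 * value to its field width).
 */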
6556
6557 /**
6558  * Add IPV4 item to matcher and to the value.
6559  *
6560  * @param[in, out] matcher
6561  *   Flow matcher.
6562  * @param[in, out] key
6563  *   Flow matcher value.
6564  * @param[in] item
6565  *   Flow pattern to translate.
6566  * @param[in] inner
6567  *   Item is inner pattern.
6568  * @param[in] group
6569  *   The group to insert the rule.
6570  */
6571 static void
6572 flow_dv_translate_item_ipv4(void *matcher, void *key,
6573                             const struct rte_flow_item *item,
6574                             int inner, uint32_t group)
6575 {
6576         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6577         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6578         const struct rte_flow_item_ipv4 nic_mask = {
6579                 .hdr = {
6580                         .src_addr = RTE_BE32(0xffffffff),
6581                         .dst_addr = RTE_BE32(0xffffffff),
6582                         .type_of_service = 0xff,
6583                         .next_proto_id = 0xff,
6584                         .time_to_live = 0xff,
6585                 },
6586         };
6587         void *headers_m;
6588         void *headers_v;
6589         char *l24_m;
6590         char *l24_v;
6591         uint8_t tos;
6592
6593         if (inner) {
6594                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6595                                          inner_headers);
6596                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6597         } else {
6598                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6599                                          outer_headers);
6600                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6601         }
6602         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6603         if (!ipv4_v)
6604                 return;
6605         if (!ipv4_m)
6606                 ipv4_m = &nic_mask;
6607         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6608                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6609         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6610                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6611         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6612         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6613         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6614                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6615         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6616                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6617         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6618         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6619         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6620         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6621                  ipv4_m->hdr.type_of_service);
6622         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6623         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6624                  ipv4_m->hdr.type_of_service >> 2);
6625         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6626         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6627                  ipv4_m->hdr.next_proto_id);
6628         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6629                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6630         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6631                  ipv4_m->hdr.time_to_live);
6632         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6633                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6634         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6635                  !!(ipv4_m->hdr.fragment_offset));
6636         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6637                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6638 }
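/*
 * Worked example of the ToS split in flow_dv_translate_item_ipv4():
 * type_of_service = 0xb8 (DSCP 46/EF, ECN 0) programs
 * ip_dscp = 0xb8 >> 2 = 0x2e and ip_ecn = 0, the 2-bit ip_ecn field
 * keeping only the low ECN bits.
 */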
6639
6640 /**
6641  * Add IPV6 item to matcher and to the value.
6642  *
6643  * @param[in, out] matcher
6644  *   Flow matcher.
6645  * @param[in, out] key
6646  *   Flow matcher value.
6647  * @param[in] item
6648  *   Flow pattern to translate.
6649  * @param[in] inner
6650  *   Item is inner pattern.
6651  * @param[in] group
6652  *   The group to insert the rule.
6653  */
6654 static void
6655 flow_dv_translate_item_ipv6(void *matcher, void *key,
6656                             const struct rte_flow_item *item,
6657                             int inner, uint32_t group)
6658 {
6659         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6660         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6661         const struct rte_flow_item_ipv6 nic_mask = {
6662                 .hdr = {
6663                         .src_addr =
6664                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6665                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6666                         .dst_addr =
6667                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6668                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6669                         .vtc_flow = RTE_BE32(0xffffffff),
6670                         .proto = 0xff,
6671                         .hop_limits = 0xff,
6672                 },
6673         };
6674         void *headers_m;
6675         void *headers_v;
6676         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6677         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6678         char *l24_m;
6679         char *l24_v;
6680         uint32_t vtc_m;
6681         uint32_t vtc_v;
6682         int i;
6683         int size;
6684
6685         if (inner) {
6686                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6687                                          inner_headers);
6688                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6689         } else {
6690                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6691                                          outer_headers);
6692                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6693         }
6694         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6695         if (!ipv6_v)
6696                 return;
6697         if (!ipv6_m)
6698                 ipv6_m = &nic_mask;
6699         size = sizeof(ipv6_m->hdr.dst_addr);
6700         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6701                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6702         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6703                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6704         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6705         for (i = 0; i < size; ++i)
6706                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6707         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6708                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6709         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6710                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6711         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6712         for (i = 0; i < size; ++i)
6713                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6714         /* TOS. */
6715         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6716         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6717         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6718         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6719         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6720         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6721         /* Label. */
6722         if (inner) {
6723                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6724                          vtc_m);
6725                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6726                          vtc_v);
6727         } else {
6728                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6729                          vtc_m);
6730                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6731                          vtc_v);
6732         }
6733         /* Protocol. */
6734         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6735                  ipv6_m->hdr.proto);
6736         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6737                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6738         /* Hop limit. */
6739         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6740                  ipv6_m->hdr.hop_limits);
6741         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6742                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6743         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6744                  !!(ipv6_m->has_frag_ext));
6745         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6746                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6747 }
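/*
 * Worked example of the vtc_flow split in flow_dv_translate_item_ipv6():
 * vtc_flow = 0x6b800000 (version 6, traffic class 0xb8, label 0) yields
 * ip_dscp = (vtc >> 22) & 0x3f = 0x2e and ip_ecn = (vtc >> 20) & 0x3 = 0,
 * with the 20-bit flow label written to the *_ipv6_flow_label fields.
 */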
6748
6749 /**
6750  * Add IPV6 fragment extension item to matcher and to the value.
6751  *
6752  * @param[in, out] matcher
6753  *   Flow matcher.
6754  * @param[in, out] key
6755  *   Flow matcher value.
6756  * @param[in] item
6757  *   Flow pattern to translate.
6758  * @param[in] inner
6759  *   Item is inner pattern.
6760  */
6761 static void
6762 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6763                                      const struct rte_flow_item *item,
6764                                      int inner)
6765 {
6766         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6767         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6768         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6769                 .hdr = {
6770                         .next_header = 0xff,
6771                         .frag_data = RTE_BE16(0xffff),
6772                 },
6773         };
6774         void *headers_m;
6775         void *headers_v;
6776
6777         if (inner) {
6778                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6779                                          inner_headers);
6780                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6781         } else {
6782                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6783                                          outer_headers);
6784                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6785         }
6786         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6787         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6788         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6789         if (!ipv6_frag_ext_v)
6790                 return;
6791         if (!ipv6_frag_ext_m)
6792                 ipv6_frag_ext_m = &nic_mask;
6793         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6794                  ipv6_frag_ext_m->hdr.next_header);
6795         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6796                  ipv6_frag_ext_v->hdr.next_header &
6797                  ipv6_frag_ext_m->hdr.next_header);
6798 }
6799
6800 /**
6801  * Add TCP item to matcher and to the value.
6802  *
6803  * @param[in, out] matcher
6804  *   Flow matcher.
6805  * @param[in, out] key
6806  *   Flow matcher value.
6807  * @param[in] item
6808  *   Flow pattern to translate.
6809  * @param[in] inner
6810  *   Item is inner pattern.
6811  */
6812 static void
6813 flow_dv_translate_item_tcp(void *matcher, void *key,
6814                            const struct rte_flow_item *item,
6815                            int inner)
6816 {
6817         const struct rte_flow_item_tcp *tcp_m = item->mask;
6818         const struct rte_flow_item_tcp *tcp_v = item->spec;
6819         void *headers_m;
6820         void *headers_v;
6821
6822         if (inner) {
6823                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6824                                          inner_headers);
6825                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6826         } else {
6827                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6828                                          outer_headers);
6829                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6830         }
6831         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6832         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6833         if (!tcp_v)
6834                 return;
6835         if (!tcp_m)
6836                 tcp_m = &rte_flow_item_tcp_mask;
6837         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6838                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6839         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6840                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6841         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6842                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6843         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6844                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6845         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6846                  tcp_m->hdr.tcp_flags);
6847         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6848                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6849 }
6850
6851 /**
6852  * Add UDP item to matcher and to the value.
6853  *
6854  * @param[in, out] matcher
6855  *   Flow matcher.
6856  * @param[in, out] key
6857  *   Flow matcher value.
6858  * @param[in] item
6859  *   Flow pattern to translate.
6860  * @param[in] inner
6861  *   Item is inner pattern.
6862  */
6863 static void
6864 flow_dv_translate_item_udp(void *matcher, void *key,
6865                            const struct rte_flow_item *item,
6866                            int inner)
6867 {
6868         const struct rte_flow_item_udp *udp_m = item->mask;
6869         const struct rte_flow_item_udp *udp_v = item->spec;
6870         void *headers_m;
6871         void *headers_v;
6872
6873         if (inner) {
6874                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6875                                          inner_headers);
6876                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6877         } else {
6878                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6879                                          outer_headers);
6880                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6881         }
6882         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6883         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6884         if (!udp_v)
6885                 return;
6886         if (!udp_m)
6887                 udp_m = &rte_flow_item_udp_mask;
6888         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6889                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6890         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6891                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6892         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6893                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6894         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6895                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6896 }
6897
6898 /**
6899  * Add GRE optional Key item to matcher and to the value.
6900  *
6901  * @param[in, out] matcher
6902  *   Flow matcher.
6903  * @param[in, out] key
6904  *   Flow matcher value.
6905  * @param[in] item
6908  *   Item is inner pattern.
6909  */
6910 static void
6911 flow_dv_translate_item_gre_key(void *matcher, void *key,
6912                                    const struct rte_flow_item *item)
6913 {
6914         const rte_be32_t *key_m = item->mask;
6915         const rte_be32_t *key_v = item->spec;
6916         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6917         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6918         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6919
6920         /* The GRE K bit must be set and should already be validated. */
6921         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6922         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6923         if (!key_v)
6924                 return;
6925         if (!key_m)
6926                 key_m = &gre_key_default_mask;
6927         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6928                  rte_be_to_cpu_32(*key_m) >> 8);
6929         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6930                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6931         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6932                  rte_be_to_cpu_32(*key_m) & 0xFF);
6933         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6934                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6935 }
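/*
 * E.g. a GRE key of 0x12345678 is split by the code above into
 * gre_key_h = 0x123456 (upper 24 bits) and gre_key_l = 0x78 (lower
 * 8 bits).
 */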
6936
6937 /**
6938  * Add GRE item to matcher and to the value.
6939  *
6940  * @param[in, out] matcher
6941  *   Flow matcher.
6942  * @param[in, out] key
6943  *   Flow matcher value.
6944  * @param[in] item
6945  *   Flow pattern to translate.
6946  * @param[in] inner
6947  *   Item is inner pattern.
6948  */
6949 static void
6950 flow_dv_translate_item_gre(void *matcher, void *key,
6951                            const struct rte_flow_item *item,
6952                            int inner)
6953 {
6954         const struct rte_flow_item_gre *gre_m = item->mask;
6955         const struct rte_flow_item_gre *gre_v = item->spec;
6956         void *headers_m;
6957         void *headers_v;
6958         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6959         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6960         struct {
6961                 union {
6962                         __extension__
6963                         struct {
6964                                 uint16_t version:3;
6965                                 uint16_t rsvd0:9;
6966                                 uint16_t s_present:1;
6967                                 uint16_t k_present:1;
6968                                 uint16_t rsvd_bit1:1;
6969                                 uint16_t c_present:1;
6970                         };
6971                         uint16_t value;
6972                 };
6973         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6974
6975         if (inner) {
6976                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6977                                          inner_headers);
6978                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6979         } else {
6980                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6981                                          outer_headers);
6982                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6983         }
6984         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6985         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6986         if (!gre_v)
6987                 return;
6988         if (!gre_m)
6989                 gre_m = &rte_flow_item_gre_mask;
6990         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6991                  rte_be_to_cpu_16(gre_m->protocol));
6992         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6993                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6994         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6995         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6996         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6997                  gre_crks_rsvd0_ver_m.c_present);
6998         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6999                  gre_crks_rsvd0_ver_v.c_present &
7000                  gre_crks_rsvd0_ver_m.c_present);
7001         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
7002                  gre_crks_rsvd0_ver_m.k_present);
7003         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
7004                  gre_crks_rsvd0_ver_v.k_present &
7005                  gre_crks_rsvd0_ver_m.k_present);
7006         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
7007                  gre_crks_rsvd0_ver_m.s_present);
7008         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
7009                  gre_crks_rsvd0_ver_v.s_present &
7010                  gre_crks_rsvd0_ver_m.s_present);
7011 }
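/*
 * E.g. c_rsvd0_ver = RTE_BE16(0x2000) decodes through the bit-field
 * union above as k_present = 1 with C and S clear (bits 15, 13 and 12
 * carry C, K and S respectively); this is exactly the GRE header shape
 * the NVGRE translation below builds upon.
 */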
7012
7013 /**
7014  * Add NVGRE item to matcher and to the value.
7015  *
7016  * @param[in, out] matcher
7017  *   Flow matcher.
7018  * @param[in, out] key
7019  *   Flow matcher value.
7020  * @param[in] item
7021  *   Flow pattern to translate.
7022  * @param[in] inner
7023  *   Item is inner pattern.
7024  */
7025 static void
7026 flow_dv_translate_item_nvgre(void *matcher, void *key,
7027                              const struct rte_flow_item *item,
7028                              int inner)
7029 {
7030         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
7031         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
7032         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7033         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7034         const char *tni_flow_id_m;
7035         const char *tni_flow_id_v;
7036         char *gre_key_m;
7037         char *gre_key_v;
7038         int size;
7039         int i;
7040
7041         /* For NVGRE, GRE header fields must be set with defined values. */
7042         const struct rte_flow_item_gre gre_spec = {
7043                 .c_rsvd0_ver = RTE_BE16(0x2000),
7044                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7045         };
7046         const struct rte_flow_item_gre gre_mask = {
7047                 .c_rsvd0_ver = RTE_BE16(0xB000),
7048                 .protocol = RTE_BE16(UINT16_MAX),
7049         };
7050         const struct rte_flow_item gre_item = {
7051                 .spec = &gre_spec,
7052                 .mask = &gre_mask,
7053                 .last = NULL,
7054         };
7055         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7056         if (!nvgre_v)
7057                 return;
7058         if (!nvgre_m)
7059                 nvgre_m = &rte_flow_item_nvgre_mask;
7060         tni_flow_id_m = (const char *)nvgre_m->tni;
7061         tni_flow_id_v = (const char *)nvgre_v->tni;
7062         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7063         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7064         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7065         memcpy(gre_key_m, tni_flow_id_m, size);
7066         for (i = 0; i < size; ++i)
7067                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7068 }
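/*
 * E.g. tni = {0x12, 0x34, 0x56} with flow_id = 0x78 is copied above as
 * the four consecutive bytes 0x12 0x34 0x56 0x78 starting at gre_key_h,
 * so the NVGRE TNI and flow_id are matched through the GRE key fields.
 */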
7069
7070 /**
7071  * Add VXLAN item to matcher and to the value.
7072  *
7073  * @param[in, out] matcher
7074  *   Flow matcher.
7075  * @param[in, out] key
7076  *   Flow matcher value.
7077  * @param[in] item
7078  *   Flow pattern to translate.
7079  * @param[in] inner
7080  *   Item is inner pattern.
7081  */
7082 static void
7083 flow_dv_translate_item_vxlan(void *matcher, void *key,
7084                              const struct rte_flow_item *item,
7085                              int inner)
7086 {
7087         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7088         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7089         void *headers_m;
7090         void *headers_v;
7091         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7092         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7093         char *vni_m;
7094         char *vni_v;
7095         uint16_t dport;
7096         int size;
7097         int i;
7098
7099         if (inner) {
7100                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7101                                          inner_headers);
7102                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7103         } else {
7104                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7105                                          outer_headers);
7106                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7107         }
7108         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7109                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7110         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7111                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7112                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7113         }
7114         if (!vxlan_v)
7115                 return;
7116         if (!vxlan_m)
7117                 vxlan_m = &rte_flow_item_vxlan_mask;
7118         size = sizeof(vxlan_m->vni);
7119         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7120         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7121         memcpy(vni_m, vxlan_m->vni, size);
7122         for (i = 0; i < size; ++i)
7123                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7124 }
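/*
 * E.g. for the pattern eth / ipv4 / udp / vxlan with no UDP destination
 * port specified, flow_dv_translate_item_vxlan() still constrains
 * udp_dport to MLX5_UDP_PORT_VXLAN (4789), so only VXLAN-encapsulated
 * traffic can match.
 */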
7125
7126 /**
7127  * Add VXLAN-GPE item to matcher and to the value.
7128  *
7129  * @param[in, out] matcher
7130  *   Flow matcher.
7131  * @param[in, out] key
7132  *   Flow matcher value.
7133  * @param[in] item
7134  *   Flow pattern to translate.
7135  * @param[in] inner
7136  *   Item is inner pattern.
7137  */
7139 static void
7140 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7141                                  const struct rte_flow_item *item, int inner)
7142 {
7143         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7144         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7145         void *headers_m;
7146         void *headers_v;
7147         void *misc_m =
7148                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7149         void *misc_v =
7150                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7151         char *vni_m;
7152         char *vni_v;
7153         uint16_t dport;
7154         int size;
7155         int i;
7156         uint8_t flags_m = 0xff;
7157         uint8_t flags_v = 0xc;
7158
7159         if (inner) {
7160                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7161                                          inner_headers);
7162                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7163         } else {
7164                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7165                                          outer_headers);
7166                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7167         }
7168         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7169                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7170         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7171                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7172                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7173         }
7174         if (!vxlan_v)
7175                 return;
7176         if (!vxlan_m)
7177                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7178         size = sizeof(vxlan_m->vni);
7179         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7180         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7181         memcpy(vni_m, vxlan_m->vni, size);
7182         for (i = 0; i < size; ++i)
7183                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7184         if (vxlan_m->flags) {
7185                 flags_m = vxlan_m->flags;
7186                 flags_v = vxlan_v->flags;
7187         }
7188         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7189         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7190         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7191                  vxlan_m->protocol);
7192         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7193                  vxlan_v->protocol);
7194 }
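
/*
 * Editor's sketch (assumed usage, not part of the original file; the
 * helper name is hypothetical): a VNI-only VXLAN-GPE pattern. With no
 * flags in the mask, the translation above falls back to matching the
 * default flags value 0xc under a full 0xff mask.
 */
static void __rte_unused
flow_dv_example_vxlan_gpe_item(void *matcher, void *key)
{
        /* Hypothetical VNI 42, matched exactly; flags left to defaults. */
        static const struct rte_flow_item_vxlan_gpe spec = {
                .vni = { 0x00, 0x00, 0x2a },
        };
        static const struct rte_flow_item_vxlan_gpe mask = {
                .vni = { 0xff, 0xff, 0xff },
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_vxlan_gpe(matcher, key, &item, 0);
}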
7195
7196 /**
7197  * Add Geneve item to matcher and to the value.
7198  *
7199  * @param[in, out] matcher
7200  *   Flow matcher.
7201  * @param[in, out] key
7202  *   Flow matcher value.
7203  * @param[in] item
7204  *   Flow pattern to translate.
7205  * @param[in] inner
7206  *   Item is inner pattern.
7207  */
7209 static void
7210 flow_dv_translate_item_geneve(void *matcher, void *key,
7211                               const struct rte_flow_item *item, int inner)
7212 {
7213         const struct rte_flow_item_geneve *geneve_m = item->mask;
7214         const struct rte_flow_item_geneve *geneve_v = item->spec;
7215         void *headers_m;
7216         void *headers_v;
7217         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7218         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7219         uint16_t dport;
7220         uint16_t gbhdr_m;
7221         uint16_t gbhdr_v;
7222         char *vni_m;
7223         char *vni_v;
7224         size_t size, i;
7225
7226         if (inner) {
7227                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7228                                          inner_headers);
7229                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7230         } else {
7231                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7232                                          outer_headers);
7233                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7234         }
7235         dport = MLX5_UDP_PORT_GENEVE;
7236         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7237                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7238                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7239         }
7240         if (!geneve_v)
7241                 return;
7242         if (!geneve_m)
7243                 geneve_m = &rte_flow_item_geneve_mask;
7244         size = sizeof(geneve_m->vni);
7245         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7246         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7247         memcpy(vni_m, geneve_m->vni, size);
7248         for (i = 0; i < size; ++i)
7249                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7250         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7251                  rte_be_to_cpu_16(geneve_m->protocol));
7252         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7253                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7254         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7255         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7256         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7257                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7258         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7259                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7260         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7261                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7262         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7263                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7264                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7265 }
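
/*
 * Editor's note (illustrative): ver_opt_len_o_c_rsvd0 carries the first
 * 16-bit Geneve word (Ver:2, Opt Len:6, O:1, C:1, Rsvd:6 per RFC 8926),
 * so a hypothetical OAM-only match could look like:
 *
 *     struct rte_flow_item_geneve spec = {
 *             .ver_opt_len_o_c_rsvd0 = RTE_BE16(1 << 7), // O bit set
 *     };
 *     struct rte_flow_item_geneve mask = {
 *             .ver_opt_len_o_c_rsvd0 = RTE_BE16(1 << 7), // O bit only
 *     };
 */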
7266
7267 /**
7268  * Add MPLS item to matcher and to the value.
7269  *
7270  * @param[in, out] matcher
7271  *   Flow matcher.
7272  * @param[in, out] key
7273  *   Flow matcher value.
7274  * @param[in] item
7275  *   Flow pattern to translate.
7276  * @param[in] prev_layer
7277  *   The protocol layer indicated in previous item.
7278  * @param[in] inner
7279  *   Item is inner pattern.
7280  */
7281 static void
7282 flow_dv_translate_item_mpls(void *matcher, void *key,
7283                             const struct rte_flow_item *item,
7284                             uint64_t prev_layer,
7285                             int inner)
7286 {
7287         const uint32_t *in_mpls_m = item->mask;
7288         const uint32_t *in_mpls_v = item->spec;
7289         uint32_t *out_mpls_m = NULL;
7290         uint32_t *out_mpls_v = NULL;
7291         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7292         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7293         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7294                                      misc_parameters_2);
7295         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7296         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7297         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7298
7299         switch (prev_layer) {
7300         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7301                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7302                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7303                          MLX5_UDP_PORT_MPLS);
7304                 break;
7305         case MLX5_FLOW_LAYER_GRE:
7306                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7307                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7308                          RTE_ETHER_TYPE_MPLS);
7309                 break;
7310         default:
7311                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7312                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7313                          IPPROTO_MPLS);
7314                 break;
7315         }
7316         if (!in_mpls_v)
7317                 return;
7318         if (!in_mpls_m)
7319                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7320         switch (prev_layer) {
7321         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7322                 out_mpls_m =
7323                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7324                                                  outer_first_mpls_over_udp);
7325                 out_mpls_v =
7326                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7327                                                  outer_first_mpls_over_udp);
7328                 break;
7329         case MLX5_FLOW_LAYER_GRE:
7330                 out_mpls_m =
7331                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7332                                                  outer_first_mpls_over_gre);
7333                 out_mpls_v =
7334                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7335                                                  outer_first_mpls_over_gre);
7336                 break;
7337         default:
7338                 /* Inner MPLS that is not over GRE is not supported. */
7339                 if (!inner) {
7340                         out_mpls_m =
7341                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7342                                                          misc2_m,
7343                                                          outer_first_mpls);
7344                         out_mpls_v =
7345                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7346                                                          misc2_v,
7347                                                          outer_first_mpls);
7348                 }
7349                 break;
7350         }
7351         if (out_mpls_m && out_mpls_v) {
7352                 *out_mpls_m = *in_mpls_m;
7353                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7354         }
7355 }
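
/*
 * Editor's sketch (assumed usage; the helper name is hypothetical):
 * prev_layer selects where the 32-bit MPLS word is matched. For an
 * ETH / IPV4 / UDP / MPLS pattern the caller passes
 * MLX5_FLOW_LAYER_OUTER_L4_UDP, which pins udp_dport to
 * MLX5_UDP_PORT_MPLS and routes the label into
 * outer_first_mpls_over_udp.
 */
static void __rte_unused
flow_dv_example_mpls_over_udp(void *matcher, void *key)
{
        /* Hypothetical label 100: the 20 label bits sit on top. */
        static const struct rte_flow_item_mpls spec = {
                .label_tc_s = { 0x00, 0x06, 0x40 }, /* 100 << 4 */
        };
        static const struct rte_flow_item_mpls mask = {
                .label_tc_s = { 0xff, 0xff, 0xf0 }, /* label bits only */
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_mpls(matcher, key, &item,
                                    MLX5_FLOW_LAYER_OUTER_L4_UDP, 0);
}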
7356
7357 /**
7358  * Add metadata register item to matcher
7359  *
7360  * @param[in, out] matcher
7361  *   Flow matcher.
7362  * @param[in, out] key
7363  *   Flow matcher value.
7364  * @param[in] reg_type
7365  *   Type of device metadata register.
7366  * @param[in] data
7367  *   Register data to match.
7368  * @param[in] mask
7369  *   Register mask.
7370  */
7371 static void
7372 flow_dv_match_meta_reg(void *matcher, void *key,
7373                        enum modify_reg reg_type,
7374                        uint32_t data, uint32_t mask)
7375 {
7376         void *misc2_m =
7377                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7378         void *misc2_v =
7379                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7380         uint32_t temp;
7381
7382         data &= mask;
7383         switch (reg_type) {
7384         case REG_A:
7385                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7386                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7387                 break;
7388         case REG_B:
7389                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7390                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7391                 break;
7392         case REG_C_0:
7393                 /*
7394                  * The metadata register C0 field might be divided into
7395                  * source vport index and META item value, we should set
7396                  * this field according to specified mask, not as whole one.
7397                  */
7398                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7399                 temp |= mask;
7400                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7401                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7402                 temp &= ~mask;
7403                 temp |= data;
7404                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7405                 break;
7406         case REG_C_1:
7407                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7408                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7409                 break;
7410         case REG_C_2:
7411                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7412                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7413                 break;
7414         case REG_C_3:
7415                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7416                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7417                 break;
7418         case REG_C_4:
7419                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7420                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7421                 break;
7422         case REG_C_5:
7423                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7424                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7425                 break;
7426         case REG_C_6:
7427                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7428                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7429                 break;
7430         case REG_C_7:
7431                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7432                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7433                 break;
7434         default:
7435                 MLX5_ASSERT(false);
7436                 break;
7437         }
7438 }
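
/*
 * Editor's note (illustrative): the REG_C_0 read-modify-write above lets
 * two users share one register. If the vport match already programmed
 * mask 0xffff0000 with value 0x00030000 and a META match now adds mask
 * 0x0000ffff with data 0x00001234, the merged result is
 *
 *     mask:  0xffff0000 | 0x0000ffff                 = 0xffffffff
 *     value: (0x00030000 & ~0x0000ffff) | 0x00001234 = 0x00031234
 *
 * so neither user clobbers the other's bits.
 */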
7439
7440 /**
7441  * Add MARK item to matcher
7442  *
7443  * @param[in] dev
7444  *   The device to configure through.
7445  * @param[in, out] matcher
7446  *   Flow matcher.
7447  * @param[in, out] key
7448  *   Flow matcher value.
7449  * @param[in] item
7450  *   Flow pattern to translate.
7451  */
7452 static void
7453 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7454                             void *matcher, void *key,
7455                             const struct rte_flow_item *item)
7456 {
7457         struct mlx5_priv *priv = dev->data->dev_private;
7458         const struct rte_flow_item_mark *mark;
7459         uint32_t value;
7460         uint32_t mask;
7461
7462         mark = item->mask ? (const void *)item->mask :
7463                             &rte_flow_item_mark_mask;
7464         mask = mark->id & priv->sh->dv_mark_mask;
7465         mark = (const void *)item->spec;
7466         MLX5_ASSERT(mark);
7467         value = mark->id & priv->sh->dv_mark_mask & mask;
7468         if (mask) {
7469                 enum modify_reg reg;
7470
7471                 /* Get the metadata register index for the mark. */
7472                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7473                 MLX5_ASSERT(reg > 0);
7474                 if (reg == REG_C_0) {
7475                         struct mlx5_priv *priv = dev->data->dev_private;
7476                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7477                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7478
7479                         mask &= msk_c0;
7480                         mask <<= shl_c0;
7481                         value <<= shl_c0;
7482                 }
7483                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7484         }
7485 }
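
/*
 * Editor's sketch (assumed usage; the helper name is hypothetical):
 * matching a MARK id previously set by another flow rule. The default
 * mask covers the full 24-bit id; the REG_C_0 shifting above only
 * applies when the mark register is shared with the vport metadata.
 */
static void __rte_unused
flow_dv_example_mark_item(struct rte_eth_dev *dev, void *matcher, void *key)
{
        static const struct rte_flow_item_mark spec = { .id = 42 };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_MARK,
                .spec = &spec,
                .mask = &rte_flow_item_mark_mask,
        };

        flow_dv_translate_item_mark(dev, matcher, key, &item);
}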
7486
7487 /**
7488  * Add META item to matcher
7489  *
7490  * @param[in] dev
7491  *   The device to configure through.
7492  * @param[in, out] matcher
7493  *   Flow matcher.
7494  * @param[in, out] key
7495  *   Flow matcher value.
7496  * @param[in] attr
7497  *   Attributes of flow that includes this item.
7498  * @param[in] item
7499  *   Flow pattern to translate.
7500  */
7501 static void
7502 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7503                             void *matcher, void *key,
7504                             const struct rte_flow_attr *attr,
7505                             const struct rte_flow_item *item)
7506 {
7507         const struct rte_flow_item_meta *meta_m;
7508         const struct rte_flow_item_meta *meta_v;
7509
7510         meta_m = (const void *)item->mask;
7511         if (!meta_m)
7512                 meta_m = &rte_flow_item_meta_mask;
7513         meta_v = (const void *)item->spec;
7514         if (meta_v) {
7515                 int reg;
7516                 uint32_t value = meta_v->data;
7517                 uint32_t mask = meta_m->data;
7518
7519                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7520                 if (reg < 0)
7521                         return;
7522                 /*
7523                  * In datapath code there are no endianness
7524                  * conversions for performance reasons; all
7525                  * pattern conversions are done in rte_flow.
7526                  */
7527                 value = rte_cpu_to_be_32(value);
7528                 mask = rte_cpu_to_be_32(mask);
7529                 if (reg == REG_C_0) {
7530                         struct mlx5_priv *priv = dev->data->dev_private;
7531                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7532                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7533 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7534                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7535
7536                         value >>= shr_c0;
7537                         mask >>= shr_c0;
7538 #endif
7539                         value <<= shl_c0;
7540                         mask <<= shl_c0;
7541                         MLX5_ASSERT(msk_c0);
7542                         MLX5_ASSERT(!(~msk_c0 & mask));
7543                 }
7544                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7545         }
7546 }
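
/*
 * Editor's sketch (assumed usage; the helper name is hypothetical):
 * matching metadata attached by a SET_META action or by Tx metadata.
 * Spec and mask carry host-order values; the translation above performs
 * the single big-endian conversion.
 */
static void __rte_unused
flow_dv_example_meta_item(struct rte_eth_dev *dev, void *matcher, void *key,
                          const struct rte_flow_attr *attr)
{
        static const struct rte_flow_item_meta spec = { .data = 0xcafe };
        static const struct rte_flow_item_meta mask = { .data = 0xffff };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_META,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_meta(dev, matcher, key, attr, &item);
}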
7547
7548 /**
7549  * Add vport metadata Reg C0 item to matcher
7550  *
7551  * @param[in, out] matcher
7552  *   Flow matcher.
7553  * @param[in, out] key
7554  *   Flow matcher value.
7555  * @param[in] value
7556  *   Register value to match.
 * @param[in] mask
 *   Register mask.
7557  */
7558 static void
7559 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7560                                   uint32_t value, uint32_t mask)
7561 {
7562         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7563 }
7564
7565 /**
7566  * Add tag item to matcher
7567  *
7568  * @param[in] dev
7569  *   The device to configure through.
7570  * @param[in, out] matcher
7571  *   Flow matcher.
7572  * @param[in, out] key
7573  *   Flow matcher value.
7574  * @param[in] item
7575  *   Flow pattern to translate.
7576  */
7577 static void
7578 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7579                                 void *matcher, void *key,
7580                                 const struct rte_flow_item *item)
7581 {
7582         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7583         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7584         uint32_t mask, value;
7585
7586         MLX5_ASSERT(tag_v);
7587         value = tag_v->data;
7588         mask = tag_m ? tag_m->data : UINT32_MAX;
7589         if (tag_v->id == REG_C_0) {
7590                 struct mlx5_priv *priv = dev->data->dev_private;
7591                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7592                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7593
7594                 mask &= msk_c0;
7595                 mask <<= shl_c0;
7596                 value <<= shl_c0;
7597         }
7598         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7599 }
7600
7601 /**
7602  * Add TAG item to matcher
7603  *
7604  * @param[in] dev
7605  *   The device to configure through.
7606  * @param[in, out] matcher
7607  *   Flow matcher.
7608  * @param[in, out] key
7609  *   Flow matcher value.
7610  * @param[in] item
7611  *   Flow pattern to translate.
7612  */
7613 static void
7614 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7615                            void *matcher, void *key,
7616                            const struct rte_flow_item *item)
7617 {
7618         const struct rte_flow_item_tag *tag_v = item->spec;
7619         const struct rte_flow_item_tag *tag_m = item->mask;
7620         enum modify_reg reg;
7621
7622         MLX5_ASSERT(tag_v);
7623         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7624         /* Get the metadata register index for the tag. */
7625         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7626         MLX5_ASSERT(reg > 0);
7627         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7628 }
7629
7630 /**
7631  * Add source vport match to the specified matcher.
7632  *
7633  * @param[in, out] matcher
7634  *   Flow matcher.
7635  * @param[in, out] key
7636  *   Flow matcher value.
7637  * @param[in] port
7638  *   Source vport value to match
7639  * @param[in] mask
7640  *   Mask
7641  */
7642 static void
7643 flow_dv_translate_item_source_vport(void *matcher, void *key,
7644                                     int16_t port, uint16_t mask)
7645 {
7646         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7647         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7648
7649         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7650         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7651 }
7652
7653 /**
7654  * Translate port-id item to eswitch match on port-id.
7655  *
7656  * @param[in] dev
7657  *   The device to configure through.
7658  * @param[in, out] matcher
7659  *   Flow matcher.
7660  * @param[in, out] key
7661  *   Flow matcher value.
7662  * @param[in] item
7663  *   Flow pattern to translate.
7664  *
7665  * @return
7666  *   0 on success, a negative errno value otherwise.
7667  */
7668 static int
7669 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7670                                void *key, const struct rte_flow_item *item)
7671 {
7672         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7673         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7674         struct mlx5_priv *priv;
7675         uint16_t mask, id;
7676
7677         mask = pid_m ? pid_m->id : 0xffff;
7678         id = pid_v ? pid_v->id : dev->data->port_id;
7679         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7680         if (!priv)
7681                 return -rte_errno;
7682         /* Translate to vport field or to metadata, depending on mode. */
7683         if (priv->vport_meta_mask)
7684                 flow_dv_translate_item_meta_vport(matcher, key,
7685                                                   priv->vport_meta_tag,
7686                                                   priv->vport_meta_mask);
7687         else
7688                 flow_dv_translate_item_source_vport(matcher, key,
7689                                                     priv->vport_id, mask);
7690         return 0;
7691 }
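
/*
 * Editor's sketch (assumed usage; the helper name and port number are
 * hypothetical): matching traffic from representor port 1 in a transfer
 * rule. Whether this becomes a source vport or a REG_C_0 metadata match
 * is decided above from the e-switch configuration.
 */
static void __rte_unused
flow_dv_example_port_id_item(struct rte_eth_dev *dev, void *matcher, void *key)
{
        static const struct rte_flow_item_port_id spec = { .id = 1 };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
                .spec = &spec,
                .mask = &rte_flow_item_port_id_mask,
        };

        (void)flow_dv_translate_item_port_id(dev, matcher, key, &item);
}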
7692
7693 /**
7694  * Add ICMP6 item to matcher and to the value.
7695  *
7696  * @param[in, out] matcher
7697  *   Flow matcher.
7698  * @param[in, out] key
7699  *   Flow matcher value.
7700  * @param[in] item
7701  *   Flow pattern to translate.
7702  * @param[in] inner
7703  *   Item is inner pattern.
7704  */
7705 static void
7706 flow_dv_translate_item_icmp6(void *matcher, void *key,
7707                               const struct rte_flow_item *item,
7708                               int inner)
7709 {
7710         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7711         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7712         void *headers_m;
7713         void *headers_v;
7714         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7715                                      misc_parameters_3);
7716         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7717         if (inner) {
7718                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7719                                          inner_headers);
7720                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7721         } else {
7722                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7723                                          outer_headers);
7724                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7725         }
7726         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7727         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7728         if (!icmp6_v)
7729                 return;
7730         if (!icmp6_m)
7731                 icmp6_m = &rte_flow_item_icmp6_mask;
7732         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7733         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7734                  icmp6_v->type & icmp6_m->type);
7735         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7736         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7737                  icmp6_v->code & icmp6_m->code);
7738 }
7739
7740 /**
7741  * Add ICMP item to matcher and to the value.
7742  *
7743  * @param[in, out] matcher
7744  *   Flow matcher.
7745  * @param[in, out] key
7746  *   Flow matcher value.
7747  * @param[in] item
7748  *   Flow pattern to translate.
7749  * @param[in] inner
7750  *   Item is inner pattern.
7751  */
7752 static void
7753 flow_dv_translate_item_icmp(void *matcher, void *key,
7754                             const struct rte_flow_item *item,
7755                             int inner)
7756 {
7757         const struct rte_flow_item_icmp *icmp_m = item->mask;
7758         const struct rte_flow_item_icmp *icmp_v = item->spec;
7759         uint32_t icmp_header_data_m = 0;
7760         uint32_t icmp_header_data_v = 0;
7761         void *headers_m;
7762         void *headers_v;
7763         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7764                                      misc_parameters_3);
7765         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7766         if (inner) {
7767                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7768                                          inner_headers);
7769                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7770         } else {
7771                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7772                                          outer_headers);
7773                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7774         }
7775         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7776         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7777         if (!icmp_v)
7778                 return;
7779         if (!icmp_m)
7780                 icmp_m = &rte_flow_item_icmp_mask;
7781         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7782                  icmp_m->hdr.icmp_type);
7783         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7784                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7785         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7786                  icmp_m->hdr.icmp_code);
7787         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7788                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7789         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
7790         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
7791         if (icmp_header_data_m) {
7792                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
7793                 icmp_header_data_v |=
7794                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
7795                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
7796                          icmp_header_data_m);
7797                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
7798                          icmp_header_data_v & icmp_header_data_m);
7799         }
7800 }
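
/*
 * Editor's note (illustrative): icmp_header_data packs the identifier
 * into the high 16 bits and the sequence number into the low 16 bits.
 * Matching identifier 0x1234 while ignoring the sequence number gives
 *
 *     icmp_header_data_m = 0x0000 | (0xffff << 16)     = 0xffff0000
 *     icmp_header_data_v = (0x1234 << 16) & 0xffff0000 = 0x12340000
 */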
7801
7802 /**
7803  * Add GTP item to matcher and to the value.
7804  *
7805  * @param[in, out] matcher
7806  *   Flow matcher.
7807  * @param[in, out] key
7808  *   Flow matcher value.
7809  * @param[in] item
7810  *   Flow pattern to translate.
7811  * @param[in] inner
7812  *   Item is inner pattern.
7813  */
7814 static void
7815 flow_dv_translate_item_gtp(void *matcher, void *key,
7816                            const struct rte_flow_item *item, int inner)
7817 {
7818         const struct rte_flow_item_gtp *gtp_m = item->mask;
7819         const struct rte_flow_item_gtp *gtp_v = item->spec;
7820         void *headers_m;
7821         void *headers_v;
7822         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7823                                      misc_parameters_3);
7824         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7825         uint16_t dport = RTE_GTPU_UDP_PORT;
7826
7827         if (inner) {
7828                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7829                                          inner_headers);
7830                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7831         } else {
7832                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7833                                          outer_headers);
7834                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7835         }
7836         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7837                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7838                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7839         }
7840         if (!gtp_v)
7841                 return;
7842         if (!gtp_m)
7843                 gtp_m = &rte_flow_item_gtp_mask;
7844         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7845                  gtp_m->v_pt_rsv_flags);
7846         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7847                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7848         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7849         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7850                  gtp_v->msg_type & gtp_m->msg_type);
7851         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7852                  rte_be_to_cpu_32(gtp_m->teid));
7853         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7854                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7855 }
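
/*
 * Editor's sketch (assumed usage; the helper name and TEID are
 * hypothetical): matching a GTP-U tunnel by TEID only. The UDP
 * destination port defaults to RTE_GTPU_UDP_PORT when the pattern
 * did not pin one.
 */
static void __rte_unused
flow_dv_example_gtp_item(void *matcher, void *key)
{
        static const struct rte_flow_item_gtp spec = {
                .teid = RTE_BE32(0x1234),
        };
        static const struct rte_flow_item_gtp mask = {
                .teid = RTE_BE32(UINT32_MAX),
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_GTP,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_gtp(matcher, key, &item, 0);
}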
7856
7857 /**
7858  * Add eCPRI item to matcher and to the value.
7859  *
7860  * @param[in] dev
7861  *   The device to configure through.
7862  * @param[in, out] matcher
7863  *   Flow matcher.
7864  * @param[in, out] key
7865  *   Flow matcher value.
7866  * @param[in] item
7867  *   Flow pattern to translate.
7870  */
7871 static void
7872 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7873                              void *key, const struct rte_flow_item *item)
7874 {
7875         struct mlx5_priv *priv = dev->data->dev_private;
7876         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7877         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7878         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7879                                      misc_parameters_4);
7880         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7881         uint32_t *samples;
7882         void *dw_m;
7883         void *dw_v;
7884
7885         if (!ecpri_v)
7886                 return;
7887         if (!ecpri_m)
7888                 ecpri_m = &rte_flow_item_ecpri_mask;
7889         /*
7890          * At most four DW samples are supported in a single matching now.
7891          * Two are used for eCPRI matching:
7892          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
7893          * 2. ID of a message: one or two bytes, mask 0xffff0000 or
7894          *    0xff000000, if any.
7895          */
7896         if (!ecpri_m->hdr.common.u32)
7897                 return;
7898         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7899         /* Need to take the whole DW as the mask to fill the entry. */
7900         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7901                             prog_sample_field_value_0);
7902         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7903                             prog_sample_field_value_0);
7904         /* Already big endian (network order) in the header. */
7905         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7906         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7907         /* Sample#0, used for matching type, offset 0. */
7908         MLX5_SET(fte_match_set_misc4, misc4_m,
7909                  prog_sample_field_id_0, samples[0]);
7910         /* It makes no sense to set the sample ID in the mask field. */
7911         MLX5_SET(fte_match_set_misc4, misc4_v,
7912                  prog_sample_field_id_0, samples[0]);
7913         /*
7914          * Checking if message body part needs to be matched.
7915          * Some wildcard rules only matching type field should be supported.
7916          */
7917         if (ecpri_m->hdr.dummy[0]) {
7918                 switch (ecpri_v->hdr.common.type) {
7919                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7920                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7921                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7922                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7923                                             prog_sample_field_value_1);
7924                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7925                                             prog_sample_field_value_1);
7926                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7927                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7928                         /* Sample#1, to match message body, offset 4. */
7929                         MLX5_SET(fte_match_set_misc4, misc4_m,
7930                                  prog_sample_field_id_1, samples[1]);
7931                         MLX5_SET(fte_match_set_misc4, misc4_v,
7932                                  prog_sample_field_id_1, samples[1]);
7933                         break;
7934                 default:
7935                         /* Others, do not match any sample ID. */
7936                         break;
7937                 }
7938         }
7939 }
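
/*
 * Editor's sketch (assumed usage; the helper name and values are
 * hypothetical): matching eCPRI IQ-data messages (type 0) on physical
 * channel 0x1234. Masks are expressed in network order, as required by
 * the comments above.
 */
static void __rte_unused
flow_dv_example_ecpri_item(struct rte_eth_dev *dev, void *matcher, void *key)
{
        static const struct rte_flow_item_ecpri spec = {
                .hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
                .hdr.type0.pc_id = RTE_BE16(0x1234),
        };
        static const struct rte_flow_item_ecpri mask = {
                .hdr.common.u32 = RTE_BE32(0x00ff0000), /* type byte */
                .hdr.dummy[0] = RTE_BE32(0xffff0000), /* pc_id bytes */
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_ECPRI,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_ecpri(dev, matcher, key, &item);
}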
7940
7941 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7942
7943 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7944         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7945                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
7946
7947 /**
7948  * Calculate flow matcher enable bitmap.
7949  *
7950  * @param match_criteria
7951  *   Pointer to flow matcher criteria.
7952  *
7953  * @return
7954  *   Bitmap of enabled fields.
7955  */
7956 static uint8_t
7957 flow_dv_matcher_enable(uint32_t *match_criteria)
7958 {
7959         uint8_t match_criteria_enable;
7960
7961         match_criteria_enable =
7962                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7963                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7964         match_criteria_enable |=
7965                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7966                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7967         match_criteria_enable |=
7968                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7969                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7970         match_criteria_enable |=
7971                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7972                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7973         match_criteria_enable |=
7974                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7975                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7976         match_criteria_enable |=
7977                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7978                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7979         return match_criteria_enable;
7980 }
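
/*
 * Editor's note (illustrative): a matcher carrying an outer IPv4 match
 * plus a metadata register match (misc_parameters_2) would report
 *
 *     match_criteria_enable =
 *             (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *             (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
 *
 * while the all-zero criteria blocks stay disabled.
 */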
7981
7982
7983 /**
7984  * Get a flow table.
7985  *
7986  * @param[in, out] dev
7987  *   Pointer to rte_eth_dev structure.
7988  * @param[in] table_id
7989  *   Table id to use.
7990  * @param[in] egress
7991  *   Direction of the table.
7992  * @param[in] transfer
7993  *   E-Switch or NIC flow.
7994  * @param[out] error
7995  *   pointer to error structure.
7996  *
7997  * @return
7998  *   Returns the table resource based on the index, NULL in case of failure.
7999  */
8000 static struct mlx5_flow_tbl_resource *
8001 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8002                          uint32_t table_id, uint8_t egress,
8003                          uint8_t transfer,
8004                          bool external,
8005                          const struct mlx5_flow_tunnel *tunnel,
8006                          uint32_t group_id,
8007                          struct rte_flow_error *error)
8008 {
8009         struct mlx5_priv *priv = dev->data->dev_private;
8010         struct mlx5_dev_ctx_shared *sh = priv->sh;
8011         struct mlx5_flow_tbl_resource *tbl;
8012         union mlx5_flow_tbl_key table_key = {
8013                 {
8014                         .table_id = table_id,
8015                         .reserved = 0,
8016                         .domain = !!transfer,
8017                         .direction = !!egress,
8018                 }
8019         };
8020         struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
8021                                                          table_key.v64);
8022         struct mlx5_flow_tbl_data_entry *tbl_data;
8023         uint32_t idx = 0;
8024         int ret;
8025         void *domain;
8026
8027         if (pos) {
8028                 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
8029                                         entry);
8030                 tbl = &tbl_data->tbl;
8031                 __atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
8032                 return tbl;
8033         }
8034         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
8035         if (!tbl_data) {
8036                 rte_flow_error_set(error, ENOMEM,
8037                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8038                                    NULL,
8039                                    "cannot allocate flow table data entry");
8040                 return NULL;
8041         }
8042         tbl_data->idx = idx;
8043         tbl_data->tunnel = tunnel;
8044         tbl_data->group_id = group_id;
8045         tbl_data->external = external;
8046         tbl = &tbl_data->tbl;
8047         pos = &tbl_data->entry;
8048         if (transfer)
8049                 domain = sh->fdb_domain;
8050         else if (egress)
8051                 domain = sh->tx_domain;
8052         else
8053                 domain = sh->rx_domain;
8054         ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
8055         if (ret) {
8056                 rte_flow_error_set(error, ENOMEM,
8057                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8058                                    NULL, "cannot create flow table object");
8059                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8060                 return NULL;
8061         }
8062         /*
8063          * No multi-thread support yet, but it is still better to initialize
8064          * the reference count before inserting it into the hash list.
8065          */
8066         __atomic_store_n(&tbl->refcnt, 0, __ATOMIC_RELAXED);
8067         /* Jump action reference count is initialized here. */
8068         __atomic_store_n(&tbl_data->jump.refcnt, 0, __ATOMIC_RELAXED);
8069         pos->key = table_key.v64;
8070         ret = mlx5_hlist_insert(sh->flow_tbls, pos);
8071         if (ret < 0) {
8072                 rte_flow_error_set(error, -ret,
8073                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8074                                    "cannot insert flow table data entry");
8075                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
8076                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
                /* The entry was freed above; do not reference it anymore. */
                return NULL;
8077         }
8078         __atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
8079         return tbl;
8080 }
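
/*
 * Editor's sketch (assumed usage; the helper name is hypothetical):
 * taking and dropping a reference on the FDB table of group 1. The
 * first call creates the table object; later calls only increment the
 * reference counter.
 */
static void __rte_unused
flow_dv_example_tbl_ref(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct mlx5_flow_tbl_resource *tbl;

        /* table_id = 1, egress = 0, transfer = 1 (FDB), no tunnel. */
        tbl = flow_dv_tbl_resource_get(dev, 1, 0, 1, false, NULL, 0, error);
        if (!tbl)
                return; /* rte_flow_error is already filled. */
        /* ... use tbl->obj to create matchers and rules ... */
        (void)flow_dv_tbl_resource_release(dev, tbl);
}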
8081
8082 /**
8083  * Release a flow table.
8084  *
8085  * @param[in] dev
8086  *   Pointer to rte_eth_dev structure.
8087  * @param[in] tbl
8088  *   Table resource to be released.
8089  *
8090  * @return
8091  *   Returns 0 if the table was released, 1 otherwise.
8092  */
8093 static int
8094 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
8095                              struct mlx5_flow_tbl_resource *tbl)
8096 {
8097         struct mlx5_priv *priv = dev->data->dev_private;
8098         struct mlx5_dev_ctx_shared *sh = priv->sh;
8099         struct mlx5_flow_tbl_data_entry *tbl_data =
8100                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8101
8102         if (!tbl)
8103                 return 0;
8104         if (__atomic_sub_fetch(&tbl->refcnt, 1, __ATOMIC_RELAXED) == 0) {
8105                 struct mlx5_hlist_entry *pos = &tbl_data->entry;
8106
8107                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
8108                 tbl->obj = NULL;
8109                 if (is_tunnel_offload_active(dev) && tbl_data->external) {
8110                         struct mlx5_hlist_entry *he;
8111                         struct mlx5_hlist *tunnel_grp_hash;
8112                         struct mlx5_flow_tunnel_hub *thub =
8113                                                         mlx5_tunnel_hub(dev);
8114                         union tunnel_tbl_key tunnel_key = {
8115                                 .tunnel_id = tbl_data->tunnel ?
8116                                                 tbl_data->tunnel->tunnel_id : 0,
8117                                 .group = tbl_data->group_id
8118                         };
8119                         union mlx5_flow_tbl_key table_key = {
8120                                 .v64 = pos->key
8121                         };
8122                         uint32_t table_id = table_key.table_id;
8123
8124                         tunnel_grp_hash = tbl_data->tunnel ?
8125                                                 tbl_data->tunnel->groups :
8126                                                 thub->groups;
8127                         he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val);
8128                         if (he) {
8129                                 struct tunnel_tbl_entry *tte;
8130                                 tte = container_of(he, typeof(*tte), hash);
8131                                 MLX5_ASSERT(tte->flow_table == table_id);
8132                                 mlx5_hlist_remove(tunnel_grp_hash, he);
8133                                 mlx5_free(tte);
8134                         }
8135                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
8136                                         tunnel_flow_tbl_to_id(table_id));
8137                         DRV_LOG(DEBUG,
8138                                 "port %u release table_id %#x tunnel %u group %u",
8139                                 dev->data->port_id, table_id,
8140                                 tbl_data->tunnel ?
8141                                 tbl_data->tunnel->tunnel_id : 0,
8142                                 tbl_data->group_id);
8143                 }
8144                 /* remove the entry from the hash list and free memory. */
8145                 mlx5_hlist_remove(sh->flow_tbls, pos);
8146                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
8147                                 tbl_data->idx);
8148                 return 0;
8149         }
8150         return 1;
8151 }
8152
8153 /**
8154  * Register the flow matcher.
8155  *
8156  * @param[in, out] dev
8157  *   Pointer to rte_eth_dev structure.
8158  * @param[in, out] matcher
8159  *   Pointer to flow matcher.
8160  * @param[in, out] key
8161  *   Pointer to flow table key.
8162  * @param[in, out] dev_flow
8163  *   Pointer to the dev_flow.
8164  * @param[out] error
8165  *   pointer to error structure.
8166  *
8167  * @return
8168  *   0 on success, otherwise -errno and errno is set.
8169  */
8170 static int
8171 flow_dv_matcher_register(struct rte_eth_dev *dev,
8172                          struct mlx5_flow_dv_matcher *matcher,
8173                          union mlx5_flow_tbl_key *key,
8174                          struct mlx5_flow *dev_flow,
8175                          struct rte_flow_error *error)
8176 {
8177         struct mlx5_priv *priv = dev->data->dev_private;
8178         struct mlx5_dev_ctx_shared *sh = priv->sh;
8179         struct mlx5_flow_dv_matcher *cache_matcher;
8180         struct mlx5dv_flow_matcher_attr dv_attr = {
8181                 .type = IBV_FLOW_ATTR_NORMAL,
8182                 .match_mask = (void *)&matcher->mask,
8183         };
8184         struct mlx5_flow_tbl_resource *tbl;
8185         struct mlx5_flow_tbl_data_entry *tbl_data;
8186         int ret;
8187
8188         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
8189                                        key->domain, false, NULL, 0, error);
8190         if (!tbl)
8191                 return -rte_errno;      /* No need to refill the error info */
8192         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8193         /* Lookup from cache. */
8194         LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
8195                 if (matcher->crc == cache_matcher->crc &&
8196                     matcher->priority == cache_matcher->priority &&
8197                     !memcmp((const void *)matcher->mask.buf,
8198                             (const void *)cache_matcher->mask.buf,
8199                             cache_matcher->mask.size)) {
8200                         DRV_LOG(DEBUG,
8201                                 "%s group %u priority %hd use %s "
8202                                 "matcher %p: refcnt %d++",
8203                                 key->domain ? "FDB" : "NIC", key->table_id,
8204                                 cache_matcher->priority,
8205                                 key->direction ? "tx" : "rx",
8206                                 (void *)cache_matcher,
8207                                 __atomic_load_n(&cache_matcher->refcnt,
8208                                                 __ATOMIC_RELAXED));
8209                         __atomic_fetch_add(&cache_matcher->refcnt, 1,
8210                                            __ATOMIC_RELAXED);
8211                         dev_flow->handle->dvh.matcher = cache_matcher;
8212                         /* old matcher should not make the table ref++. */
8213                         flow_dv_tbl_resource_release(dev, tbl);
8214                         return 0;
8215                 }
8216         }
8217         /* Register new matcher. */
8218         cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
8219                                     SOCKET_ID_ANY);
8220         if (!cache_matcher) {
8221                 flow_dv_tbl_resource_release(dev, tbl);
8222                 return rte_flow_error_set(error, ENOMEM,
8223                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8224                                           "cannot allocate matcher memory");
8225         }
8226         *cache_matcher = *matcher;
8227         dv_attr.match_criteria_enable =
8228                 flow_dv_matcher_enable(cache_matcher->mask.buf);
8229         dv_attr.priority = matcher->priority;
8230         if (key->direction)
8231                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8232         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
8233                                                &cache_matcher->matcher_object);
8234         if (ret) {
8235                 mlx5_free(cache_matcher);
8236 #ifdef HAVE_MLX5DV_DR
8237                 flow_dv_tbl_resource_release(dev, tbl);
8238 #endif
8239                 return rte_flow_error_set(error, ENOMEM,
8240                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8241                                           NULL, "cannot create matcher");
8242         }
8243         /* Save the table information */
8244         cache_matcher->tbl = tbl;
8245         /* only matcher ref++, table ref++ already done above in get API. */
8246         __atomic_store_n(&cache_matcher->refcnt, 1, __ATOMIC_RELAXED);
8247         LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
8248         dev_flow->handle->dvh.matcher = cache_matcher;
8249         DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
8250                 key->domain ? "FDB" : "NIC", key->table_id,
8251                 cache_matcher->priority,
8252                 key->direction ? "tx" : "rx", (void *)cache_matcher,
8253                 __atomic_load_n(&cache_matcher->refcnt, __ATOMIC_RELAXED));
8254         return 0;
8255 }
8256
8257 /**
8258  * Find existing tag resource or create and register a new one.
8259  *
8260  * @param[in, out] dev
8261  *   Pointer to rte_eth_dev structure.
8262  * @param[in] tag_be24
8263  *   Tag value in big endian, right-shifted by 8 bits.
8264  * @param[in, out] dev_flow
8265  *   Pointer to the dev_flow.
8266  * @param[out] error
8267  *   pointer to error structure.
8268  *
8269  * @return
8270  *   0 on success, otherwise -errno and errno is set.
8271  */
8272 static int
8273 flow_dv_tag_resource_register
8274                         (struct rte_eth_dev *dev,
8275                          uint32_t tag_be24,
8276                          struct mlx5_flow *dev_flow,
8277                          struct rte_flow_error *error)
8278 {
8279         struct mlx5_priv *priv = dev->data->dev_private;
8280         struct mlx5_dev_ctx_shared *sh = priv->sh;
8281         struct mlx5_flow_dv_tag_resource *cache_resource;
8282         struct mlx5_hlist_entry *entry;
8283         int ret;
8284
8285         /* Lookup a matching resource from cache. */
8286         entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
8287         if (entry) {
8288                 cache_resource = container_of
8289                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8290                 __atomic_fetch_add(&cache_resource->refcnt, 1,
8291                                    __ATOMIC_RELAXED);
8292                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8293                 dev_flow->dv.tag_resource = cache_resource;
8294                 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
8295                         (void *)cache_resource,
8296                         __atomic_load_n(&cache_resource->refcnt,
8297                                         __ATOMIC_RELAXED));
8298                 return 0;
8299         }
8300         /* Register new resource. */
8301         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
8302                                        &dev_flow->handle->dvh.rix_tag);
8303         if (!cache_resource)
8304                 return rte_flow_error_set(error, ENOMEM,
8305                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8306                                           "cannot allocate resource memory");
8307         cache_resource->entry.key = (uint64_t)tag_be24;
8308         ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
8309                                                   &cache_resource->action);
8310         if (ret) {
8311                 /* The resource came from the indexed pool; return it there. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
                                dev_flow->handle->dvh.rix_tag);
8312                 return rte_flow_error_set(error, ENOMEM,
8313                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8314                                           NULL, "cannot create action");
8315         }
8316         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
8317         if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
8318                 mlx5_flow_os_destroy_flow_action(cache_resource->action);
8319                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
                                dev_flow->handle->dvh.rix_tag);
8320                 return rte_flow_error_set(error, EEXIST,
8321                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8322                                           NULL, "cannot insert tag");
8323         }
8324         dev_flow->dv.tag_resource = cache_resource;
8325         DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
8326                 (void *)cache_resource,
8327                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
8328         return 0;
8329 }
8330
8331 /**
8332  * Release the tag.
8333  *
8334  * @param dev
8335  *   Pointer to Ethernet device.
8336  * @param tag_idx
8337  *   Tag index.
8338  *
8339  * @return
8340  *   1 while a reference on it exists, 0 when freed.
8341  */
8342 static int
8343 flow_dv_tag_release(struct rte_eth_dev *dev,
8344                     uint32_t tag_idx)
8345 {
8346         struct mlx5_priv *priv = dev->data->dev_private;
8347         struct mlx5_dev_ctx_shared *sh = priv->sh;
8348         struct mlx5_flow_dv_tag_resource *tag;
8349
8350         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8351         if (!tag)
8352                 return 0;
8353         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8354                 dev->data->port_id, (void *)tag,
8355                 __atomic_load_n(&tag->refcnt, __ATOMIC_RELAXED));
8356         if (__atomic_sub_fetch(&tag->refcnt, 1, __ATOMIC_RELAXED) == 0) {
8357                 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8358                 mlx5_hlist_remove(sh->tag_table, &tag->entry);
8359                 DRV_LOG(DEBUG, "port %u tag %p: removed",
8360                         dev->data->port_id, (void *)tag);
8361                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8362                 return 0;
8363         }
8364         return 1;
8365 }
8366
8367 /**
8368  * Translate port ID action to vport.
8369  *
8370  * @param[in] dev
8371  *   Pointer to rte_eth_dev structure.
8372  * @param[in] action
8373  *   Pointer to the port ID action.
8374  * @param[out] dst_port_id
8375  *   The target port ID.
8376  * @param[out] error
8377  *   Pointer to the error structure.
8378  *
8379  * @return
8380  *   0 on success, a negative errno value otherwise and rte_errno is set.
8381  */
8382 static int
8383 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8384                                  const struct rte_flow_action *action,
8385                                  uint32_t *dst_port_id,
8386                                  struct rte_flow_error *error)
8387 {
8388         uint32_t port;
8389         struct mlx5_priv *priv;
8390         const struct rte_flow_action_port_id *conf =
8391                         (const struct rte_flow_action_port_id *)action->conf;
8392
8393         port = conf->original ? dev->data->port_id : conf->id;
8394         priv = mlx5_port_to_eswitch_info(port, false);
8395         if (!priv)
8396                 return rte_flow_error_set(error, -rte_errno,
8397                                           RTE_FLOW_ERROR_TYPE_ACTION,
8398                                           NULL,
8399                                           "No eswitch info was found for port");
8400 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8401         /*
8402          * This parameter is transferred to
8403          * mlx5dv_dr_action_create_dest_ib_port().
8404          */
8405         *dst_port_id = priv->dev_port;
8406 #else
8407         /*
8408          * Legacy mode; no LAG configuration is supported.
8409          * This parameter is transferred to
8410          * mlx5dv_dr_action_create_dest_vport().
8411          */
8412         *dst_port_id = priv->vport_id;
8413 #endif
8414         return 0;
8415 }
8416
8417 /**
8418  * Create a counter with aging configuration.
8419  *
8420  * @param[in] dev
8421  *   Pointer to rte_eth_dev structure.
8422  * @param[out] count
8423  *   Pointer to the counter action configuration.
8424  * @param[in] age
8425  *   Pointer to the aging action configuration.
8426  *
8427  * @return
8428  *   Index to flow counter on success, 0 otherwise.
8429  */
8430 static uint32_t
8431 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8432                                 struct mlx5_flow *dev_flow,
8433                                 const struct rte_flow_action_count *count,
8434                                 const struct rte_flow_action_age *age)
8435 {
8436         uint32_t counter;
8437         struct mlx5_age_param *age_param;
8438
8439         if (count && count->shared)
8440                 counter = flow_dv_counter_get_shared(dev, count->id);
8441         else
8442                 counter = flow_dv_counter_alloc(dev, !!age);
8443         if (!counter || age == NULL)
8444                 return counter;
8445         age_param = flow_dv_counter_idx_get_age(dev, counter);
8446         age_param->context = age->context ? age->context :
8447                 (void *)(uintptr_t)(dev_flow->flow_idx);
8448         age_param->timeout = age->timeout;
8449         age_param->port_id = dev->data->port_id;
8450         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8451         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8452         return counter;
8453 }
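
/*
 * Editor's note: a self-contained sketch of the aging-parameter
 * initialization performed above. The structure is hypothetical; the
 * relaxed atomic stores mirror the driver's pattern, where the aging
 * thread only needs eventual visibility of the fields.
 */
struct age_param_sketch {
        void *context;
        uint32_t timeout;
        uint16_t port_id;
        uint32_t sec_since_last_hit;
        uint16_t state;
};

static void
age_param_init(struct age_param_sketch *p, void *ctx, uint32_t timeout,
               uint16_t port_id, uint16_t candidate_state)
{
        p->context = ctx;
        p->timeout = timeout;
        p->port_id = port_id;
        __atomic_store_n(&p->sec_since_last_hit, 0, __ATOMIC_RELAXED);
        __atomic_store_n(&p->state, candidate_state, __ATOMIC_RELAXED);
}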

8454 /**
8455  * Add Tx queue matcher.
8456  *
8457  * @param[in] dev
8458  *   Pointer to the dev struct.
8459  * @param[in, out] matcher
8460  *   Flow matcher.
8461  * @param[in, out] key
8462  *   Flow matcher value.
8463  * @param[in] item
8464  *   Flow pattern to translate.
8467  */
8468 static void
8469 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8470                                 void *matcher, void *key,
8471                                 const struct rte_flow_item *item)
8472 {
8473         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8474         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8475         void *misc_m =
8476                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8477         void *misc_v =
8478                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8479         struct mlx5_txq_ctrl *txq;
8480         uint32_t queue;
8481
8483         queue_m = (const void *)item->mask;
8484         if (!queue_m)
8485                 return;
8486         queue_v = (const void *)item->spec;
8487         if (!queue_v)
8488                 return;
8489         txq = mlx5_txq_get(dev, queue_v->queue);
8490         if (!txq)
8491                 return;
8492         queue = txq->obj->sq->id;
8493         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8494         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8495                  queue & queue_m->queue);
8496         mlx5_txq_release(dev, queue_v->queue);
8497 }
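
/*
 * Editor's note: the matcher convention used above, reduced to plain
 * integers. The mask side receives the raw mask and the value side
 * receives value & mask, so spec bits outside the mask can never
 * influence matching. Names are illustrative.
 */
static void
match_field_set(uint32_t *field_mask, uint32_t *field_value,
                uint32_t value, uint32_t mask)
{
        *field_mask = mask;
        *field_value = value & mask;
}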
8498
8499 /**
8500  * Set the hash fields according to the @p dev_flow information.
8501  *
8502  * @param[in] dev_flow
8503  *   Pointer to the mlx5_flow.
8504  * @param[in] rss_desc
8505  *   Pointer to the mlx5_flow_rss_desc.
8506  */
8507 static void
8508 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8509                        struct mlx5_flow_rss_desc *rss_desc)
8510 {
8511         uint64_t items = dev_flow->handle->layers;
8512         int rss_inner = 0;
8513         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8514
8515         dev_flow->hash_fields = 0;
8516 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8517         if (rss_desc->level >= 2) {
8518                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8519                 rss_inner = 1;
8520         }
8521 #endif
8522         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8523             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8524                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8525                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8526                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8527                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8528                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8529                         else
8530                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8531                 }
8532         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8533                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8534                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8535                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8536                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8537                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8538                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8539                         else
8540                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8541                 }
8542         }
8543         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8544             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8545                 if (rss_types & ETH_RSS_UDP) {
8546                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8547                                 dev_flow->hash_fields |=
8548                                                 IBV_RX_HASH_SRC_PORT_UDP;
8549                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8550                                 dev_flow->hash_fields |=
8551                                                 IBV_RX_HASH_DST_PORT_UDP;
8552                         else
8553                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8554                 }
8555         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8556                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8557                 if (rss_types & ETH_RSS_TCP) {
8558                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8559                                 dev_flow->hash_fields |=
8560                                                 IBV_RX_HASH_SRC_PORT_TCP;
8561                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8562                                 dev_flow->hash_fields |=
8563                                                 IBV_RX_HASH_DST_PORT_TCP;
8564                         else
8565                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8566                 }
8567         }
8568 }
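
/*
 * Editor's note: one branch of the hash-field selection above, isolated.
 * SRC_ONLY/DST_ONLY request a single-sided hash; with neither flag set
 * the hash covers both addresses. The flag and field values are
 * stand-ins for the ETH_RSS_* and IBV_RX_HASH_* constants.
 */
static uint64_t
l3_hash_fields_select(uint64_t rss_types, uint64_t src_only_flag,
                      uint64_t dst_only_flag, uint64_t src_field,
                      uint64_t dst_field)
{
        if (rss_types & src_only_flag)
                return src_field;
        if (rss_types & dst_only_flag)
                return dst_field;
        return src_field | dst_field;
}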
8569
8570 /**
8571  * Get an existing Rx hash queue or create a new one.
8572  *
8573  * @param dev
8574  *   Pointer to Ethernet device.
8575  * @param[in] dev_flow
8576  *   Pointer to the mlx5_flow.
8577  * @param[in] rss_desc
8578  *   Pointer to the mlx5_flow_rss_desc.
8579  * @param[out] hrxq_idx
8580  *   Hash Rx queue index.
8581  *
8582  * @return
8583  *   Pointer to the hash Rx queue object, NULL otherwise and rte_errno is set.
8584  */
8585 static struct mlx5_hrxq *
8586 flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
8587                         struct mlx5_flow *dev_flow,
8588                         struct mlx5_flow_rss_desc *rss_desc,
8589                         uint32_t *hrxq_idx)
8590 {
8591         struct mlx5_priv *priv = dev->data->dev_private;
8592         struct mlx5_flow_handle *dh = dev_flow->handle;
8593         struct mlx5_hrxq *hrxq;
8594
8595         MLX5_ASSERT(rss_desc->queue_num);
8596         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
8597                                   dev_flow->hash_fields,
8598                                   rss_desc->queue, rss_desc->queue_num);
8599         if (!*hrxq_idx) {
8600                 *hrxq_idx = mlx5_hrxq_new
8601                                 (dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
8602                                  dev_flow->hash_fields,
8603                                  rss_desc->queue, rss_desc->queue_num,
8604                                  !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL),
8605                                  false);
8606                 if (!*hrxq_idx)
8607                         return NULL;
8608         }
8609         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8610                               *hrxq_idx);
8611         return hrxq;
8612 }
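
/*
 * Editor's note: the get-or-create idiom used above, with hypothetical
 * lookup/create callbacks standing in for mlx5_hrxq_get()/mlx5_hrxq_new().
 * A zero index signals failure in both helpers, matching the ipool
 * convention where index 0 is never a valid entry.
 */
static uint32_t
idx_get_or_create(uint32_t (*get)(const void *key),
                  uint32_t (*create)(const void *key),
                  const void *key)
{
        uint32_t idx = get(key);

        if (!idx)
                idx = create(key);
        return idx; /* 0: both lookup and creation failed */
}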
8613
8614 /**
8615  * Find existing sample resource or create and register a new one.
8616  *
8617  * @param[in, out] dev
8618  *   Pointer to rte_eth_dev structure.
8619  * @param[in] attr
8620  *   Attributes of flow that includes this item.
8621  * @param[in] resource
8622  *   Pointer to sample resource.
8623  * @param[in, out] dev_flow
8624  *   Pointer to the dev_flow.
8625  * @param[in, out] sample_dv_actions
8626  *   Pointer to sample actions list.
8627  * @param[out] error
8628  *   pointer to error structure.
8629  *
8630  * @return
8631  *   0 on success, a negative errno value otherwise and rte_errno is set.
8632  */
8633 static int
8634 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8635                          const struct rte_flow_attr *attr,
8636                          struct mlx5_flow_dv_sample_resource *resource,
8637                          struct mlx5_flow *dev_flow,
8638                          void **sample_dv_actions,
8639                          struct rte_flow_error *error)
8640 {
8641         struct mlx5_flow_dv_sample_resource *cache_resource;
8642         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
8643         struct mlx5_priv *priv = dev->data->dev_private;
8644         struct mlx5_dev_ctx_shared *sh = priv->sh;
8645         struct mlx5_flow_tbl_resource *tbl;
8646         uint32_t idx = 0;
8647         const uint32_t next_ft_step = 1;
8648         uint32_t next_ft_id = resource->ft_id + next_ft_step;
8649
8650         /* Lookup a matching resource from cache. */
8651         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list,
8652                       idx, cache_resource, next) {
8653                 if (resource->ratio == cache_resource->ratio &&
8654                     resource->ft_type == cache_resource->ft_type &&
8655                     resource->ft_id == cache_resource->ft_id &&
8656                     resource->set_action == cache_resource->set_action &&
8657                     !memcmp((void *)&resource->sample_act,
8658                             (void *)&cache_resource->sample_act,
8659                             sizeof(struct mlx5_flow_sub_actions_list))) {
8660                         DRV_LOG(DEBUG, "sample resource %p: refcnt %d++",
8661                                 (void *)cache_resource,
8662                                 __atomic_load_n(&cache_resource->refcnt,
8663                                                 __ATOMIC_RELAXED));
8664                         __atomic_fetch_add(&cache_resource->refcnt, 1,
8665                                            __ATOMIC_RELAXED);
8666                         dev_flow->handle->dvh.rix_sample = idx;
8667                         dev_flow->dv.sample_res = cache_resource;
8668                         return 0;
8669                 }
8670         }
8671         /* Register new sample resource. */
8672         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE],
8673                                        &dev_flow->handle->dvh.rix_sample);
8674         if (!cache_resource)
8675                 return rte_flow_error_set(error, ENOMEM,
8676                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8677                                           NULL,
8678                                           "cannot allocate resource memory");
8679         *cache_resource = *resource;
8680         /* Create normal path table level */
8681         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
8682                                         attr->egress, attr->transfer,
8683                                         dev_flow->external, NULL, 0, error);
8684         if (!tbl) {
8685                 rte_flow_error_set(error, ENOMEM,
8686                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8687                                           NULL,
8688                                           "failed to create normal path table "
8689                                           "for sample");
8690                 goto error;
8691         }
8692         cache_resource->normal_path_tbl = tbl;
8693         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8694                 cache_resource->default_miss =
8695                                 mlx5_glue->dr_create_flow_action_default_miss();
8696                 if (!cache_resource->default_miss) {
8697                         rte_flow_error_set(error, ENOMEM,
8698                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8699                                                 NULL,
8700                                                 "cannot create default miss "
8701                                                 "action");
8702                         goto error;
8703                 }
8704                 sample_dv_actions[resource->sample_act.actions_num++] =
8705                                                 cache_resource->default_miss;
8706         }
8707         /* Create a DR sample action */
8708         sampler_attr.sample_ratio = cache_resource->ratio;
8709         sampler_attr.default_next_table = tbl->obj;
8710         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
8711         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
8712                                                         &sample_dv_actions[0];
8713         sampler_attr.action = cache_resource->set_action;
8714         cache_resource->verbs_action =
8715                 mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
8716         if (!cache_resource->verbs_action) {
8717                 rte_flow_error_set(error, ENOMEM,
8718                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8719                                         NULL, "cannot create sample action");
8720                 goto error;
8721         }
8722         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
8723         ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list,
8724                      dev_flow->handle->dvh.rix_sample, cache_resource,
8725                      next);
8726         dev_flow->dv.sample_res = cache_resource;
8727         DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++",
8728                 (void *)cache_resource,
8729                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
8730         return 0;
8731 error:
8732         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8733                 if (cache_resource->default_miss)
8734                         claim_zero(mlx5_glue->destroy_flow_action
8735                                 (cache_resource->default_miss));
8736         } else {
8737                 if (cache_resource->sample_idx.rix_hrxq &&
8738                     !mlx5_hrxq_release(dev,
8739                                 cache_resource->sample_idx.rix_hrxq))
8740                         cache_resource->sample_idx.rix_hrxq = 0;
8741                 if (cache_resource->sample_idx.rix_tag &&
8742                     !flow_dv_tag_release(dev,
8743                                 cache_resource->sample_idx.rix_tag))
8744                         cache_resource->sample_idx.rix_tag = 0;
8745                 if (cache_resource->sample_idx.cnt) {
8746                         flow_dv_counter_release(dev,
8747                                 cache_resource->sample_idx.cnt);
8748                         cache_resource->sample_idx.cnt = 0;
8749                 }
8750         }
8751         if (cache_resource->normal_path_tbl)
8752                 flow_dv_tbl_resource_release(dev,
8753                                 cache_resource->normal_path_tbl);
8754         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE],
8755                                 dev_flow->handle->dvh.rix_sample);
8756         dev_flow->handle->dvh.rix_sample = 0;
8757         return -rte_errno;
8758 }
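
/*
 * Editor's note: the lookup-else-insert idiom behind the resource
 * registration helpers in this file, sketched with an ordinary singly
 * linked list in place of the indexed-pool ILIST. All names are
 * hypothetical.
 */
#include <stdlib.h>

struct cached_res {
        struct cached_res *next;
        uint32_t refcnt;
        uint32_t key;
};

static struct cached_res *
cached_res_register(struct cached_res **head, const struct cached_res *tmpl)
{
        struct cached_res *r;

        for (r = *head; r; r = r->next) {
                if (r->key == tmpl->key) {
                        /* Cache hit: share the resource, bump refcount. */
                        __atomic_fetch_add(&r->refcnt, 1, __ATOMIC_RELAXED);
                        return r;
                }
        }
        r = malloc(sizeof(*r));
        if (!r)
                return NULL;
        r->key = tmpl->key;
        r->next = *head;
        __atomic_store_n(&r->refcnt, 1, __ATOMIC_RELAXED);
        *head = r;
        return r;
}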
8759
8760 /**
8761  * Find existing destination array resource or create and register a new one.
8762  *
8763  * @param[in, out] dev
8764  *   Pointer to rte_eth_dev structure.
8765  * @param[in] attr
8766  *   Attributes of flow that includes this item.
8767  * @param[in] resource
8768  *   Pointer to destination array resource.
8769  * @param[in, out] dev_flow
8770  *   Pointer to the dev_flow.
8771  * @param[out] error
8772  *   pointer to error structure.
8773  *
8774  * @return
8775  *   0 on success, a negative errno value otherwise and rte_errno is set.
8776  */
8777 static int
8778 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8779                          const struct rte_flow_attr *attr,
8780                          struct mlx5_flow_dv_dest_array_resource *resource,
8781                          struct mlx5_flow *dev_flow,
8782                          struct rte_flow_error *error)
8783 {
8784         struct mlx5_flow_dv_dest_array_resource *cache_resource;
8785         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
8786         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
8787         struct mlx5_priv *priv = dev->data->dev_private;
8788         struct mlx5_dev_ctx_shared *sh = priv->sh;
8789         struct mlx5_flow_sub_actions_list *sample_act;
8790         struct mlx5dv_dr_domain *domain;
8791         uint32_t idx = 0;
8792
8793         /* Lookup a matching resource from cache. */
8794         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8795                       sh->dest_array_list,
8796                       idx, cache_resource, next) {
8797                 if (resource->num_of_dest == cache_resource->num_of_dest &&
8798                     resource->ft_type == cache_resource->ft_type &&
8799                     !memcmp((void *)cache_resource->sample_act,
8800                             (void *)resource->sample_act,
8801                            (resource->num_of_dest *
8802                            sizeof(struct mlx5_flow_sub_actions_list)))) {
8803                         DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
8804                                 (void *)cache_resource,
8805                                 __atomic_load_n(&cache_resource->refcnt,
8806                                                 __ATOMIC_RELAXED));
8807                         __atomic_fetch_add(&cache_resource->refcnt, 1,
8808                                            __ATOMIC_RELAXED);
8809                         dev_flow->handle->dvh.rix_dest_array = idx;
8810                         dev_flow->dv.dest_array_res = cache_resource;
8811                         return 0;
8812                 }
8813         }
8814         /* Register new destination array resource. */
8815         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8816                                        &dev_flow->handle->dvh.rix_dest_array);
8817         if (!cache_resource)
8818                 return rte_flow_error_set(error, ENOMEM,
8819                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8820                                           NULL,
8821                                           "cannot allocate resource memory");
8822         *cache_resource = *resource;
8823         if (attr->transfer)
8824                 domain = sh->fdb_domain;
8825         else if (attr->ingress)
8826                 domain = sh->rx_domain;
8827         else
8828                 domain = sh->tx_domain;
8829         for (idx = 0; idx < resource->num_of_dest; idx++) {
8830                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
8831                                  mlx5_malloc(MLX5_MEM_ZERO,
8832                                  sizeof(struct mlx5dv_dr_action_dest_attr),
8833                                  0, SOCKET_ID_ANY);
8834                 if (!dest_attr[idx]) {
8835                         rte_flow_error_set(error, ENOMEM,
8836                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8837                                            NULL,
8838                                            "cannot allocate resource memory");
8839                         goto error;
8840                 }
8841                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
8842                 sample_act = &resource->sample_act[idx];
8843                 if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
8844                         dest_attr[idx]->dest = sample_act->dr_queue_action;
8845                 } else if (sample_act->action_flags ==
8846                           (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
8847                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
8848                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
8849                         dest_attr[idx]->dest_reformat->reformat =
8850                                         sample_act->dr_encap_action;
8851                         dest_attr[idx]->dest_reformat->dest =
8852                                         sample_act->dr_port_id_action;
8853                 } else if (sample_act->action_flags ==
8854                            MLX5_FLOW_ACTION_PORT_ID) {
8855                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
8856                 }
8857         }
8858         /* Create a dest array action. */
8859         cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
8860                                                 (domain,
8861                                                  cache_resource->num_of_dest,
8862                                                  dest_attr);
8863         if (!cache_resource->action) {
8864                 rte_flow_error_set(error, ENOMEM,
8865                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8866                                    NULL,
8867                                    "cannot create destination array action");
8868                 goto error;
8869         }
8870         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
8871         ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8872                      &sh->dest_array_list,
8873                      dev_flow->handle->dvh.rix_dest_array, cache_resource,
8874                      next);
8875         dev_flow->dv.dest_array_res = cache_resource;
8876         DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
8877                 (void *)cache_resource,
8878                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
8879         for (idx = 0; idx < resource->num_of_dest; idx++)
8880                 mlx5_free(dest_attr[idx]);
8881         return 0;
8882 error:
8883         for (idx = 0; idx < resource->num_of_dest; idx++) {
8884                 struct mlx5_flow_sub_actions_idx *act_res =
8885                                         &cache_resource->sample_idx[idx];
8886                 if (act_res->rix_hrxq &&
8887                     !mlx5_hrxq_release(dev,
8888                                 act_res->rix_hrxq))
8889                         act_res->rix_hrxq = 0;
8890                 if (act_res->rix_encap_decap &&
8891                         !flow_dv_encap_decap_resource_release(dev,
8892                                 act_res->rix_encap_decap))
8893                         act_res->rix_encap_decap = 0;
8894                 if (act_res->rix_port_id_action &&
8895                         !flow_dv_port_id_action_resource_release(dev,
8896                                 act_res->rix_port_id_action))
8897                         act_res->rix_port_id_action = 0;
8898                 if (dest_attr[idx])
8899                         mlx5_free(dest_attr[idx]);
8900         }
8901
8902         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8903                                 dev_flow->handle->dvh.rix_dest_array);
8904         dev_flow->handle->dvh.rix_dest_array = 0;
8905         return -rte_errno;
8906 }
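
/*
 * Editor's note: the goto-error unwinding style used above, isolated.
 * Resources acquired so far are released in reverse order; pointers are
 * pre-initialized so the error path can free unconditionally. Names are
 * hypothetical.
 */
#include <stdlib.h>

static int
acquire_pair(void **first, void **second)
{
        *first = NULL;
        *second = NULL;
        *first = malloc(16);
        if (!*first)
                goto error;
        *second = malloc(16);
        if (!*second)
                goto error;
        return 0;
error:
        free(*second); /* free(NULL) is a no-op */
        free(*first);
        *first = NULL;
        *second = NULL;
        return -1;
}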
8907
8908 /**
8909  * Convert Sample action to DV specification.
8910  *
8911  * @param[in] dev
8912  *   Pointer to rte_eth_dev structure.
8913  * @param[in] action
8914  *   Pointer to action structure.
8915  * @param[in, out] dev_flow
8916  *   Pointer to the mlx5_flow.
8917  * @param[in] attr
8918  *   Pointer to the flow attributes.
8919  * @param[in, out] num_of_dest
8920  *   Pointer to the number of destinations.
8921  * @param[in, out] sample_actions
8922  *   Pointer to sample actions list.
8923  * @param[in, out] res
8924  *   Pointer to sample resource.
8925  * @param[out] error
8926  *   Pointer to the error structure.
8927  *
8928  * @return
8929  *   0 on success, a negative errno value otherwise and rte_errno is set.
8930  */
8931 static int
8932 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
8933                                 const struct rte_flow_action *action,
8934                                 struct mlx5_flow *dev_flow,
8935                                 const struct rte_flow_attr *attr,
8936                                 uint32_t *num_of_dest,
8937                                 void **sample_actions,
8938                                 struct mlx5_flow_dv_sample_resource *res,
8939                                 struct rte_flow_error *error)
8940 {
8941         struct mlx5_priv *priv = dev->data->dev_private;
8942         const struct rte_flow_action_sample *sample_action;
8943         const struct rte_flow_action *sub_actions;
8944         const struct rte_flow_action_queue *queue;
8945         struct mlx5_flow_sub_actions_list *sample_act;
8946         struct mlx5_flow_sub_actions_idx *sample_idx;
8947         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8948         struct mlx5_flow_rss_desc *rss_desc;
8949         uint64_t action_flags = 0;
8950
8951         MLX5_ASSERT(wks);
8952         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
8953         sample_act = &res->sample_act;
8954         sample_idx = &res->sample_idx;
8955         sample_action = (const struct rte_flow_action_sample *)action->conf;
8956         res->ratio = sample_action->ratio;
8957         sub_actions = sample_action->actions;
8958         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
8959                 int type = sub_actions->type;
8960                 uint32_t pre_rix = 0;
8961                 void *pre_r;
8962                 switch (type) {
8963                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8964                 {
8965                         struct mlx5_hrxq *hrxq;
8966                         uint32_t hrxq_idx;
8967
8968                         queue = sub_actions->conf;
8969                         rss_desc->queue_num = 1;
8970                         rss_desc->queue[0] = queue->index;
8971                         hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
8972                                         rss_desc, &hrxq_idx);
8973                         if (!hrxq)
8974                                 return rte_flow_error_set
8975                                         (error, rte_errno,
8976                                          RTE_FLOW_ERROR_TYPE_ACTION,
8977                                          NULL,
8978                                          "cannot create fate queue");
8979                         sample_act->dr_queue_action = hrxq->action;
8980                         sample_idx->rix_hrxq = hrxq_idx;
8981                         sample_actions[sample_act->actions_num++] =
8982                                                 hrxq->action;
8983                         (*num_of_dest)++;
8984                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
8985                         if (action_flags & MLX5_FLOW_ACTION_MARK)
8986                                 dev_flow->handle->rix_hrxq = hrxq_idx;
8987                         dev_flow->handle->fate_action =
8988                                         MLX5_FLOW_FATE_QUEUE;
8989                         break;
8990                 }
8991                 case RTE_FLOW_ACTION_TYPE_MARK:
8992                 {
8993                         uint32_t tag_be = mlx5_flow_mark_set
8994                                 (((const struct rte_flow_action_mark *)
8995                                 (sub_actions->conf))->id);
8996
8997                         dev_flow->handle->mark = 1;
8998                         pre_rix = dev_flow->handle->dvh.rix_tag;
8999                         /* Save the mark resource before sample */
9000                         pre_r = dev_flow->dv.tag_resource;
9001                         if (flow_dv_tag_resource_register(dev, tag_be,
9002                                                   dev_flow, error))
9003                                 return -rte_errno;
9004                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9005                         sample_act->dr_tag_action =
9006                                 dev_flow->dv.tag_resource->action;
9007                         sample_idx->rix_tag =
9008                                 dev_flow->handle->dvh.rix_tag;
9009                         sample_actions[sample_act->actions_num++] =
9010                                                 sample_act->dr_tag_action;
9011                         /* Recover the mark resource after sample */
9012                         dev_flow->dv.tag_resource = pre_r;
9013                         dev_flow->handle->dvh.rix_tag = pre_rix;
9014                         action_flags |= MLX5_FLOW_ACTION_MARK;
9015                         break;
9016                 }
9017                 case RTE_FLOW_ACTION_TYPE_COUNT:
9018                 {
9019                         uint32_t counter;
9020
9021                         counter = flow_dv_translate_create_counter(dev,
9022                                         dev_flow, sub_actions->conf, NULL);
9023                         if (!counter)
9024                                 return rte_flow_error_set
9025                                                 (error, rte_errno,
9026                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9027                                                  NULL,
9028                                                  "cannot create counter"
9029                                                  " object.");
9030                         sample_idx->cnt = counter;
9031                         sample_act->dr_cnt_action =
9032                                   (flow_dv_counter_get_by_idx(dev,
9033                                   counter, NULL))->action;
9034                         sample_actions[sample_act->actions_num++] =
9035                                                 sample_act->dr_cnt_action;
9036                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9037                         break;
9038                 }
9039                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9040                 {
9041                         struct mlx5_flow_dv_port_id_action_resource
9042                                         port_id_resource;
9043                         uint32_t port_id = 0;
9044
9045                         memset(&port_id_resource, 0, sizeof(port_id_resource));
9046                         /* Save the port id resource before sample */
9047                         pre_rix = dev_flow->handle->rix_port_id_action;
9048                         pre_r = dev_flow->dv.port_id_action;
9049                         if (flow_dv_translate_action_port_id(dev, sub_actions,
9050                                                              &port_id, error))
9051                                 return -rte_errno;
9052                         port_id_resource.port_id = port_id;
9053                         if (flow_dv_port_id_action_resource_register
9054                             (dev, &port_id_resource, dev_flow, error))
9055                                 return -rte_errno;
9056                         sample_act->dr_port_id_action =
9057                                 dev_flow->dv.port_id_action->action;
9058                         sample_idx->rix_port_id_action =
9059                                 dev_flow->handle->rix_port_id_action;
9060                         sample_actions[sample_act->actions_num++] =
9061                                                 sample_act->dr_port_id_action;
9062                         /* Recover the port id resource after sample */
9063                         dev_flow->dv.port_id_action = pre_r;
9064                         dev_flow->handle->rix_port_id_action = pre_rix;
9065                         (*num_of_dest)++;
9066                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9067                         break;
9068                 }
9069                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9070                         /* Save the encap resource before sample */
9071                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9072                         pre_r = dev_flow->dv.encap_decap;
9073                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
9074                                                            dev_flow,
9075                                                            attr->transfer,
9076                                                            error))
9077                                 return -rte_errno;
9078                         sample_act->dr_encap_action =
9079                                 dev_flow->dv.encap_decap->action;
9080                         sample_idx->rix_encap_decap =
9081                                 dev_flow->handle->dvh.rix_encap_decap;
9082                         sample_actions[sample_act->actions_num++] =
9083                                                 sample_act->dr_encap_action;
9084                         /* Recover the encap resource after sample */
9085                         dev_flow->dv.encap_decap = pre_r;
9086                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9087                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9088                         break;
9089                 default:
9090                         return rte_flow_error_set(error, EINVAL,
9091                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9092                                 NULL,
9093                                 "unsupported action for sampler");
9094                 }
9095         }
9096         sample_act->action_flags = action_flags;
9097         res->ft_id = dev_flow->dv.group;
9098         if (attr->transfer) {
9099                 union {
9100                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9101                         uint64_t set_action;
9102                 } action_ctx = { .set_action = 0 };
9103
9104                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9105                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9106                          MLX5_MODIFICATION_TYPE_SET);
9107                 MLX5_SET(set_action_in, action_ctx.action_in, field,
9108                          MLX5_MODI_META_REG_C_0);
9109                 MLX5_SET(set_action_in, action_ctx.action_in, data,
9110                          priv->vport_meta_tag);
9111                 res->set_action = action_ctx.set_action;
9112         } else if (attr->ingress) {
9113                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9114         }
9115         return 0;
9116 }
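
/*
 * Editor's note: a sketch of the control-word packing done with the
 * union and MLX5_SET() above, using plain shifts. The field layout here
 * is illustrative only and does not follow the PRM.
 */
static uint64_t
set_action_pack(uint8_t action_type, uint8_t field_id, uint32_t data)
{
        uint64_t word = 0;

        word |= (uint64_t)action_type << 56;
        word |= (uint64_t)field_id << 48;
        word |= data;
        return word;
}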
9117
9118 /**
9119  * Create the sample action and, for mirroring, the destination array.
9120  *
9121  * @param[in] dev
9122  *   Pointer to rte_eth_dev structure.
9123  * @param[in, out] dev_flow
9124  *   Pointer to the mlx5_flow.
9125  * @param[in] attr
9126  *   Pointer to the flow attributes.
9127  * @param[in] num_of_dest
9128  *   The number of destinations.
9129  * @param[in, out] res
9130  *   Pointer to sample resource.
9131  * @param[in, out] mdest_res
9132  *   Pointer to destination array resource.
9133  * @param[in] sample_actions
9134  *   Pointer to sample path actions list.
9135  * @param[in] action_flags
9136  *   Holds the actions detected until now.
9137  * @param[out] error
9138  *   Pointer to the error structure.
9139  *
9140  * @return
9141  *   0 on success, a negative errno value otherwise and rte_errno is set.
9142  */
9143 static int
9144 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9145                              struct mlx5_flow *dev_flow,
9146                              const struct rte_flow_attr *attr,
9147                              uint32_t num_of_dest,
9148                              struct mlx5_flow_dv_sample_resource *res,
9149                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
9150                              void **sample_actions,
9151                              uint64_t action_flags,
9152                              struct rte_flow_error *error)
9153 {
9154         /* Update the normal path action resource at the last index of the array. */
9155         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9156         struct mlx5_flow_sub_actions_list *sample_act =
9157                                         &mdest_res->sample_act[dest_index];
9158         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9159         struct mlx5_flow_rss_desc *rss_desc;
9160         uint32_t normal_idx = 0;
9161         struct mlx5_hrxq *hrxq;
9162         uint32_t hrxq_idx;
9163
9164         MLX5_ASSERT(wks);
9165         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9166         if (num_of_dest > 1) {
9167                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9168                         /* Handle QP action for mirroring */
9169                         hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
9170                                                        rss_desc, &hrxq_idx);
9171                         if (!hrxq)
9172                                 return rte_flow_error_set
9173                                      (error, rte_errno,
9174                                       RTE_FLOW_ERROR_TYPE_ACTION,
9175                                       NULL,
9176                                       "cannot create rx queue");
9177                         normal_idx++;
9178                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9179                         sample_act->dr_queue_action = hrxq->action;
9180                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9181                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9182                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9183                 }
9184                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9185                         normal_idx++;
9186                         mdest_res->sample_idx[dest_index].rix_encap_decap =
9187                                 dev_flow->handle->dvh.rix_encap_decap;
9188                         sample_act->dr_encap_action =
9189                                 dev_flow->dv.encap_decap->action;
9190                 }
9191                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9192                         normal_idx++;
9193                         mdest_res->sample_idx[dest_index].rix_port_id_action =
9194                                 dev_flow->handle->rix_port_id_action;
9195                         sample_act->dr_port_id_action =
9196                                 dev_flow->dv.port_id_action->action;
9197                 }
9198                 sample_act->actions_num = normal_idx;
9199                 /* Update the sample action resource at the first index of the array. */
9200                 mdest_res->ft_type = res->ft_type;
9201                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9202                                 sizeof(struct mlx5_flow_sub_actions_idx));
9203                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9204                                 sizeof(struct mlx5_flow_sub_actions_list));
9205                 mdest_res->num_of_dest = num_of_dest;
9206                 if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
9207                                                          dev_flow, error))
9208                         return rte_flow_error_set(error, EINVAL,
9209                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9210                                                   NULL, "can't create sample "
9211                                                   "action");
9212         } else {
9213                 if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
9214                                                      sample_actions, error))
9215                         return rte_flow_error_set(error, EINVAL,
9216                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9217                                                   NULL,
9218                                                   "can't create sample action");
9219         }
9220         return 0;
9221 }
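
/*
 * Editor's note: the fate selection above in one line. A single
 * destination maps to a sampler action; multiple destinations map to a
 * destination-array (mirror) action. Callback names are hypothetical.
 */
static int
sample_fate_dispatch(uint32_t num_of_dest,
                     int (*register_dest_array)(void),
                     int (*register_sampler)(void))
{
        return num_of_dest > 1 ? register_dest_array() :
                                 register_sampler();
}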
9222
9223 /**
9224  * Fill the flow with DV spec, lock free
9225  * (mutex should be acquired by caller).
9226  *
9227  * @param[in] dev
9228  *   Pointer to rte_eth_dev structure.
9229  * @param[in, out] dev_flow
9230  *   Pointer to the sub flow.
9231  * @param[in] attr
9232  *   Pointer to the flow attributes.
9233  * @param[in] items
9234  *   Pointer to the list of items.
9235  * @param[in] actions
9236  *   Pointer to the list of actions.
9237  * @param[out] error
9238  *   Pointer to the error structure.
9239  *
9240  * @return
9241  *   0 on success, a negative errno value otherwise and rte_errno is set.
9242  */
9243 static int
9244 __flow_dv_translate(struct rte_eth_dev *dev,
9245                     struct mlx5_flow *dev_flow,
9246                     const struct rte_flow_attr *attr,
9247                     const struct rte_flow_item items[],
9248                     const struct rte_flow_action actions[],
9249                     struct rte_flow_error *error)
9250 {
9251         struct mlx5_priv *priv = dev->data->dev_private;
9252         struct mlx5_dev_config *dev_conf = &priv->config;
9253         struct rte_flow *flow = dev_flow->flow;
9254         struct mlx5_flow_handle *handle = dev_flow->handle;
9255         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9256         struct mlx5_flow_rss_desc *rss_desc;
9257         uint64_t item_flags = 0;
9258         uint64_t last_item = 0;
9259         uint64_t action_flags = 0;
9260         uint64_t priority = attr->priority;
9261         struct mlx5_flow_dv_matcher matcher = {
9262                 .mask = {
9263                         .size = sizeof(matcher.mask.buf) -
9264                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9265                 },
9266         };
9267         int actions_n = 0;
9268         bool actions_end = false;
9269         union {
9270                 struct mlx5_flow_dv_modify_hdr_resource res;
9271                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9272                             sizeof(struct mlx5_modification_cmd) *
9273                             (MLX5_MAX_MODIFY_NUM + 1)];
9274         } mhdr_dummy;
9275         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9276         const struct rte_flow_action_count *count = NULL;
9277         const struct rte_flow_action_age *age = NULL;
9278         union flow_dv_attr flow_attr = { .attr = 0 };
9279         uint32_t tag_be;
9280         union mlx5_flow_tbl_key tbl_key;
9281         uint32_t modify_action_position = UINT32_MAX;
9282         void *match_mask = matcher.mask.buf;
9283         void *match_value = dev_flow->dv.value.buf;
9284         uint8_t next_protocol = 0xff;
9285         struct rte_vlan_hdr vlan = { 0 };
9286         struct mlx5_flow_dv_dest_array_resource mdest_res;
9287         struct mlx5_flow_dv_sample_resource sample_res;
9288         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9289         struct mlx5_flow_sub_actions_list *sample_act;
9290         uint32_t sample_act_pos = UINT32_MAX;
9291         uint32_t num_of_dest = 0;
9292         int tmp_actions_n = 0;
9293         uint32_t table;
9294         int ret = 0;
9295         const struct mlx5_flow_tunnel *tunnel;
9296         struct flow_grp_info grp_info = {
9297                 .external = !!dev_flow->external,
9298                 .transfer = !!attr->transfer,
9299                 .fdb_def_rule = !!priv->fdb_def_rule,
9300         };
9301
9302         MLX5_ASSERT(wks);
9303         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9304         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9305         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9306         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9307                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9308         /* Update the normal path action resource at the last index of the array. */
9309         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9310         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9311                  flow_items_to_tunnel(items) :
9312                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9313                  flow_actions_to_tunnel(actions) :
9314                  dev_flow->tunnel;
9317         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9318                                 (dev, tunnel, attr, items, actions);
9319         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9320                                        grp_info, error);
9321         if (ret)
9322                 return ret;
9323         dev_flow->dv.group = table;
9324         if (attr->transfer)
9325                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9326         if (priority == MLX5_FLOW_PRIO_RSVD)
9327                 priority = dev_conf->flow_prio - 1;
9328         /* number of actions must be set to 0 in case of dirty stack. */
9329         mhdr_res->actions_num = 0;
9330         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9331                 /*
9332                  * Do not add a decap action if the match rule drops the
9333                  * packet; HW rejects rules that combine decap and drop.
9334                  */
9335                 bool add_decap = true;
9336                 const struct rte_flow_action *ptr = actions;
9337                 struct mlx5_flow_tbl_resource *tbl;
9338
9339                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9340                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9341                                 add_decap = false;
9342                                 break;
9343                         }
9344                 }
9345                 if (add_decap) {
9346                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9347                                                            attr->transfer,
9348                                                            error))
9349                                 return -rte_errno;
9350                         dev_flow->dv.actions[actions_n++] =
9351                                         dev_flow->dv.encap_decap->action;
9352                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9353                 }
9354                 /*
9355                  * Bind table_id with <group, table> for the tunnel match rule.
9356                  * The tunnel set rule establishes that binding in the JUMP
9357                  * action handler. This is required when an application
9358                  * creates the tunnel match rule before the tunnel set rule.
9359                  */
9360                 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9361                                                attr->transfer,
9362                                                !!dev_flow->external, tunnel,
9363                                                attr->group, error);
9364                 if (!tbl)
9365                         return rte_flow_error_set
9366                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
9367                                actions, "cannot register tunnel group");
9368         }
9369         for (; !actions_end ; actions++) {
9370                 const struct rte_flow_action_queue *queue;
9371                 const struct rte_flow_action_rss *rss;
9372                 const struct rte_flow_action *action = actions;
9373                 const uint8_t *rss_key;
9374                 const struct rte_flow_action_meter *mtr;
9375                 struct mlx5_flow_tbl_resource *tbl;
9376                 uint32_t port_id = 0;
9377                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9378                 int action_type = actions->type;
9379                 const struct rte_flow_action *found_action = NULL;
9380                 struct mlx5_flow_meter *fm = NULL;
9381                 uint32_t jump_group = 0;
9382
9383                 if (!mlx5_flow_os_action_supported(action_type))
9384                         return rte_flow_error_set(error, ENOTSUP,
9385                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9386                                                   actions,
9387                                                   "action not supported");
9388                 switch (action_type) {
9389                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9390                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9391                         break;
9392                 case RTE_FLOW_ACTION_TYPE_VOID:
9393                         break;
9394                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9395                         if (flow_dv_translate_action_port_id(dev, action,
9396                                                              &port_id, error))
9397                                 return -rte_errno;
9398                         port_id_resource.port_id = port_id;
9399                         MLX5_ASSERT(!handle->rix_port_id_action);
9400                         if (flow_dv_port_id_action_resource_register
9401                             (dev, &port_id_resource, dev_flow, error))
9402                                 return -rte_errno;
9403                         dev_flow->dv.actions[actions_n++] =
9404                                         dev_flow->dv.port_id_action->action;
9405                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9406                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9407                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9408                         num_of_dest++;
9409                         break;
9410                 case RTE_FLOW_ACTION_TYPE_FLAG:
9411                         action_flags |= MLX5_FLOW_ACTION_FLAG;
9412                         dev_flow->handle->mark = 1;
9413                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9414                                 struct rte_flow_action_mark mark = {
9415                                         .id = MLX5_FLOW_MARK_DEFAULT,
9416                                 };
9417
9418                                 if (flow_dv_convert_action_mark(dev, &mark,
9419                                                                 mhdr_res,
9420                                                                 error))
9421                                         return -rte_errno;
9422                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9423                                 break;
9424                         }
9425                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9426                         /*
9427                          * Only one FLAG or MARK is supported per device flow
9428                          * right now. So the pointer to the tag resource must be
9429                          * zero before the register process.
9430                          */
9431                         MLX5_ASSERT(!handle->dvh.rix_tag);
9432                         if (flow_dv_tag_resource_register(dev, tag_be,
9433                                                           dev_flow, error))
9434                                 return -rte_errno;
9435                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9436                         dev_flow->dv.actions[actions_n++] =
9437                                         dev_flow->dv.tag_resource->action;
9438                         break;
9439                 case RTE_FLOW_ACTION_TYPE_MARK:
9440                         action_flags |= MLX5_FLOW_ACTION_MARK;
9441                         dev_flow->handle->mark = 1;
9442                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9443                                 const struct rte_flow_action_mark *mark =
9444                                         (const struct rte_flow_action_mark *)
9445                                                 actions->conf;
9446
9447                                 if (flow_dv_convert_action_mark(dev, mark,
9448                                                                 mhdr_res,
9449                                                                 error))
9450                                         return -rte_errno;
9451                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9452                                 break;
9453                         }
9454                         /* Fall-through */
9455                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9456                         /* Legacy (non-extensive) MARK action. */
9457                         tag_be = mlx5_flow_mark_set
9458                               (((const struct rte_flow_action_mark *)
9459                                (actions->conf))->id);
9460                         MLX5_ASSERT(!handle->dvh.rix_tag);
9461                         if (flow_dv_tag_resource_register(dev, tag_be,
9462                                                           dev_flow, error))
9463                                 return -rte_errno;
9464                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9465                         dev_flow->dv.actions[actions_n++] =
9466                                         dev_flow->dv.tag_resource->action;
9467                         break;
9468                 case RTE_FLOW_ACTION_TYPE_SET_META:
9469                         if (flow_dv_convert_action_set_meta
9470                                 (dev, mhdr_res, attr,
9471                                  (const struct rte_flow_action_set_meta *)
9472                                   actions->conf, error))
9473                                 return -rte_errno;
9474                         action_flags |= MLX5_FLOW_ACTION_SET_META;
9475                         break;
9476                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9477                         if (flow_dv_convert_action_set_tag
9478                                 (dev, mhdr_res,
9479                                  (const struct rte_flow_action_set_tag *)
9480                                   actions->conf, error))
9481                                 return -rte_errno;
9482                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9483                         break;
9484                 case RTE_FLOW_ACTION_TYPE_DROP:
9485                         action_flags |= MLX5_FLOW_ACTION_DROP;
9486                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9487                         break;
9488                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9489                         queue = actions->conf;
9490                         rss_desc->queue_num = 1;
9491                         rss_desc->queue[0] = queue->index;
9492                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9493                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9494                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9495                         num_of_dest++;
9496                         break;
9497                 case RTE_FLOW_ACTION_TYPE_RSS:
9498                         rss = actions->conf;
9499                         memcpy(rss_desc->queue, rss->queue,
9500                                rss->queue_num * sizeof(uint16_t));
9501                         rss_desc->queue_num = rss->queue_num;
9502                         /* NULL RSS key indicates default RSS key. */
9503                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9504                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9505                         /*
9506                          * rss->level and rss->types should be set in advance
9507                          * when expanding items for RSS.
9508                          */
9509                         action_flags |= MLX5_FLOW_ACTION_RSS;
9510                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9511                         break;
9512                 case RTE_FLOW_ACTION_TYPE_AGE:
9513                 case RTE_FLOW_ACTION_TYPE_COUNT:
9514                         if (!dev_conf->devx) {
9515                                 return rte_flow_error_set
9516                                               (error, ENOTSUP,
9517                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9518                                                NULL,
9519                                                "count action not supported");
9520                         }
9521                         /* Save information first, will apply later. */
9522                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9523                                 count = action->conf;
9524                         else
9525                                 age = action->conf;
9526                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9527                         break;
9528                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9529                         dev_flow->dv.actions[actions_n++] =
9530                                                 priv->sh->pop_vlan_action;
9531                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9532                         break;
9533                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
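			/*
			 * Any SET_VLAN_VID/SET_VLAN_PCP actions that follow
			 * are folded into the VLAN header being pushed below.
			 */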
9534                         if (!(action_flags &
9535                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9536                                 flow_dev_get_vlan_info_from_items(items, &vlan);
9537                         vlan.eth_proto = rte_be_to_cpu_16
9538                              ((((const struct rte_flow_action_of_push_vlan *)
9539                                                    actions->conf)->ethertype));
9540                         found_action = mlx5_flow_find_action
9541                                         (actions + 1,
9542                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9543                         if (found_action)
9544                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9545                         found_action = mlx5_flow_find_action
9546                                         (actions + 1,
9547                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9548                         if (found_action)
9549                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9550                         if (flow_dv_create_action_push_vlan
9551                                             (dev, attr, &vlan, dev_flow, error))
9552                                 return -rte_errno;
9553                         dev_flow->dv.actions[actions_n++] =
9554                                         dev_flow->dv.push_vlan_res->action;
9555                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9556                         break;
9557                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9558                         /* Already handled by the of_push_vlan action. */
9559                         MLX5_ASSERT(action_flags &
9560                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9561                         break;
9562                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9563                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9564                                 break;
9565                         flow_dev_get_vlan_info_from_items(items, &vlan);
9566                         mlx5_update_vlan_vid_pcp(actions, &vlan);
9567                         /* If no VLAN push - this is a modify header action */
9568                         if (flow_dv_convert_action_modify_vlan_vid
9569                                                 (mhdr_res, actions, error))
9570                                 return -rte_errno;
9571                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9572                         break;
9573                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9574                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9575                         if (flow_dv_create_action_l2_encap(dev, actions,
9576                                                            dev_flow,
9577                                                            attr->transfer,
9578                                                            error))
9579                                 return -rte_errno;
9580                         dev_flow->dv.actions[actions_n++] =
9581                                         dev_flow->dv.encap_decap->action;
9582                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9583                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9584                                 sample_act->action_flags |=
9585                                                         MLX5_FLOW_ACTION_ENCAP;
9586                         break;
9587                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9588                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9589                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9590                                                            attr->transfer,
9591                                                            error))
9592                                 return -rte_errno;
9593                         dev_flow->dv.actions[actions_n++] =
9594                                         dev_flow->dv.encap_decap->action;
9595                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9596                         break;
9597                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9598                         /* Handle encap with preceding decap. */
9599                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9600                                 if (flow_dv_create_action_raw_encap
9601                                         (dev, actions, dev_flow, attr, error))
9602                                         return -rte_errno;
9603                                 dev_flow->dv.actions[actions_n++] =
9604                                         dev_flow->dv.encap_decap->action;
9605                         } else {
9606                                 /* Handle encap without preceding decap. */
9607                                 if (flow_dv_create_action_l2_encap
9608                                     (dev, actions, dev_flow, attr->transfer,
9609                                      error))
9610                                         return -rte_errno;
9611                                 dev_flow->dv.actions[actions_n++] =
9612                                         dev_flow->dv.encap_decap->action;
9613                         }
9614                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9615                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9616                                 sample_act->action_flags |=
9617                                                         MLX5_FLOW_ACTION_ENCAP;
9618                         break;
9619                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
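			/* Peek past VOID actions for a following RAW_ENCAP. */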
9620                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9621                                 ;
9622                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9623                                 if (flow_dv_create_action_l2_decap
9624                                     (dev, dev_flow, attr->transfer, error))
9625                                         return -rte_errno;
9626                                 dev_flow->dv.actions[actions_n++] =
9627                                         dev_flow->dv.encap_decap->action;
9628                         }
9629                         /* If decap is followed by encap, handle it at encap. */
9630                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9631                         break;
9632                 case RTE_FLOW_ACTION_TYPE_JUMP:
9633                         jump_group = ((const struct rte_flow_action_jump *)
9634                                                         action->conf)->group;
9635                         grp_info.std_tbl_fix = 0;
9636                         ret = mlx5_flow_group_to_table(dev, tunnel,
9637                                                        jump_group,
9638                                                        &table,
9639                                                        grp_info, error);
9640                         if (ret)
9641                                 return ret;
9642                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9643                                                        attr->transfer,
9644                                                        !!dev_flow->external,
9645                                                        tunnel, jump_group,
9646                                                        error);
9647                         if (!tbl)
9648                                 return rte_flow_error_set
9649                                                 (error, errno,
9650                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9651                                                  NULL,
9652                                                  "cannot create jump action.");
9653                         if (flow_dv_jump_tbl_resource_register
9654                             (dev, tbl, dev_flow, error)) {
9655                                 flow_dv_tbl_resource_release(dev, tbl);
9656                                 return rte_flow_error_set
9657                                                 (error, errno,
9658                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9659                                                  NULL,
9660                                                  "cannot create jump action.");
9661                         }
9662                         dev_flow->dv.actions[actions_n++] =
9663                                         dev_flow->dv.jump->action;
9664                         action_flags |= MLX5_FLOW_ACTION_JUMP;
9665                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9666                         break;
9667                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9668                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9669                         if (flow_dv_convert_action_modify_mac
9670                                         (mhdr_res, actions, error))
9671                                 return -rte_errno;
9672                         action_flags |= actions->type ==
9673                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
9674                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
9675                                         MLX5_FLOW_ACTION_SET_MAC_DST;
9676                         break;
9677                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
9678                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
9679                         if (flow_dv_convert_action_modify_ipv4
9680                                         (mhdr_res, actions, error))
9681                                 return -rte_errno;
9682                         action_flags |= actions->type ==
9683                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
9684                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
9685                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
9686                         break;
9687                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
9688                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
9689                         if (flow_dv_convert_action_modify_ipv6
9690                                         (mhdr_res, actions, error))
9691                                 return -rte_errno;
9692                         action_flags |= actions->type ==
9693                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
9694                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
9695                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
9696                         break;
9697                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
9698                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
9699                         if (flow_dv_convert_action_modify_tp
9700                                         (mhdr_res, actions, items,
9701                                          &flow_attr, dev_flow, !!(action_flags &
9702                                          MLX5_FLOW_ACTION_DECAP), error))
9703                                 return -rte_errno;
9704                         action_flags |= actions->type ==
9705                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
9706                                         MLX5_FLOW_ACTION_SET_TP_SRC :
9707                                         MLX5_FLOW_ACTION_SET_TP_DST;
9708                         break;
9709                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
9710                         if (flow_dv_convert_action_modify_dec_ttl
9711                                         (mhdr_res, items, &flow_attr, dev_flow,
9712                                          !!(action_flags &
9713                                          MLX5_FLOW_ACTION_DECAP), error))
9714                                 return -rte_errno;
9715                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
9716                         break;
9717                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
9718                         if (flow_dv_convert_action_modify_ttl
9719                                         (mhdr_res, actions, items, &flow_attr,
9720                                          dev_flow, !!(action_flags &
9721                                          MLX5_FLOW_ACTION_DECAP), error))
9722                                 return -rte_errno;
9723                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
9724                         break;
9725                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
9726                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
9727                         if (flow_dv_convert_action_modify_tcp_seq
9728                                         (mhdr_res, actions, error))
9729                                 return -rte_errno;
9730                         action_flags |= actions->type ==
9731                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
9732                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
9733                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
9734                         break;
9735
9736                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
9737                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
9738                         if (flow_dv_convert_action_modify_tcp_ack
9739                                         (mhdr_res, actions, error))
9740                                 return -rte_errno;
9741                         action_flags |= actions->type ==
9742                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
9743                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
9744                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
9745                         break;
9746                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
9747                         if (flow_dv_convert_action_set_reg
9748                                         (mhdr_res, actions, error))
9749                                 return -rte_errno;
9750                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9751                         break;
9752                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
9753                         if (flow_dv_convert_action_copy_mreg
9754                                         (dev, mhdr_res, actions, error))
9755                                 return -rte_errno;
9756                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9757                         break;
9758                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
9759                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
9760                         dev_flow->handle->fate_action =
9761                                         MLX5_FLOW_FATE_DEFAULT_MISS;
9762                         break;
9763                 case RTE_FLOW_ACTION_TYPE_METER:
9764                         mtr = actions->conf;
9765                         if (!flow->meter) {
9766                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
9767                                                             attr, error);
9768                                 if (!fm)
9769                                         return rte_flow_error_set(error,
9770                                                 rte_errno,
9771                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9772                                                 NULL,
9773                                                 "meter not found "
9774                                                 "or invalid parameters");
9775                                 flow->meter = fm->idx;
9776                         }
9777                         /* Set the meter action. */
9778                         if (!fm) {
9779                                 fm = mlx5_ipool_get(priv->sh->ipool
9780                                                 [MLX5_IPOOL_MTR], flow->meter);
9781                                 if (!fm)
9782                                         return rte_flow_error_set(error,
9783                                                 rte_errno,
9784                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9785                                                 NULL,
9786                                                 "meter not found "
9787                                                 "or invalid parameters");
9788                         }
9789                         dev_flow->dv.actions[actions_n++] =
9790                                 fm->mfts->meter_action;
9791                         action_flags |= MLX5_FLOW_ACTION_METER;
9792                         break;
9793                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
9794                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
9795                                                               actions, error))
9796                                 return -rte_errno;
9797                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
9798                         break;
9799                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
9800                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
9801                                                               actions, error))
9802                                 return -rte_errno;
9803                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
9804                         break;
9805                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
9806                         sample_act_pos = actions_n;
9807                         ret = flow_dv_translate_action_sample(dev,
9808                                                               actions,
9809                                                               dev_flow, attr,
9810                                                               &num_of_dest,
9811                                                               sample_actions,
9812                                                               &sample_res,
9813                                                               error);
9814                         if (ret < 0)
9815                                 return ret;
9816                         actions_n++;
9817                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
9818                         /* Put encap action into group when used with port id. */
9819                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
9820                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
9821                                 sample_act->action_flags |=
9822                                                         MLX5_FLOW_ACTION_ENCAP;
9823                         break;
9824                 case RTE_FLOW_ACTION_TYPE_END:
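			/*
			 * All actions are known now, flush the deferred work:
			 * modify header, counter and sample resources are
			 * created here.
			 */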
9825                         actions_end = true;
9826                         if (mhdr_res->actions_num) {
9827                                 /* Create the modify header action if needed. */
9828                                 if (flow_dv_modify_hdr_resource_register
9829                                         (dev, mhdr_res, dev_flow, error))
9830                                         return -rte_errno;
9831                                 dev_flow->dv.actions[modify_action_position] =
9832                                         handle->dvh.modify_hdr->action;
9833                         }
9834                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
9835                                 flow->counter =
9836                                         flow_dv_translate_create_counter(dev,
9837                                                 dev_flow, count, age);
9838
9839                                 if (!flow->counter)
9840                                         return rte_flow_error_set
9841                                                 (error, rte_errno,
9842                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9843                                                 NULL,
9844                                                 "cannot create counter"
9845                                                 " object.");
9846                                 dev_flow->dv.actions[actions_n] =
9847                                           (flow_dv_counter_get_by_idx(dev,
9848                                           flow->counter, NULL))->action;
9849                                 actions_n++;
9850                         }
9851                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
9852                                 ret = flow_dv_create_action_sample(dev,
9853                                                           dev_flow, attr,
9854                                                           num_of_dest,
9855                                                           &sample_res,
9856                                                           &mdest_res,
9857                                                           sample_actions,
9858                                                           action_flags,
9859                                                           error);
9860                                 if (ret < 0)
9861                                         return rte_flow_error_set
9862                                                 (error, rte_errno,
9863                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9864                                                 NULL,
9865                                                 "cannot create sample action");
9866                                 if (num_of_dest > 1) {
9867                                         dev_flow->dv.actions[sample_act_pos] =
9868                                         dev_flow->dv.dest_array_res->action;
9869                                 } else {
9870                                         dev_flow->dv.actions[sample_act_pos] =
9871                                         dev_flow->dv.sample_res->verbs_action;
9872                                 }
9873                         }
9874                         break;
9875                 default:
9876                         break;
9877                 }
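		/*
		 * All header-modify actions share one modify header resource,
		 * so reserve a single slot in dv.actions at the position of
		 * the first such action; it is filled in when
		 * RTE_FLOW_ACTION_TYPE_END registers the resource.
		 */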
9878                 if (mhdr_res->actions_num &&
9879                     modify_action_position == UINT32_MAX)
9880                         modify_action_position = actions_n++;
9881         }
9882         /*
9883          * For multiple destinations (sample action with ratio=1), the
9884          * encap action and the port id action are combined into a group
9885          * action, so remove the original actions from the flow and use
9886          * the sample action instead.
9887          */
9888         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
9889                 int i;
9890                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9891
9892                 for (i = 0; i < actions_n; i++) {
9893                         if ((sample_act->dr_encap_action &&
9894                                 sample_act->dr_encap_action ==
9895                                 dev_flow->dv.actions[i]) ||
9896                                 (sample_act->dr_port_id_action &&
9897                                 sample_act->dr_port_id_action ==
9898                                 dev_flow->dv.actions[i]))
9899                                 continue;
9900                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
9901                 }
9902                 memcpy((void *)dev_flow->dv.actions,
9903                                 (void *)temp_actions,
9904                                 tmp_actions_n * sizeof(void *));
9905                 actions_n = tmp_actions_n;
9906         }
9907         dev_flow->dv.actions_n = actions_n;
9908         dev_flow->act_flags = action_flags;
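	/* Second pass: translate the match items into the matcher. */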
9909         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
9910                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
9911                 int item_type = items->type;
9912
9913                 if (!mlx5_flow_os_item_supported(item_type))
9914                         return rte_flow_error_set(error, ENOTSUP,
9915                                                   RTE_FLOW_ERROR_TYPE_ITEM,
9916                                                   NULL, "item not supported");
9917                 switch (item_type) {
9918                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
9919                         flow_dv_translate_item_port_id(dev, match_mask,
9920                                                        match_value, items);
9921                         last_item = MLX5_FLOW_ITEM_PORT_ID;
9922                         break;
9923                 case RTE_FLOW_ITEM_TYPE_ETH:
9924                         flow_dv_translate_item_eth(match_mask, match_value,
9925                                                    items, tunnel,
9926                                                    dev_flow->dv.group);
9927                         matcher.priority = action_flags &
9928                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
9929                                         !dev_flow->external ?
9930                                         MLX5_PRIORITY_MAP_L3 :
9931                                         MLX5_PRIORITY_MAP_L2;
9932                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
9933                                              MLX5_FLOW_LAYER_OUTER_L2;
9934                         break;
9935                 case RTE_FLOW_ITEM_TYPE_VLAN:
9936                         flow_dv_translate_item_vlan(dev_flow,
9937                                                     match_mask, match_value,
9938                                                     items, tunnel,
9939                                                     dev_flow->dv.group);
9940                         matcher.priority = MLX5_PRIORITY_MAP_L2;
9941                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
9942                                               MLX5_FLOW_LAYER_INNER_VLAN) :
9943                                              (MLX5_FLOW_LAYER_OUTER_L2 |
9944                                               MLX5_FLOW_LAYER_OUTER_VLAN);
9945                         break;
9946                 case RTE_FLOW_ITEM_TYPE_IPV4:
9947                         mlx5_flow_tunnel_ip_check(items, next_protocol,
9948                                                   &item_flags, &tunnel);
9949                         flow_dv_translate_item_ipv4(match_mask, match_value,
9950                                                     items, tunnel,
9951                                                     dev_flow->dv.group);
9952                         matcher.priority = MLX5_PRIORITY_MAP_L3;
9953                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
9954                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
9955                         if (items->mask != NULL &&
9956                             ((const struct rte_flow_item_ipv4 *)
9957                              items->mask)->hdr.next_proto_id) {
9958                                 next_protocol =
9959                                         ((const struct rte_flow_item_ipv4 *)
9960                                          (items->spec))->hdr.next_proto_id;
9961                                 next_protocol &=
9962                                         ((const struct rte_flow_item_ipv4 *)
9963                                          (items->mask))->hdr.next_proto_id;
9964                         } else {
9965                                 /* Reset for inner layer. */
9966                                 next_protocol = 0xff;
9967                         }
9968                         break;
9969                 case RTE_FLOW_ITEM_TYPE_IPV6:
9970                         mlx5_flow_tunnel_ip_check(items, next_protocol,
9971                                                   &item_flags, &tunnel);
9972                         flow_dv_translate_item_ipv6(match_mask, match_value,
9973                                                     items, tunnel,
9974                                                     dev_flow->dv.group);
9975                         matcher.priority = MLX5_PRIORITY_MAP_L3;
9976                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
9977                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
9978                         if (items->mask != NULL &&
9979                             ((const struct rte_flow_item_ipv6 *)
9980                              items->mask)->hdr.proto) {
9981                                 next_protocol =
9982                                         ((const struct rte_flow_item_ipv6 *)
9983                                          items->spec)->hdr.proto;
9984                                 next_protocol &=
9985                                         ((const struct rte_flow_item_ipv6 *)
9986                                          items->mask)->hdr.proto;
9987                         } else {
9988                                 /* Reset for inner layer. */
9989                                 next_protocol = 0xff;
9990                         }
9991                         break;
9992                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
9993                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
9994                                                              match_value,
9995                                                              items, tunnel);
9996                         last_item = tunnel ?
9997                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
9998                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
9999                         if (items->mask != NULL &&
10000                             ((const struct rte_flow_item_ipv6_frag_ext *)
10001                              items->mask)->hdr.next_header) {
10002                                 next_protocol =
10003                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10004                                  items->spec)->hdr.next_header;
10005                                 next_protocol &=
10006                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10007                                  items->mask)->hdr.next_header;
10008                         } else {
10009                                 /* Reset for inner layer. */
10010                                 next_protocol = 0xff;
10011                         }
10012                         break;
10013                 case RTE_FLOW_ITEM_TYPE_TCP:
10014                         flow_dv_translate_item_tcp(match_mask, match_value,
10015                                                    items, tunnel);
10016                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10017                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10018                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10019                         break;
10020                 case RTE_FLOW_ITEM_TYPE_UDP:
10021                         flow_dv_translate_item_udp(match_mask, match_value,
10022                                                    items, tunnel);
10023                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10024                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10025                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10026                         break;
10027                 case RTE_FLOW_ITEM_TYPE_GRE:
10028                         flow_dv_translate_item_gre(match_mask, match_value,
10029                                                    items, tunnel);
10030                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10031                         last_item = MLX5_FLOW_LAYER_GRE;
10032                         break;
10033                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10034                         flow_dv_translate_item_gre_key(match_mask,
10035                                                        match_value, items);
10036                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10037                         break;
10038                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10039                         flow_dv_translate_item_nvgre(match_mask, match_value,
10040                                                      items, tunnel);
10041                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10042                         last_item = MLX5_FLOW_LAYER_GRE;
10043                         break;
10044                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10045                         flow_dv_translate_item_vxlan(match_mask, match_value,
10046                                                      items, tunnel);
10047                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10048                         last_item = MLX5_FLOW_LAYER_VXLAN;
10049                         break;
10050                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10051                         flow_dv_translate_item_vxlan_gpe(match_mask,
10052                                                          match_value, items,
10053                                                          tunnel);
10054                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10055                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10056                         break;
10057                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10058                         flow_dv_translate_item_geneve(match_mask, match_value,
10059                                                       items, tunnel);
10060                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10061                         last_item = MLX5_FLOW_LAYER_GENEVE;
10062                         break;
10063                 case RTE_FLOW_ITEM_TYPE_MPLS:
10064                         flow_dv_translate_item_mpls(match_mask, match_value,
10065                                                     items, last_item, tunnel);
10066                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10067                         last_item = MLX5_FLOW_LAYER_MPLS;
10068                         break;
10069                 case RTE_FLOW_ITEM_TYPE_MARK:
10070                         flow_dv_translate_item_mark(dev, match_mask,
10071                                                     match_value, items);
10072                         last_item = MLX5_FLOW_ITEM_MARK;
10073                         break;
10074                 case RTE_FLOW_ITEM_TYPE_META:
10075                         flow_dv_translate_item_meta(dev, match_mask,
10076                                                     match_value, attr, items);
10077                         last_item = MLX5_FLOW_ITEM_METADATA;
10078                         break;
10079                 case RTE_FLOW_ITEM_TYPE_ICMP:
10080                         flow_dv_translate_item_icmp(match_mask, match_value,
10081                                                     items, tunnel);
10082                         last_item = MLX5_FLOW_LAYER_ICMP;
10083                         break;
10084                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10085                         flow_dv_translate_item_icmp6(match_mask, match_value,
10086                                                       items, tunnel);
10087                         last_item = MLX5_FLOW_LAYER_ICMP6;
10088                         break;
10089                 case RTE_FLOW_ITEM_TYPE_TAG:
10090                         flow_dv_translate_item_tag(dev, match_mask,
10091                                                    match_value, items);
10092                         last_item = MLX5_FLOW_ITEM_TAG;
10093                         break;
10094                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10095                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10096                                                         match_value, items);
10097                         last_item = MLX5_FLOW_ITEM_TAG;
10098                         break;
10099                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10100                         flow_dv_translate_item_tx_queue(dev, match_mask,
10101                                                         match_value,
10102                                                         items);
10103                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10104                         break;
10105                 case RTE_FLOW_ITEM_TYPE_GTP:
10106                         flow_dv_translate_item_gtp(match_mask, match_value,
10107                                                    items, tunnel);
10108                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10109                         last_item = MLX5_FLOW_LAYER_GTP;
10110                         break;
10111                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10112                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10113                                 /* Create it only on first use. */
10114                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10115                                 if (ret)
10116                                         return rte_flow_error_set
10117                                                 (error, -ret,
10118                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10119                                                 NULL,
10120                                                 "cannot create eCPRI parser");
10121                         }
10122                         /* Adjust the matcher mask and device flow value sizes. */
10123                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10124                         dev_flow->dv.value.size =
10125                                         MLX5_ST_SZ_BYTES(fte_match_param);
10126                         flow_dv_translate_item_ecpri(dev, match_mask,
10127                                                      match_value, items);
10128                         /* No other protocol should follow eCPRI layer. */
10129                         last_item = MLX5_FLOW_LAYER_ECPRI;
10130                         break;
10131                 default:
10132                         break;
10133                 }
10134                 item_flags |= last_item;
10135         }
10136         /*
10137          * When E-Switch mode is enabled, we have two cases where we need to
10138          * set the source port manually.
10139          * The first is a NIC steering rule, and the second is an
10140          * E-Switch rule where no port_id item was found. In both cases
10141          * the source port is set according to the current port in use.
10142          */
10143         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10144             (priv->representor || priv->master)) {
10145                 if (flow_dv_translate_item_port_id(dev, match_mask,
10146                                                    match_value, NULL))
10147                         return -rte_errno;
10148         }
10149 #ifdef RTE_LIBRTE_MLX5_DEBUG
10150         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10151                                               dev_flow->dv.value.buf));
10152 #endif
10153         /*
10154          * Layers may be already initialized from prefix flow if this dev_flow
10155          * is the suffix flow.
10156          */
10157         handle->layers |= item_flags;
10158         if (action_flags & MLX5_FLOW_ACTION_RSS)
10159                 flow_dv_hashfields_set(dev_flow, rss_desc);
10160         /* Register matcher. */
10161         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10162                                     matcher.mask.size);
10163         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10164                                                      matcher.priority);
10165         /* The reserved field does not need to be set to 0 here. */
10166         tbl_key.domain = attr->transfer;
10167         tbl_key.direction = attr->egress;
10168         tbl_key.table_id = dev_flow->dv.group;
10169         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
10170                 return -rte_errno;
10171         return 0;
10172 }
10173
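/*
 * Illustrative sketch only, not part of the driver: a minimal rte_flow
 * action sequence that the translation loop above turns into a COUNT
 * action plus a QUEUE fate. The queue index 0 and the guarding macro
 * name are hypothetical.
 */
#ifdef MLX5_FLOW_DV_TRANSLATE_EXAMPLE /* never defined, example only */
static const struct rte_flow_action_queue example_queue = { .index = 0 };
static const struct rte_flow_action example_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = NULL },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
};
#endif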
10174 /**
10175  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10176  * and tunnel.
10177  *
10178  * @param[in, out] action
10179  *   Shared RSS action holding hash RX queue objects.
10180  * @param[in] hash_fields
10181  *   Defines combination of packet fields to participate in RX hash.
10182  * @param[in] tunnel
10183  *   Tunnel type.
10184  * @param[in] hrxq_idx
10185  *   Hash RX queue index to set.
10186  *
10187  * @return
10188  *   0 on success, otherwise negative errno value.
10189  */
10190 static int
10191 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10192                               const uint64_t hash_fields,
10193                               const int tunnel,
10194                               uint32_t hrxq_idx)
10195 {
10196         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10197
10198         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10199         case MLX5_RSS_HASH_IPV4:
10200                 hrxqs[0] = hrxq_idx;
10201                 return 0;
10202         case MLX5_RSS_HASH_IPV4_TCP:
10203                 hrxqs[1] = hrxq_idx;
10204                 return 0;
10205         case MLX5_RSS_HASH_IPV4_UDP:
10206                 hrxqs[2] = hrxq_idx;
10207                 return 0;
10208         case MLX5_RSS_HASH_IPV6:
10209                 hrxqs[3] = hrxq_idx;
10210                 return 0;
10211         case MLX5_RSS_HASH_IPV6_TCP:
10212                 hrxqs[4] = hrxq_idx;
10213                 return 0;
10214         case MLX5_RSS_HASH_IPV6_UDP:
10215                 hrxqs[5] = hrxq_idx;
10216                 return 0;
10217         case MLX5_RSS_HASH_NONE:
10218                 hrxqs[6] = hrxq_idx;
10219                 return 0;
10220         default:
10221                 return -1;
10222         }
10223 }
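/*
 * Slot layout shared by __flow_dv_action_rss_hrxq_set() above and the
 * lookup helper below: [0] IPv4, [1] IPv4-TCP, [2] IPv4-UDP, [3] IPv6,
 * [4] IPv6-TCP, [5] IPv6-UDP, [6] no hash.
 */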
10224
10225 /**
10226  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10227  * and tunnel.
10228  *
10229  * @param[in] action
10230  *   Shared RSS action holding hash RX queue objects.
10231  * @param[in] hash_fields
10232  *   Defines combination of packet fields to participate in RX hash.
10233  * @param[in] tunnel
10234  *   Tunnel type.
10235  *
10236  * @return
10237  *   Valid hash RX queue index, otherwise 0.
10238  */
10239 static uint32_t
10240 __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
10241                                  const uint64_t hash_fields,
10242                                  const int tunnel)
10243 {
10244         const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10245
10246         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10247         case MLX5_RSS_HASH_IPV4:
10248                 return hrxqs[0];
10249         case MLX5_RSS_HASH_IPV4_TCP:
10250                 return hrxqs[1];
10251         case MLX5_RSS_HASH_IPV4_UDP:
10252                 return hrxqs[2];
10253         case MLX5_RSS_HASH_IPV6:
10254                 return hrxqs[3];
10255         case MLX5_RSS_HASH_IPV6_TCP:
10256                 return hrxqs[4];
10257         case MLX5_RSS_HASH_IPV6_UDP:
10258                 return hrxqs[5];
10259         case MLX5_RSS_HASH_NONE:
10260                 return hrxqs[6];
10261         default:
10262                 return 0;
10263         }
10264 }
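/*
 * Usage sketch, illustrative only: an index stored for a given hash
 * fields/tunnel combination is returned by the matching lookup. The
 * index value 42 and the guarding macro name are hypothetical.
 */
#ifdef MLX5_FLOW_DV_HRXQ_EXAMPLE /* never defined, example only */
static void
example_rss_hrxq_round_trip(struct mlx5_shared_action_rss *action)
{
	/* Store a hypothetical hrxq index in the IPv4-TCP slot. */
	(void)__flow_dv_action_rss_hrxq_set(action, MLX5_RSS_HASH_IPV4_TCP,
					    0, 42);
	/* The matching lookup returns the stored index. */
	MLX5_ASSERT(__flow_dv_action_rss_hrxq_lookup
			(action, MLX5_RSS_HASH_IPV4_TCP, 0) == 42);
}
#endif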
10265
10266 /**
10267  * Retrieves hash RX queue suitable for the *flow*.
10268  * If a shared action is configured for the *flow*, the suitable hash RX
10269  * queue is retrieved from the attached shared action.
10270  *
10271  * @param[in] flow
10272  *   Pointer to flow structure.
10273  * @param[in] dev_flow
10274  *   Pointer to the sub flow.
10275  * @param[out] hrxq
10276  *   Pointer to retrieved hash RX queue object.
10277  *
10278  * @return
10279  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10280  */
10281 static uint32_t
10282 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
10283                            struct mlx5_flow *dev_flow,
10284                            struct mlx5_hrxq **hrxq)
10285 {
10286         struct mlx5_priv *priv = dev->data->dev_private;
10287         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10288         uint32_t hrxq_idx;
10289
10290         if (flow->shared_rss) {
10291                 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10292                                 (flow->shared_rss, dev_flow->hash_fields,
10293                                  !!(dev_flow->handle->layers &
10294                                     MLX5_FLOW_LAYER_TUNNEL));
10295                 if (hrxq_idx) {
10296                         *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10297                                                hrxq_idx);
10298                         __atomic_fetch_add(&(*hrxq)->refcnt, 1,
10299                                            __ATOMIC_RELAXED);
10300                 }
10301         } else {
10302                 struct mlx5_flow_rss_desc *rss_desc =
10303                                 &wks->rss_desc[!!wks->flow_nested_idx];
10304
10305                 MLX5_ASSERT(rss_desc->queue_num);
10306                 hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
10307                                          MLX5_RSS_HASH_KEY_LEN,
10308                                          dev_flow->hash_fields,
10309                                          rss_desc->queue, rss_desc->queue_num);
10310                 if (!hrxq_idx) {
10311                         hrxq_idx = mlx5_hrxq_new(dev,
10312                                                  rss_desc->key,
10313                                                  MLX5_RSS_HASH_KEY_LEN,
10314                                                  dev_flow->hash_fields,
10315                                                  rss_desc->queue,
10316                                                  rss_desc->queue_num,
10317                                                  !!(dev_flow->handle->layers &
10318                                                  MLX5_FLOW_LAYER_TUNNEL),
10319                                                  false);
10320                 }
10321                 *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10322                                        hrxq_idx);
10323         }
10324         return hrxq_idx;
10325 }
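/*
 * Both paths above return an hrxq with a reference taken: the caller
 * owns that reference and must drop it with mlx5_hrxq_release() when
 * the flow is destroyed.
 */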
10326
10327 /**
10328  * Apply the flow to the NIC, lock free
10329  * (the mutex should be acquired by the caller).
10330  *
10331  * @param[in] dev
10332  *   Pointer to the Ethernet device structure.
10333  * @param[in, out] flow
10334  *   Pointer to flow structure.
10335  * @param[out] error
10336  *   Pointer to error structure.
10337  *
10338  * @return
10339  *   0 on success, a negative errno value otherwise and rte_errno is set.
10340  */
10341 static int
10342 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
10343                 struct rte_flow_error *error)
10344 {
10345         struct mlx5_flow_dv_workspace *dv;
10346         struct mlx5_flow_handle *dh;
10347         struct mlx5_flow_handle_dv *dv_h;
10348         struct mlx5_flow *dev_flow;
10349         struct mlx5_priv *priv = dev->data->dev_private;
10350         uint32_t handle_idx;
10351         int n;
10352         int err;
10353         int idx;
10354         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10355
10356         MLX5_ASSERT(wks);
10357         for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
10358                 dev_flow = &wks->flows[idx];
10359                 dv = &dev_flow->dv;
10360                 dh = dev_flow->handle;
10361                 dv_h = &dh->dvh;
10362                 n = dv->actions_n;
10363                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
10364                         if (dv->transfer) {
10365                                 dv->actions[n++] = priv->sh->esw_drop_action;
10366                         } else {
10367                                 struct mlx5_hrxq *drop_hrxq;
10368                                 drop_hrxq = mlx5_drop_action_create(dev);
10369                                 if (!drop_hrxq) {
10370                                         rte_flow_error_set
10371                                                 (error, errno,
10372                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10373                                                  NULL,
10374                                                  "cannot get drop hash queue");
10375                                         goto error;
10376                                 }
10377                                 /*
10378                                  * Drop queues will be released by
10379                                  * mlx5_drop_action_destroy(). Assign the
10380                                  * special index to hrxq to mark that the
10381                                  * queue has been allocated.
10382                                  */
10383                                 dh->rix_hrxq = UINT32_MAX;
10384                                 dv->actions[n++] = drop_hrxq->action;
10385                         }
10386                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
10387                            !dv_h->rix_sample && !dv_h->rix_dest_array) {
10388                         struct mlx5_hrxq *hrxq = NULL;
10389                         uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
10390                                                 (dev, flow, dev_flow, &hrxq);
10391
10392                         if (!hrxq) {
10393                                 rte_flow_error_set
10394                                         (error, rte_errno,
10395                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10396                                          "cannot get hash queue");
10397                                 goto error;
10398                         }
10399                         dh->rix_hrxq = hrxq_idx;
10400                         dv->actions[n++] = hrxq->action;
10401                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
10402                         if (flow_dv_default_miss_resource_register
10403                                         (dev, error)) {
10404                                 rte_flow_error_set
10405                                         (error, rte_errno,
10406                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10407                                          "cannot create default miss resource");
10408                                 goto error_default_miss;
10409                         }
10410                         dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
10411                         dv->actions[n++] = priv->sh->default_miss.action;
10412                 }
10413                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
10414                                                (void *)&dv->value, n,
10415                                                dv->actions, &dh->drv_flow);
10416                 if (err) {
10417                         rte_flow_error_set(error, errno,
10418                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10419                                            NULL,
10420                                            "hardware refuses to create flow");
10421                         goto error;
10422                 }
10423                 if (priv->vmwa_context &&
10424                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
10425                         /*
10426                          * The rule contains the VLAN pattern.
10427                          * For a VF we create a VLAN interface so
10428                          * that the hypervisor sets the correct
10429                          * e-Switch vport context.
10430                          */
10431                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
10432                 }
10433         }
10434         return 0;
10435 error:
10436         if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
10437                 flow_dv_default_miss_resource_release(dev);
10438 error_default_miss:
10439         err = rte_errno; /* Save rte_errno before cleanup. */
10440         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
10441                        handle_idx, dh, next) {
10442                 /* hrxq is a union; don't clear it if the flag is not set. */
10443                 if (dh->rix_hrxq) {
10444                         if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
10445                                 mlx5_drop_action_destroy(dev);
10446                                 dh->rix_hrxq = 0;
10447                         } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
10448                                 mlx5_hrxq_release(dev, dh->rix_hrxq);
10449                                 dh->rix_hrxq = 0;
10450                         }
10451                 }
10452                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10453                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10454         }
10455         rte_errno = err; /* Restore rte_errno. */
10456         return -rte_errno;
10457 }
10458
10459 /**
10460  * Release the flow matcher.
10461  *
10462  * @param dev
10463  *   Pointer to Ethernet device.
10464  * @param handle
10465  *   Pointer to mlx5_flow_handle.
10466  *
10467  * @return
10468  *   1 while a reference on it exists, 0 when freed.
10469  */
10470 static int
10471 flow_dv_matcher_release(struct rte_eth_dev *dev,
10472                         struct mlx5_flow_handle *handle)
10473 {
10474         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10475
10476         MLX5_ASSERT(matcher->matcher_object);
10477         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
10478                 dev->data->port_id, (void *)matcher,
10479                 __atomic_load_n(&matcher->refcnt, __ATOMIC_RELAXED));
10480         if (__atomic_sub_fetch(&matcher->refcnt, 1, __ATOMIC_RELAXED) == 0) {
10481                 claim_zero(mlx5_flow_os_destroy_flow_matcher
10482                            (matcher->matcher_object));
10483                 LIST_REMOVE(matcher, next);
10484                 /* The table reference is decremented in the release interface. */
10485                 flow_dv_tbl_resource_release(dev, matcher->tbl);
10486                 mlx5_free(matcher);
10487                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
10488                         dev->data->port_id, (void *)matcher);
10489                 return 0;
10490         }
10491         return 1;
10492 }
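
/*
 * All of the *_resource_release() helpers below follow the same atomic
 * reference-counting pattern as flow_dv_matcher_release() above. A
 * minimal sketch of the pattern ('res', destroy_hw_object() and
 * free_sw_state() are hypothetical stand-ins for the per-resource
 * types and destructors):
 *
 *	if (__atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 *		destroy_hw_object(res);	// last reference: drop HW object
 *		free_sw_state(res);	// then free the SW bookkeeping
 *		return 0;		// freed
 *	}
 *	return 1;			// still referenced elsewhere
 */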
10493
10494 /**
10495  * Release an encap/decap resource.
10496  *
10497  * @param dev
10498  *   Pointer to Ethernet device.
10499  * @param encap_decap_idx
10500  *   Index of encap decap resource.
10501  *
10502  * @return
10503  *   1 while a reference on it exists, 0 when freed.
10504  */
10505 static int
10506 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10507                                      uint32_t encap_decap_idx)
10508 {
10509         struct mlx5_priv *priv = dev->data->dev_private;
10510         uint32_t idx = encap_decap_idx;
10511         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10512
10513         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10514                          idx);
10515         if (!cache_resource)
10516                 return 0;
10517         MLX5_ASSERT(cache_resource->action);
10518         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
10519                 (void *)cache_resource,
10520                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10521         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10522                                __ATOMIC_RELAXED) == 0) {
10523                 claim_zero(mlx5_flow_os_destroy_flow_action
10524                                                 (cache_resource->action));
10525                 mlx5_hlist_remove(priv->sh->encaps_decaps,
10526                                   &cache_resource->entry);
10527                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
10528                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
10529                         (void *)cache_resource);
10530                 return 0;
10531         }
10532         return 1;
10533 }
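
/*
 * Encap/decap and the other DV resources are referenced by 32-bit
 * indexed-pool (ipool) indices rather than pointers; index 0 means
 * "no resource". A minimal lookup/free sketch, assuming 'pool' is one
 * of the priv->sh->ipool[] entries and 'idx' a previously allocated
 * index (last_reference() is a hypothetical refcount check):
 *
 *	struct mlx5_flow_dv_encap_decap_resource *res;
 *
 *	res = mlx5_ipool_get(pool, idx);	// NULL for stale/zero index
 *	if (res && last_reference(res))
 *		mlx5_ipool_free(pool, idx);	// return index to the pool
 */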
10534
10535 /**
10536  * Release a jump to table action resource.
10537  *
10538  * @param dev
10539  *   Pointer to Ethernet device.
10540  * @param handle
10541  *   Pointer to mlx5_flow_handle.
10542  *
10543  * @return
10544  *   1 while a reference on it exists, 0 when freed.
10545  */
10546 static int
10547 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10548                                   struct mlx5_flow_handle *handle)
10549 {
10550         struct mlx5_priv *priv = dev->data->dev_private;
10551         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
10552         struct mlx5_flow_tbl_data_entry *tbl_data;
10553
10554         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10555                              handle->rix_jump);
10556         if (!tbl_data)
10557                 return 0;
10558         cache_resource = &tbl_data->jump;
10559         MLX5_ASSERT(cache_resource->action);
10560         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
10561                 (void *)cache_resource,
10562                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10563         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10564                                __ATOMIC_RELAXED) == 0) {
10565                 claim_zero(mlx5_flow_os_destroy_flow_action
10566                                                 (cache_resource->action));
10567                 /* The jump action memory is freed inside the table release. */
10568                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
10569                 DRV_LOG(DEBUG, "jump table resource %p: removed",
10570                         (void *)cache_resource);
10571                 return 0;
10572         }
10573         return 1;
10574 }
10575
10576 /**
10577  * Release a default miss resource.
10578  *
10579  * @param dev
10580  *   Pointer to Ethernet device.
10581  * @return
10582  *   1 while a reference on it exists, 0 when freed.
10583  */
10584 static int
10585 flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
10586 {
10587         struct mlx5_priv *priv = dev->data->dev_private;
10588         struct mlx5_dev_ctx_shared *sh = priv->sh;
10589         struct mlx5_flow_default_miss_resource *cache_resource =
10590                         &sh->default_miss;
10591
10592         MLX5_ASSERT(cache_resource->action);
10593         DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
10594                         (void *)cache_resource->action,
10595                         __atomic_load_n(&cache_resource->refcnt,
10596                                         __ATOMIC_RELAXED));
10597         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10598                                __ATOMIC_RELAXED) == 0) {
10599                 claim_zero(mlx5_glue->destroy_flow_action
10600                                 (cache_resource->action));
10601                 DRV_LOG(DEBUG, "default miss resource %p: removed",
10602                                 (void *)cache_resource->action);
10603                 return 0;
10604         }
10605         return 1;
10606 }
10607
10608 /**
10609  * Release a modify-header resource.
10610  *
10611  * @param dev
10612  *   Pointer to Ethernet device.
10613  * @param handle
10614  *   Pointer to mlx5_flow_handle.
10615  *
10616  * @return
10617  *   1 while a reference on it exists, 0 when freed.
10618  */
10619 static int
10620 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10621                                     struct mlx5_flow_handle *handle)
10622 {
10623         struct mlx5_priv *priv = dev->data->dev_private;
10624         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
10625                                                         handle->dvh.modify_hdr;
10626
10627         MLX5_ASSERT(cache_resource->action);
10628         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
10629                 (void *)cache_resource,
10630                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10631         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10632                                 __ATOMIC_RELAXED) == 0) {
10633                 claim_zero(mlx5_flow_os_destroy_flow_action
10634                                                 (cache_resource->action));
10635                 mlx5_hlist_remove(priv->sh->modify_cmds,
10636                                   &cache_resource->entry);
10637                 mlx5_free(cache_resource);
10638                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
10639                         (void *)cache_resource);
10640                 return 0;
10641         }
10642         return 1;
10643 }
10644
10645 /**
10646  * Release port ID action resource.
10647  *
10648  * @param dev
10649  *   Pointer to Ethernet device.
10650  * @param port_id
10651  *   Index to port ID action resource.
10652  *
10653  * @return
10654  *   1 while a reference on it exists, 0 when freed.
10655  */
10656 static int
10657 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10658                                         uint32_t port_id)
10659 {
10660         struct mlx5_priv *priv = dev->data->dev_private;
10661         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
10662         uint32_t idx = port_id;
10663
10664         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
10665                                         idx);
10666         if (!cache_resource)
10667                 return 0;
10668         MLX5_ASSERT(cache_resource->action);
10669         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
10670                 (void *)cache_resource,
10671                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10672         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10673                                __ATOMIC_RELAXED) == 0) {
10674                 claim_zero(mlx5_flow_os_destroy_flow_action
10675                                                 (cache_resource->action));
10676                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
10677                              &priv->sh->port_id_action_list, idx,
10678                              cache_resource, next);
10679                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
10680                 DRV_LOG(DEBUG, "port id action resource %p: removed",
10681                         (void *)cache_resource);
10682                 return 0;
10683         }
10684         return 1;
10685 }
10686
10687 /**
10688  * Release push vlan action resource.
10689  *
10690  * @param dev
10691  *   Pointer to Ethernet device.
10692  * @param handle
10693  *   Pointer to mlx5_flow_handle.
10694  *
10695  * @return
10696  *   1 while a reference on it exists, 0 when freed.
10697  */
10698 static int
10699 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10700                                           struct mlx5_flow_handle *handle)
10701 {
10702         struct mlx5_priv *priv = dev->data->dev_private;
10703         uint32_t idx = handle->dvh.rix_push_vlan;
10704         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
10705
10706         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
10707                                         idx);
10708         if (!cache_resource)
10709                 return 0;
10710         MLX5_ASSERT(cache_resource->action);
10711         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
10712                 (void *)cache_resource,
10713                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10714         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10715                                __ATOMIC_RELAXED) == 0) {
10716                 claim_zero(mlx5_flow_os_destroy_flow_action
10717                                                 (cache_resource->action));
10718                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
10719                              &priv->sh->push_vlan_action_list, idx,
10720                              cache_resource, next);
10721                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10722                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
10723                         (void *)cache_resource);
10724                 return 0;
10725         }
10726         return 1;
10727 }
10728
10729 /**
10730  * Release the fate resource.
10731  *
10732  * @param dev
10733  *   Pointer to Ethernet device.
10734  * @param handle
10735  *   Pointer to mlx5_flow_handle.
10736  */
10737 static void
10738 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
10739                                struct mlx5_flow_handle *handle)
10740 {
10741         if (!handle->rix_fate)
10742                 return;
10743         switch (handle->fate_action) {
10744         case MLX5_FLOW_FATE_DROP:
10745                 mlx5_drop_action_destroy(dev);
10746                 break;
10747         case MLX5_FLOW_FATE_QUEUE:
10748                 mlx5_hrxq_release(dev, handle->rix_hrxq);
10749                 break;
10750         case MLX5_FLOW_FATE_JUMP:
10751                 flow_dv_jump_tbl_resource_release(dev, handle);
10752                 break;
10753         case MLX5_FLOW_FATE_PORT_ID:
10754                 flow_dv_port_id_action_resource_release(dev,
10755                                 handle->rix_port_id_action);
10756                 break;
10757         case MLX5_FLOW_FATE_DEFAULT_MISS:
10758                 flow_dv_default_miss_resource_release(dev);
10759                 break;
10760         default:
10761                 DRV_LOG(DEBUG, "Incorrect fate action: %d", handle->fate_action);
10762                 break;
10763         }
10764         handle->rix_fate = 0;
10765 }
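
/*
 * The rix_* fate members overlay each other in one union slot
 * (rix_fate), so the switch above must dispatch on fate_action before
 * touching them: reading the wrong member would reinterpret another
 * resource's index. The idiom, reduced to its core:
 *
 *	switch (handle->fate_action) {
 *	case MLX5_FLOW_FATE_QUEUE:
 *		mlx5_hrxq_release(dev, handle->rix_hrxq);
 *		break;
 *	// ... one case per fate type ...
 *	}
 *	handle->rix_fate = 0;	// invalidate the union once released
 */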
10766
10767 /**
10768  * Release a sample resource.
10769  *
10770  * @param dev
10771  *   Pointer to Ethernet device.
10772  * @param handle
10773  *   Pointer to mlx5_flow_handle.
10774  *
10775  * @return
10776  *   1 while a reference on it exists, 0 when freed.
10777  */
10778 static int
10779 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
10780                                      struct mlx5_flow_handle *handle)
10781 {
10782         struct mlx5_priv *priv = dev->data->dev_private;
10783         uint32_t idx = handle->dvh.rix_sample;
10784         struct mlx5_flow_dv_sample_resource *cache_resource;
10785
10786         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10787                          idx);
10788         if (!cache_resource)
10789                 return 0;
10790         MLX5_ASSERT(cache_resource->verbs_action);
10791         DRV_LOG(DEBUG, "sample resource %p: refcnt %d--",
10792                 (void *)cache_resource,
10793                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10794         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10795                                __ATOMIC_RELAXED) == 0) {
10796                 if (cache_resource->verbs_action)
10797                         claim_zero(mlx5_glue->destroy_flow_action
10798                                         (cache_resource->verbs_action));
10799                 if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10800                         if (cache_resource->default_miss)
10801                                 claim_zero(mlx5_glue->destroy_flow_action
10802                                   (cache_resource->default_miss));
10803                 }
10804                 if (cache_resource->normal_path_tbl)
10805                         flow_dv_tbl_resource_release(dev,
10806                                 cache_resource->normal_path_tbl);
10807         }
10808         if (cache_resource->sample_idx.rix_hrxq &&
10809                 !mlx5_hrxq_release(dev,
10810                         cache_resource->sample_idx.rix_hrxq))
10811                 cache_resource->sample_idx.rix_hrxq = 0;
10812         if (cache_resource->sample_idx.rix_tag &&
10813                 !flow_dv_tag_release(dev,
10814                         cache_resource->sample_idx.rix_tag))
10815                 cache_resource->sample_idx.rix_tag = 0;
10816         if (cache_resource->sample_idx.cnt) {
10817                 flow_dv_counter_release(dev,
10818                         cache_resource->sample_idx.cnt);
10819                 cache_resource->sample_idx.cnt = 0;
10820         }
10821         if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
10822                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10823                              &priv->sh->sample_action_list, idx,
10824                              cache_resource, next);
10825                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10826                 DRV_LOG(DEBUG, "sample resource %p: removed",
10827                         (void *)cache_resource);
10828                 return 0;
10829         }
10830         return 1;
10831 }
10832
10833 /**
10834  * Release a destination array resource.
10835  *
10836  * @param dev
10837  *   Pointer to Ethernet device.
10838  * @param handle
10839  *   Pointer to mlx5_flow_handle.
10840  *
10841  * @return
10842  *   1 while a reference on it exists, 0 when freed.
10843  */
10844 static int
10845 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
10846                                      struct mlx5_flow_handle *handle)
10847 {
10848         struct mlx5_priv *priv = dev->data->dev_private;
10849         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10850         struct mlx5_flow_sub_actions_idx *mdest_act_res;
10851         uint32_t idx = handle->dvh.rix_dest_array;
10852         uint32_t i = 0;
10853
10854         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10855                          idx);
10856         if (!cache_resource)
10857                 return 0;
10858         MLX5_ASSERT(cache_resource->action);
10859         DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
10860                 (void *)cache_resource,
10861                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10862         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10863                                __ATOMIC_RELAXED) == 0) {
10864                 if (cache_resource->action)
10865                         claim_zero(mlx5_glue->destroy_flow_action
10866                                                 (cache_resource->action));
10867                 for (; i < cache_resource->num_of_dest; i++) {
10868                         mdest_act_res = &cache_resource->sample_idx[i];
10869                         if (mdest_act_res->rix_hrxq) {
10870                                 mlx5_hrxq_release(dev,
10871                                         mdest_act_res->rix_hrxq);
10872                                 mdest_act_res->rix_hrxq = 0;
10873                         }
10874                         if (mdest_act_res->rix_encap_decap) {
10875                                 flow_dv_encap_decap_resource_release(dev,
10876                                         mdest_act_res->rix_encap_decap);
10877                                 mdest_act_res->rix_encap_decap = 0;
10878                         }
10879                         if (mdest_act_res->rix_port_id_action) {
10880                                 flow_dv_port_id_action_resource_release(dev,
10881                                         mdest_act_res->rix_port_id_action);
10882                                 mdest_act_res->rix_port_id_action = 0;
10883                         }
10884                         if (mdest_act_res->rix_tag) {
10885                                 flow_dv_tag_release(dev,
10886                                         mdest_act_res->rix_tag);
10887                                 mdest_act_res->rix_tag = 0;
10888                         }
10889                 }
10890                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10891                              &priv->sh->dest_array_list, idx,
10892                              cache_resource, next);
10893                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
10894                 DRV_LOG(DEBUG, "destination array resource %p: removed",
10895                         (void *)cache_resource);
10896                 return 0;
10897         }
10898         return 1;
10899 }
10900
10901 /**
10902  * Remove the flow from the NIC but keep it in memory.
10903  * Lock free; the mutex should be acquired by the caller.
10904  *
10905  * @param[in] dev
10906  *   Pointer to Ethernet device.
10907  * @param[in, out] flow
10908  *   Pointer to flow structure.
10909  */
10910 static void
10911 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
10912 {
10913         struct mlx5_flow_handle *dh;
10914         uint32_t handle_idx;
10915         struct mlx5_priv *priv = dev->data->dev_private;
10916
10917         if (!flow)
10918                 return;
10919         handle_idx = flow->dev_handles;
10920         while (handle_idx) {
10921                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10922                                     handle_idx);
10923                 if (!dh)
10924                         return;
10925                 if (dh->drv_flow) {
10926                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
10927                         dh->drv_flow = NULL;
10928                 }
10929                 if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
10930                     dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
10931                     dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
10932                         flow_dv_fate_resource_release(dev, dh);
10933                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10934                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10935                 handle_idx = dh->next.next;
10936         }
10937 }
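
/*
 * Flow teardown is two-phase: __flow_dv_remove() above drops the HW
 * rules but keeps the handles (so the flow can be re-applied, e.g.
 * across a port restart), while __flow_dv_destroy() below also frees
 * the handles. The handle list is linked by ipool indices, not
 * pointers; a traversal sketch mirroring the loop above:
 *
 *	uint32_t idx = flow->dev_handles;
 *
 *	while (idx) {
 *		dh = mlx5_ipool_get(ipool, idx);
 *		if (!dh)
 *			break;
 *		// ... release per-handle resources ...
 *		idx = dh->next.next;	// next index, 0 terminates
 *	}
 */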
10938
10939 /**
10940  * Remove the flow from the NIC and the memory.
10941  * Lock free; the mutex should be acquired by the caller.
10942  *
10943  * @param[in] dev
10944  *   Pointer to the Ethernet device structure.
10945  * @param[in, out] flow
10946  *   Pointer to flow structure.
10947  */
10948 static void
10949 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
10950 {
10951         struct rte_flow_shared_action *shared;
10952         struct mlx5_flow_handle *dev_handle;
10953         struct mlx5_priv *priv = dev->data->dev_private;
10954
10955         if (!flow)
10956                 return;
10957         __flow_dv_remove(dev, flow);
10958         shared = mlx5_flow_get_shared_rss(flow);
10959         if (shared)
10960                 __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
10961         if (flow->counter) {
10962                 flow_dv_counter_release(dev, flow->counter);
10963                 flow->counter = 0;
10964         }
10965         if (flow->meter) {
10966                 struct mlx5_flow_meter *fm;
10967
10968                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
10969                                     flow->meter);
10970                 if (fm)
10971                         mlx5_flow_meter_detach(fm);
10972                 flow->meter = 0;
10973         }
10974         while (flow->dev_handles) {
10975                 uint32_t tmp_idx = flow->dev_handles;
10976
10977                 dev_handle = mlx5_ipool_get(priv->sh->ipool
10978                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
10979                 if (!dev_handle)
10980                         return;
10981                 flow->dev_handles = dev_handle->next.next;
10982                 if (dev_handle->dvh.matcher)
10983                         flow_dv_matcher_release(dev, dev_handle);
10984                 if (dev_handle->dvh.rix_sample)
10985                         flow_dv_sample_resource_release(dev, dev_handle);
10986                 if (dev_handle->dvh.rix_dest_array)
10987                         flow_dv_dest_array_resource_release(dev, dev_handle);
10988                 if (dev_handle->dvh.rix_encap_decap)
10989                         flow_dv_encap_decap_resource_release(dev,
10990                                 dev_handle->dvh.rix_encap_decap);
10991                 if (dev_handle->dvh.modify_hdr)
10992                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
10993                 if (dev_handle->dvh.rix_push_vlan)
10994                         flow_dv_push_vlan_action_resource_release(dev,
10995                                                                   dev_handle);
10996                 if (dev_handle->dvh.rix_tag)
10997                         flow_dv_tag_release(dev,
10998                                             dev_handle->dvh.rix_tag);
10999                 flow_dv_fate_resource_release(dev, dev_handle);
11000                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11001                            tmp_idx);
11002         }
11003 }
11004
11005 /**
11006  * Release array of hash RX queue objects.
11007  * Helper function.
11008  *
11009  * @param[in] dev
11010  *   Pointer to the Ethernet device structure.
11011  * @param[in, out] hrxqs
11012  *   Array of hash RX queue objects.
11013  *
11014  * @return
11015  *   Total number of references to hash RX queue objects in *hrxqs* array
11016  *   after this operation.
11017  */
11018 static int
11019 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
11020                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
11021 {
11022         size_t i;
11023         int remaining = 0;
11024
11025         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
11026                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
11027
11028                 if (!ret)
11029                         (*hrxqs)[i] = 0;
11030                 remaining += ret;
11031         }
11032         return remaining;
11033 }
11034
11035 /**
11036  * Release all hash RX queue objects representing shared RSS action.
11037  *
11038  * @param[in] dev
11039  *   Pointer to the Ethernet device structure.
11040  * @param[in, out] action
11041  *   Shared RSS action to remove hash RX queue objects from.
11042  *
11043  * @return
11044  *   Total number of references to hash RX queue objects stored in *action*
11045  *   after this operation.
11046  *   Expected to be 0 if no external references are held.
11047  */
11048 static int
11049 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
11050                                  struct mlx5_shared_action_rss *action)
11051 {
11052         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
11053                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
11054 }
11055
11056 /**
11057  * Set up the shared RSS action.
11058  * Prepare set of hash RX queue objects sufficient to handle all valid
11059  * hash_fields combinations (see enum ibv_rx_hash_fields).
11060  *
11061  * @param[in] dev
11062  *   Pointer to the Ethernet device structure.
11063  * @param[in, out] action
11064  *   Partially initialized shared RSS action.
11065  * @param[out] error
11066  *   Perform verbose error reporting if not NULL. Initialized in case of
11067  *   error only.
11068  *
11069  * @return
11070  *   0 on success, otherwise negative errno value.
11071  */
11072 static int
11073 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
11074                         struct mlx5_shared_action_rss *action,
11075                         struct rte_flow_error *error)
11076 {
11077         size_t i;
11078         int err;
11079
11080         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11081                 uint32_t hrxq_idx;
11082                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11083                 int tunnel;
11084
11085                 for (tunnel = 0; tunnel < 2; tunnel++) {
11086                         hrxq_idx = mlx5_hrxq_new(dev, action->origin.key,
11087                                         MLX5_RSS_HASH_KEY_LEN,
11088                                         hash_fields,
11089                                         action->origin.queue,
11090                                         action->origin.queue_num,
11091                                         tunnel, true);
11092                         if (!hrxq_idx) {
11093                                 rte_flow_error_set
11094                                         (error, rte_errno,
11095                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11096                                          "cannot get hash queue");
11097                                 goto error_hrxq_new;
11098                         }
11099                         err = __flow_dv_action_rss_hrxq_set
11100                                 (action, hash_fields, tunnel, hrxq_idx);
11101                         MLX5_ASSERT(!err);
11102                 }
11103         }
11104         return 0;
11105 error_hrxq_new:
11106         err = rte_errno;
11107         __flow_dv_action_rss_hrxqs_release(dev, action);
11108         rte_errno = err;
11109         return -rte_errno;
11110 }
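
/*
 * Setup pre-creates one hrxq per (hash_fields, tunnel) combination,
 * i.e. MLX5_RSS_HASH_FIELDS_LEN x 2 queue objects, so flows that
 * reference the shared action later pick an existing queue instead of
 * creating one at insertion time. A lookup sketch (the slot mapping
 * helper slot_of() is hypothetical; the real one is
 * __flow_dv_action_rss_hrxq_lookup()):
 *
 *	uint32_t (*arr)[MLX5_RSS_HASH_FIELDS_LEN] =
 *		tunnel ? &action->hrxq_tunnel : &action->hrxq;
 *	uint32_t hrxq_idx = (*arr)[slot_of(hash_fields)];
 */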
11111
11112 /**
11113  * Create shared RSS action.
11114  *
11115  * @param[in] dev
11116  *   Pointer to the Ethernet device structure.
11117  * @param[in] conf
11118  *   Shared action configuration.
11119  * @param[in] rss
11120  *   RSS action specification used to create shared action.
11121  * @param[out] error
11122  *   Perform verbose error reporting if not NULL. Initialized in case of
11123  *   error only.
11124  *
11125  * @return
11126  *   A valid shared action handle in case of success, NULL otherwise and
11127  *   rte_errno is set.
11128  */
11129 static struct rte_flow_shared_action *
11130 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11131                             const struct rte_flow_shared_action_conf *conf,
11132                             const struct rte_flow_action_rss *rss,
11133                             struct rte_flow_error *error)
11134 {
11135         struct rte_flow_shared_action *shared_action = NULL;
11136         void *queue = NULL;
11137         struct mlx5_shared_action_rss *shared_rss;
11138         struct rte_flow_action_rss *origin;
11139         const uint8_t *rss_key;
11140         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11141
11142         RTE_SET_USED(conf);
11143         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11144                             0, SOCKET_ID_ANY);
11145         shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
11146                                     SOCKET_ID_ANY);
11147         if (!shared_action || !queue) {
11148                 rte_flow_error_set(error, ENOMEM,
11149                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11150                                    "cannot allocate resource memory");
11151                 goto error_rss_init;
11152         }
11153         shared_rss = &shared_action->rss;
11154         shared_rss->queue = queue;
11155         origin = &shared_rss->origin;
11156         origin->func = rss->func;
11157         origin->level = rss->level;
11158         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11159         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11160         /* NULL RSS key indicates default RSS key. */
11161         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11162         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11163         origin->key = &shared_rss->key[0];
11164         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11165         memcpy(shared_rss->queue, rss->queue, queue_size);
11166         origin->queue = shared_rss->queue;
11167         origin->queue_num = rss->queue_num;
11168         if (__flow_dv_action_rss_setup(dev, shared_rss, error))
11169                 goto error_rss_init;
11170         shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
11171         return shared_action;
11172 error_rss_init:
11173         mlx5_free(shared_action);
11174         mlx5_free(queue);
11175         return NULL;
11176 }
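
/*
 * Applications reach this through the experimental
 * rte_flow_shared_action_create() API. A usage sketch (queue numbers
 * illustrative):
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.queue = queues,
 *		.queue_num = RTE_DIM(queues),
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss,
 *	};
 *	struct rte_flow_shared_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow_shared_action *sa =
 *		rte_flow_shared_action_create(port_id, &conf, &action, &err);
 */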
11177
11178 /**
11179  * Destroy the shared RSS action.
11180  * Release related hash RX queue objects.
11181  *
11182  * @param[in] dev
11183  *   Pointer to the Ethernet device structure.
11184  * @param[in] shared_rss
11185  *   The shared RSS action object to be removed.
11186  * @param[out] error
11187  *   Perform verbose error reporting if not NULL. Initialized in case of
11188  *   error only.
11189  *
11190  * @return
11191  *   0 on success, otherwise negative errno value.
11192  */
11193 static int
11194 __flow_dv_action_rss_release(struct rte_eth_dev *dev,
11195                          struct mlx5_shared_action_rss *shared_rss,
11196                          struct rte_flow_error *error)
11197 {
11198         struct rte_flow_shared_action *shared_action = NULL;
11199         uint32_t old_refcnt = 1;
11200         int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11201
11202         if (remaining) {
11203                 return rte_flow_error_set(error, ETOOMANYREFS,
11204                                           RTE_FLOW_ERROR_TYPE_ACTION,
11205                                           NULL,
11206                                           "shared rss hrxq has references");
11207         }
11208         shared_action = container_of(shared_rss,
11209                                      struct rte_flow_shared_action, rss);
11210         if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
11211                                          0, 0,
11212                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
11213                 return rte_flow_error_set(error, ETOOMANYREFS,
11214                                           RTE_FLOW_ERROR_TYPE_ACTION,
11215                                           NULL,
11216                                           "shared rss has references");
11217         }
11218         mlx5_free(shared_rss->queue);
11219         return 0;
11220 }
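
/*
 * The release above must observe exactly one remaining reference (the
 * creation reference) before tearing the action down; a compare-and-
 * swap from 1 to 0 checks and claims it atomically. The guard, in
 * isolation:
 *
 *	uint32_t expected = 1;
 *
 *	if (!__atomic_compare_exchange_n(&refcnt, &expected, 0, 0,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		return -ETOOMANYREFS;	// still referenced by some flow
 */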
11221
11222 /**
11223  * Create a shared action, lock free;
11224  * the mutex should be acquired by the caller.
11225  * Dispatcher for action type specific call.
11226  *
11227  * @param[in] dev
11228  *   Pointer to the Ethernet device structure.
11229  * @param[in] conf
11230  *   Shared action configuration.
11231  * @param[in] action
11232  *   Action specification used to create shared action.
11233  * @param[out] error
11234  *   Perform verbose error reporting if not NULL. Initialized in case of
11235  *   error only.
11236  *
11237  * @return
11238  *   A valid shared action handle in case of success, NULL otherwise and
11239  *   rte_errno is set.
11240  */
11241 static struct rte_flow_shared_action *
11242 __flow_dv_action_create(struct rte_eth_dev *dev,
11243                         const struct rte_flow_shared_action_conf *conf,
11244                         const struct rte_flow_action *action,
11245                         struct rte_flow_error *error)
11246 {
11247         struct rte_flow_shared_action *shared_action = NULL;
11248         struct mlx5_priv *priv = dev->data->dev_private;
11249
11250         switch (action->type) {
11251         case RTE_FLOW_ACTION_TYPE_RSS:
11252                 shared_action = __flow_dv_action_rss_create(dev, conf,
11253                                                             action->conf,
11254                                                             error);
11255                 break;
11256         default:
11257                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11258                                    NULL, "action type not supported");
11259                 break;
11260         }
11261         if (shared_action) {
11262                 __atomic_add_fetch(&shared_action->refcnt, 1,
11263                                    __ATOMIC_RELAXED);
11264                 LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
11265         }
11266         return shared_action;
11267 }
11268
11269 /**
11270  * Destroy the shared action.
11271  * Release action related resources on the NIC and the memory.
11272  * Lock free; the mutex should be acquired by the caller.
11273  * Dispatcher for action type specific call.
11274  *
11275  * @param[in] dev
11276  *   Pointer to the Ethernet device structure.
11277  * @param[in] action
11278  *   The shared action object to be removed.
11279  * @param[out] error
11280  *   Perform verbose error reporting if not NULL. Initialized in case of
11281  *   error only.
11282  *
11283  * @return
11284  *   0 on success, otherwise negative errno value.
11285  */
11286 static int
11287 __flow_dv_action_destroy(struct rte_eth_dev *dev,
11288                          struct rte_flow_shared_action *action,
11289                          struct rte_flow_error *error)
11290 {
11291         int ret;
11292
11293         switch (action->type) {
11294         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11295                 ret = __flow_dv_action_rss_release(dev, &action->rss, error);
11296                 break;
11297         default:
11298                 return rte_flow_error_set(error, ENOTSUP,
11299                                           RTE_FLOW_ERROR_TYPE_ACTION,
11300                                           NULL,
11301                                           "action type not supported");
11302         }
11303         if (ret)
11304                 return ret;
11305         LIST_REMOVE(action, next);
11306         mlx5_free(action);
11307         return 0;
11308 }
11309
11310 /**
11311  * Update a shared RSS action configuration in place.
11312  *
11313  * @param[in] dev
11314  *   Pointer to the Ethernet device structure.
11315  * @param[in] shared_rss
11316  *   The shared RSS action object to be updated.
11317  * @param[in] action_conf
11318  *   RSS action specification used to modify *shared_rss*.
11319  * @param[out] error
11320  *   Perform verbose error reporting if not NULL. Initialized in case of
11321  *   error only.
11322  *
11323  * @return
11324  *   0 on success, otherwise negative errno value.
11325  * @note: currently only the RSS queue set can be updated.
11326  */
11327 static int
11328 __flow_dv_action_rss_update(struct rte_eth_dev *dev,
11329                             struct mlx5_shared_action_rss *shared_rss,
11330                             const struct rte_flow_action_rss *action_conf,
11331                             struct rte_flow_error *error)
11332 {
11333         size_t i;
11334         int ret;
11335         void *queue = NULL;
11336         const uint8_t *rss_key;
11337         uint32_t rss_key_len;
11338         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
11339
11340         queue = mlx5_malloc(MLX5_MEM_ZERO,
11341                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11342                             0, SOCKET_ID_ANY);
11343         if (!queue)
11344                 return rte_flow_error_set(error, ENOMEM,
11345                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11346                                           NULL,
11347                                           "cannot allocate resource memory");
11348         if (action_conf->key) {
11349                 rss_key = action_conf->key;
11350                 rss_key_len = action_conf->key_len;
11351         } else {
11352                 rss_key = rss_hash_default_key;
11353                 rss_key_len = MLX5_RSS_HASH_KEY_LEN;
11354         }
11355         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11356                 uint32_t hrxq_idx;
11357                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11358                 int tunnel;
11359
11360                 for (tunnel = 0; tunnel < 2; tunnel++) {
11361                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup
11362                                         (shared_rss, hash_fields, tunnel);
11363                         MLX5_ASSERT(hrxq_idx);
11364                         ret = mlx5_hrxq_modify
11365                                 (dev, hrxq_idx,
11366                                  rss_key, rss_key_len,
11367                                  hash_fields,
11368                                  action_conf->queue, action_conf->queue_num);
11369                         if (ret) {
11370                                 mlx5_free(queue);
11371                                 return rte_flow_error_set
11372                                         (error, rte_errno,
11373                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11374                                          "cannot update hash queue");
11375                         }
11376                 }
11377         }
11378         mlx5_free(shared_rss->queue);
11379         shared_rss->queue = queue;
11380         memcpy(shared_rss->queue, action_conf->queue, queue_size);
11381         shared_rss->origin.queue = shared_rss->queue;
11382         shared_rss->origin.queue_num = action_conf->queue_num;
11383         return 0;
11384 }
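
/*
 * Only the queue set of a shared RSS action is updated in place; key
 * or hash-field changes would require re-creating the hrxqs. An update
 * sketch through the experimental API (queue values illustrative):
 *
 *	uint16_t new_queues[] = { 4, 5 };
 *	struct rte_flow_action_rss rss = {
 *		.queue = new_queues,
 *		.queue_num = RTE_DIM(new_queues),
 *	};
 *	struct rte_flow_action update = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss,
 *	};
 *	ret = rte_flow_shared_action_update(port_id, sa, &update, &err);
 */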
11385
11386 /**
11387  * Update a shared action configuration in place, lock free;
11388  * the mutex should be acquired by the caller.
11389  *
11390  * @param[in] dev
11391  *   Pointer to the Ethernet device structure.
11392  * @param[in] action
11393  *   The shared action object to be updated.
11394  * @param[in] action_conf
11395  *   Action specification used to modify *action*.
11396  *   *action_conf* should be of type correlating with type of the *action*,
11397  *   otherwise considered as invalid.
11398  * @param[out] error
11399  *   Perform verbose error reporting if not NULL. Initialized in case of
11400  *   error only.
11401  *
11402  * @return
11403  *   0 on success, otherwise negative errno value.
11404  */
11405 static int
11406 __flow_dv_action_update(struct rte_eth_dev *dev,
11407                         struct rte_flow_shared_action *action,
11408                         const void *action_conf,
11409                         struct rte_flow_error *error)
11410 {
11411         switch (action->type) {
11412         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11413                 return __flow_dv_action_rss_update(dev, &action->rss,
11414                                                    action_conf, error);
11415         default:
11416                 return rte_flow_error_set(error, ENOTSUP,
11417                                           RTE_FLOW_ERROR_TYPE_ACTION,
11418                                           NULL,
11419                                           "action type not supported");
11420         }
11421 }
11422 /**
11423  * Query a DV flow rule for its statistics via DevX.
11424  *
11425  * @param[in] dev
11426  *   Pointer to Ethernet device.
11427  * @param[in] flow
11428  *   Pointer to the sub flow.
11429  * @param[out] data
11430  *   data retrieved by the query.
11431  * @param[out] error
11432  *   Perform verbose error reporting if not NULL.
11433  *
11434  * @return
11435  *   0 on success, a negative errno value otherwise and rte_errno is set.
11436  */
11437 static int
11438 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11439                     void *data, struct rte_flow_error *error)
11440 {
11441         struct mlx5_priv *priv = dev->data->dev_private;
11442         struct rte_flow_query_count *qc = data;
11443
11444         if (!priv->config.devx)
11445                 return rte_flow_error_set(error, ENOTSUP,
11446                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11447                                           NULL,
11448                                           "counters are not supported");
11449         if (flow->counter) {
11450                 uint64_t pkts, bytes;
11451                 struct mlx5_flow_counter *cnt;
11452                 int err;
11453
11454                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11455                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
11456                                            &bytes);
11457
11458                 if (err)
11459                         return rte_flow_error_set(error, -err,
11460                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11461                                         NULL, "cannot read counters");
11462                 qc->hits_set = 1;
11463                 qc->bytes_set = 1;
11464                 qc->hits = pkts - cnt->hits;
11465                 qc->bytes = bytes - cnt->bytes;
11466                 if (qc->reset) {
11467                         cnt->hits = pkts;
11468                         cnt->bytes = bytes;
11469                 }
11470                 return 0;
11471         }
11472         return rte_flow_error_set(error, EINVAL,
11473                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11474                                   NULL,
11475                                   "counters are not available");
11476 }
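
/*
 * Counters report deltas: the PMD keeps the last-reset snapshot in
 * cnt->hits/cnt->bytes and subtracts it from the HW reading, so
 * 'reset' only moves the snapshot forward. A query sketch through the
 * generic API (the flow must have been created with a COUNT action):
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	struct rte_flow_action acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	if (!rte_flow_query(port_id, flow, acts, &qc, &err) && qc.hits_set)
 *		printf("hits=%" PRIu64 "\n", qc.hits);
 */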
11477
11478 /**
11479  * Query a flow rule AGE action for aging information.
11480  *
11481  * @param[in] dev
11482  *   Pointer to Ethernet device.
11483  * @param[in] flow
11484  *   Pointer to the sub flow.
11485  * @param[out] data
11486  *   data retrieved by the query.
11487  * @param[out] error
11488  *   Perform verbose error reporting if not NULL.
11489  *
11490  * @return
11491  *   0 on success, a negative errno value otherwise and rte_errno is set.
11492  */
11493 static int
11494 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11495                   void *data, struct rte_flow_error *error)
11496 {
11497         struct rte_flow_query_age *resp = data;
11498
11499         if (flow->counter) {
11500                 struct mlx5_age_param *age_param =
11501                                 flow_dv_counter_idx_get_age(dev, flow->counter);
11502
11503                 if (!age_param || !age_param->timeout)
11504                         return rte_flow_error_set
11505                                         (error, EINVAL,
11506                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11507                                          NULL, "cannot read age data");
11508                 resp->aged = __atomic_load_n(&age_param->state,
11509                                              __ATOMIC_RELAXED) ==
11510                                                         AGE_TMOUT ? 1 : 0;
11511                 resp->sec_since_last_hit_valid = !resp->aged;
11512                 if (resp->sec_since_last_hit_valid)
11513                         resp->sec_since_last_hit =
11514                                 __atomic_load_n(&age_param->sec_since_last_hit,
11515                                                 __ATOMIC_RELAXED);
11516                 return 0;
11517         }
11518         return rte_flow_error_set(error, EINVAL,
11519                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11520                                   NULL,
11521                                   "age data not available");
11522 }
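
/*
 * Aging shares the flow counter: the flow is reported 'aged' once its
 * age parameter reaches the AGE_TMOUT state, otherwise the seconds
 * since the last hit are returned. A query sketch (same pattern as the
 * counter query above):
 *
 *	struct rte_flow_query_age age = { 0 };
 *	struct rte_flow_action acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_AGE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	if (!rte_flow_query(port_id, flow, acts, &age, &err) && age.aged)
 *		; // the flow saw no traffic within its timeout
 */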
11523
11524 /**
11525  * Query a flow.
11526  *
11527  * @see rte_flow_query()
11528  * @see rte_flow_ops
11529  */
11530 static int
11531 flow_dv_query(struct rte_eth_dev *dev,
11532               struct rte_flow *flow,
11533               const struct rte_flow_action *actions,
11534               void *data,
11535               struct rte_flow_error *error)
11536 {
11537         int ret = -EINVAL;
11538
11539         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11540                 switch (actions->type) {
11541                 case RTE_FLOW_ACTION_TYPE_VOID:
11542                         break;
11543                 case RTE_FLOW_ACTION_TYPE_COUNT:
11544                         ret = flow_dv_query_count(dev, flow, data, error);
11545                         break;
11546                 case RTE_FLOW_ACTION_TYPE_AGE:
11547                         ret = flow_dv_query_age(dev, flow, data, error);
11548                         break;
11549                 default:
11550                         return rte_flow_error_set(error, ENOTSUP,
11551                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11552                                                   actions,
11553                                                   "action not supported");
11554                 }
11555         }
11556         return ret;
11557 }
11558
11559 /**
11560  * Destroy the meter table set.
11561  * Lock free, (mutex should be acquired by caller).
11562  * Lock free; the mutex should be acquired by the caller.
11563  * @param[in] dev
11564  *   Pointer to Ethernet device.
11565  * @param[in] tbl
11566  *   Pointer to the meter table set.
11567  *
11568  * @return
11569  *   Always 0.
11570  */
11571 static int
11572 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11573                         struct mlx5_meter_domains_infos *tbl)
11574 {
11575         struct mlx5_priv *priv = dev->data->dev_private;
11576         struct mlx5_meter_domains_infos *mtd =
11577                                 (struct mlx5_meter_domains_infos *)tbl;
11578
11579         if (!mtd || !priv->config.dv_flow_en)
11580                 return 0;
11581         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11582                 claim_zero(mlx5_flow_os_destroy_flow
11583                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11584         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11585                 claim_zero(mlx5_flow_os_destroy_flow
11586                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11587         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11588                 claim_zero(mlx5_flow_os_destroy_flow
11589                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11590         if (mtd->egress.color_matcher)
11591                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11592                            (mtd->egress.color_matcher));
11593         if (mtd->egress.any_matcher)
11594                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11595                            (mtd->egress.any_matcher));
11596         if (mtd->egress.tbl)
11597                 flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
11598         if (mtd->egress.sfx_tbl)
11599                 flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
11600         if (mtd->ingress.color_matcher)
11601                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11602                            (mtd->ingress.color_matcher));
11603         if (mtd->ingress.any_matcher)
11604                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11605                            (mtd->ingress.any_matcher));
11606         if (mtd->ingress.tbl)
11607                 flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
11608         if (mtd->ingress.sfx_tbl)
11609                 flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
11610         if (mtd->transfer.color_matcher)
11611                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11612                            (mtd->transfer.color_matcher));
11613         if (mtd->transfer.any_matcher)
11614                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11615                            (mtd->transfer.any_matcher));
11616         if (mtd->transfer.tbl)
11617                 flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
11618         if (mtd->transfer.sfx_tbl)
11619                 flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
11620         if (mtd->drop_actn)
11621                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
11622         mlx5_free(mtd);
11623         return 0;
11624 }
11625
11626 /* Number of meter flow actions, count and jump or count and drop. */
11627 #define METER_ACTIONS 2
11628
11629 /**
11630  * Create the specified domain's meter table and suffix table.
11631  *
11632  * @param[in] dev
11633  *   Pointer to Ethernet device.
11634  * @param[in,out] mtb
11635  *   Pointer to DV meter table set.
11636  * @param[in] egress
11637  *   Table attribute.
11638  * @param[in] transfer
11639  *   Table attribute.
11640  * @param[in] color_reg_c_idx
11641  *   Reg C index for color match.
11642  *
11643  * @return
11644  *   0 on success, -1 otherwise and rte_errno is set.
11645  */
11646 static int
11647 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
11648                            struct mlx5_meter_domains_infos *mtb,
11649                            uint8_t egress, uint8_t transfer,
11650                            uint32_t color_reg_c_idx)
11651 {
11652         struct mlx5_priv *priv = dev->data->dev_private;
11653         struct mlx5_dev_ctx_shared *sh = priv->sh;
11654         struct mlx5_flow_dv_match_params mask = {
11655                 .size = sizeof(mask.buf),
11656         };
11657         struct mlx5_flow_dv_match_params value = {
11658                 .size = sizeof(value.buf),
11659         };
11660         struct mlx5dv_flow_matcher_attr dv_attr = {
11661                 .type = IBV_FLOW_ATTR_NORMAL,
11662                 .priority = 0,
11663                 .match_criteria_enable = 0,
11664                 .match_mask = (void *)&mask,
11665         };
11666         void *actions[METER_ACTIONS];
11667         struct mlx5_meter_domain_info *dtb;
11668         struct rte_flow_error error;
11669         int i = 0;
11670         int ret;
11671
11672         if (transfer)
11673                 dtb = &mtb->transfer;
11674         else if (egress)
11675                 dtb = &mtb->egress;
11676         else
11677                 dtb = &mtb->ingress;
11678         /* Create the meter table with METER level. */
11679         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
11680                                             egress, transfer, false, NULL, 0,
11681                                             &error);
11682         if (!dtb->tbl) {
11683                 DRV_LOG(ERR, "Failed to create meter policer table.");
11684                 return -1;
11685         }
11686         /* Create the meter suffix table with SUFFIX level. */
11687         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
11688                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
11689                                             egress, transfer, false, NULL, 0,
11690                                             &error);
11691         if (!dtb->sfx_tbl) {
11692                 DRV_LOG(ERR, "Failed to create meter suffix table.");
11693                 return -1;
11694         }
11695         /* Create matchers, Any and Color. */
11696         dv_attr.priority = 3;
11697         dv_attr.match_criteria_enable = 0;
11698         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11699                                                &dtb->any_matcher);
11700         if (ret) {
11701                 DRV_LOG(ERR, "Failed to create meter"
11702                              " policer default matcher.");
11703                 goto error_exit;
11704         }
11705         dv_attr.priority = 0;
11706         dv_attr.match_criteria_enable =
11707                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
11708         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
11709                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
11710         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11711                                                &dtb->color_matcher);
11712         if (ret) {
11713                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
11714                 goto error_exit;
11715         }
11716         if (mtb->count_actns[RTE_MTR_DROPPED])
11717                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
11718         actions[i++] = mtb->drop_actn;
11719         /* Default rule: lowest priority, match any, actions: drop. */
11720         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
11721                                        actions,
11722                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
11723         if (ret) {
11724                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
11725                 goto error_exit;
11726         }
11727         return 0;
11728 error_exit:
11729         return -1;
11730 }
11731
11732 /**
11733  * Create the needed meter and suffix tables.
11734  * Lock free; the mutex should be acquired by the caller.
11735  *
11736  * @param[in] dev
11737  *   Pointer to Ethernet device.
11738  * @param[in] fm
11739  *   Pointer to the flow meter.
11740  *
11741  * @return
11742  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
11743  */
11744 static struct mlx5_meter_domains_infos *
11745 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
11746                        const struct mlx5_flow_meter *fm)
11747 {
11748         struct mlx5_priv *priv = dev->data->dev_private;
11749         struct mlx5_meter_domains_infos *mtb;
11750         int ret;
11751         int i;
11752
11753         if (!priv->mtr_en) {
11754                 rte_errno = ENOTSUP;
11755                 return NULL;
11756         }
11757         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
11758         if (!mtb) {
11759                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
11760                 return NULL;
11761         }
11762         /* Create meter count actions */
11763         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
11764                 struct mlx5_flow_counter *cnt;
11765                 if (!fm->policer_stats.cnt[i])
11766                         continue;
11767                 cnt = flow_dv_counter_get_by_idx(dev,
11768                       fm->policer_stats.cnt[i], NULL);
11769                 mtb->count_actns[i] = cnt->action;
11770         }
11771         /* Create drop action. */
11772         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
11773         if (ret) {
11774                 DRV_LOG(ERR, "Failed to create drop action.");
11775                 goto error_exit;
11776         }
11777         /* Egress meter table. */
11778         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
11779         if (ret) {
11780                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
11781                 goto error_exit;
11782         }
11783         /* Ingress meter table. */
11784         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
11785         if (ret) {
11786                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
11787                 goto error_exit;
11788         }
11789         /* FDB meter table. */
11790         if (priv->config.dv_esw_en) {
11791                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
11792                                                  priv->mtr_color_reg);
11793                 if (ret) {
11794                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
11795                         goto error_exit;
11796                 }
11797         }
11798         return mtb;
11799 error_exit:
11800         flow_dv_destroy_mtr_tbl(dev, mtb);
11801         return NULL;
11802 }
11803
11804 /**
11805  * Destroy the policer rules of a meter domain.
11806  *
11807  * @param[in] dt
11808  *   Pointer to domain table.
11809  */
11810 static void
11811 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
11812 {
11813         int i;
11814
11815         for (i = 0; i < RTE_MTR_DROPPED; i++) {
11816                 if (dt->policer_rules[i]) {
11817                         claim_zero(mlx5_flow_os_destroy_flow
11818                                    (dt->policer_rules[i]));
11819                         dt->policer_rules[i] = NULL;
11820                 }
11821         }
11822         if (dt->jump_actn) {
11823                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
11824                 dt->jump_actn = NULL;
11825         }
11826 }
11827
11828 /**
11829  * Destroy policer rules.
11830  *
11831  * @param[in] dev
11832  *   Pointer to Ethernet device.
11833  * @param[in] fm
11834  *   Pointer to flow meter structure.
11835  * @param[in] attr
11836  *   Pointer to flow attributes.
11837  *
11838  * @return
11839  *   Always 0.
11840  */
11841 static int
11842 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
11843                               const struct mlx5_flow_meter *fm,
11844                               const struct rte_flow_attr *attr)
11845 {
11846         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
11847
11848         if (!mtb)
11849                 return 0;
11850         if (attr->egress)
11851                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
11852         if (attr->ingress)
11853                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
11854         if (attr->transfer)
11855                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
11856         return 0;
11857 }
11858
11859 /**
11860  * Create the meter policer rules for a specific domain.
11861  *
11862  * @param[in] fm
11863  *   Pointer to flow meter structure.
11864  * @param[in] dtb
11865  *   Pointer to the meter domain table.
11866  * @param[in] mtr_reg_c
11867  *   Color match REG_C.
11868  *
11869  * @return
11870  *   0 on success, -1 otherwise.
11871  */
11872 static int
11873 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
11874                                     struct mlx5_meter_domain_info *dtb,
11875                                     uint8_t mtr_reg_c)
11876 {
11877         struct mlx5_flow_dv_match_params matcher = {
11878                 .size = sizeof(matcher.buf),
11879         };
11880         struct mlx5_flow_dv_match_params value = {
11881                 .size = sizeof(value.buf),
11882         };
11883         struct mlx5_meter_domains_infos *mtb = fm->mfts;
11884         void *actions[METER_ACTIONS];
11885         int i;
11886         int ret = 0;
11887
11888         /* Create jump action. */
11889         if (!dtb->jump_actn)
11890                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11891                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
11892         if (ret) {
11893                 DRV_LOG(ERR, "Failed to create policer jump action.");
11894                 goto error;
11895         }
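              /*
               * One rule per color: count the packet if a counter is
               * configured, then either drop it or jump to the suffix table.
               */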
11896         for (i = 0; i < RTE_MTR_DROPPED; i++) {
11897                 int j = 0;
11898
11899                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
11900                                        rte_col_2_mlx5_col(i), UINT8_MAX);
11901                 if (mtb->count_actns[i])
11902                         actions[j++] = mtb->count_actns[i];
11903                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
11904                         actions[j++] = mtb->drop_actn;
11905                 else
11906                         actions[j++] = dtb->jump_actn;
11907                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
11908                                                (void *)&value, j, actions,
11909                                                &dtb->policer_rules[i]);
11910                 if (ret) {
11911                         DRV_LOG(ERR, "Failed to create policer rule.");
11912                         goto error;
11913                 }
11914         }
11915         return 0;
11916 error:
11917         rte_errno = errno;
11918         return -1;
11919 }
11920
11921 /**
11922  * Create policer rules.
11923  *
11924  * @param[in] dev
11925  *   Pointer to Ethernet device.
11926  * @param[in] fm
11927  *   Pointer to flow meter structure.
11928  * @param[in] attr
11929  *   Pointer to flow attributes.
11930  *
11931  * @return
11932  *   0 on success, -1 otherwise.
11933  */
11934 static int
11935 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
11936                              struct mlx5_flow_meter *fm,
11937                              const struct rte_flow_attr *attr)
11938 {
11939         struct mlx5_priv *priv = dev->data->dev_private;
11940         struct mlx5_meter_domains_infos *mtb = fm->mfts;
11941         int ret;
11942
11943         if (attr->egress) {
11944                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
11945                                                 priv->mtr_color_reg);
11946                 if (ret) {
11947                         DRV_LOG(ERR, "Failed to create egress policer.");
11948                         goto error;
11949                 }
11950         }
11951         if (attr->ingress) {
11952                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
11953                                                 priv->mtr_color_reg);
11954                 if (ret) {
11955                         DRV_LOG(ERR, "Failed to create ingress policer.");
11956                         goto error;
11957                 }
11958         }
11959         if (attr->transfer) {
11960                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
11961                                                 priv->mtr_color_reg);
11962                 if (ret) {
11963                         DRV_LOG(ERR, "Failed to create transfer policer.");
11964                         goto error;
11965                 }
11966         }
11967         return 0;
11968 error:
11969         flow_dv_destroy_policer_rules(dev, fm, attr);
11970         return -1;
11971 }
11972
11973 /**
11974  * Validate the batch counter support in root table.
11975  *
11976  * Create a simple flow with invalid counter and drop action on root table to
11977  * validate if batch counter with offset on root table is supported or not.
11978  *
11979  * @param[in] dev
11980  *   Pointer to rte_eth_dev structure.
11981  *
11982  * @return
11983  *   0 on success, a negative errno value otherwise and rte_errno is set.
11984  */
11985 int
11986 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
11987 {
11988         struct mlx5_priv *priv = dev->data->dev_private;
11989         struct mlx5_dev_ctx_shared *sh = priv->sh;
11990         struct mlx5_flow_dv_match_params mask = {
11991                 .size = sizeof(mask.buf),
11992         };
11993         struct mlx5_flow_dv_match_params value = {
11994                 .size = sizeof(value.buf),
11995         };
11996         struct mlx5dv_flow_matcher_attr dv_attr = {
11997                 .type = IBV_FLOW_ATTR_NORMAL,
11998                 .priority = 0,
11999                 .match_criteria_enable = 0,
12000                 .match_mask = (void *)&mask,
12001         };
12002         void *actions[2] = { 0 };
12003         struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
12004         struct mlx5_devx_obj *dcs = NULL;
12005         void *matcher = NULL;
12006         void *flow = NULL;
12007         int i, ret = -1;
12008
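              /* Use a root (level 0) table and a level 1 destination table. */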
12009         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, NULL);
12010         if (!tbl)
12011                 goto err;
12012         dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, NULL);
12013         if (!dest_tbl)
12014                 goto err;
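              /* Allocate a batch counter object to probe offset support with. */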
12015         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
12016         if (!dcs)
12017                 goto err;
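              /* Build a counter action with a deliberately invalid offset. */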
12018         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
12019                                                     &actions[0]);
12020         if (ret)
12021                 goto err;
12022         ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
12023                                 (dest_tbl->obj, &actions[1]);
12024         if (ret)
12025                 goto err;
12026         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
12027         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
12028                                                &matcher);
12029         if (ret)
12030                 goto err;
12031         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
12032                                        actions, &flow);
12033 err:
12034         /*
12035          * If batch counters with offset are not supported, the driver does
12036          * not validate the invalid offset value and flow creation succeeds,
12037          * meaning batch counters are not supported in the root table.
12038          *
12039          * Otherwise, if flow creation fails, counter offset is supported.
12040          */
12041         if (flow) {
12042                 DRV_LOG(INFO, "Batch counter is not supported in root "
12043                               "table. Switch to fallback mode.");
12044                 rte_errno = ENOTSUP;
12045                 ret = -rte_errno;
12046                 claim_zero(mlx5_flow_os_destroy_flow(flow));
12047         } else {
12048                 /* Check the matcher to ensure validation failed at flow creation. */
12049                 if (!matcher || errno != EINVAL)
12050                         DRV_LOG(ERR, "Unexpected error in counter offset "
12051                                      "support detection");
12052                 ret = 0;
12053         }
12054         for (i = 0; i < 2; i++) {
12055                 if (actions[i])
12056                         claim_zero(mlx5_flow_os_destroy_flow_action
12057                                    (actions[i]));
12058         }
12059         if (matcher)
12060                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
12061         if (tbl)
12062                 flow_dv_tbl_resource_release(dev, tbl);
12063         if (dest_tbl)
12064                 flow_dv_tbl_resource_release(dev, dest_tbl);
12065         if (dcs)
12066                 claim_zero(mlx5_devx_cmd_destroy(dcs));
12067         return ret;
12068 }
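
      /*
       * Usage sketch (hedged): the probe result typically selects counter
       * fallback mode during PMD start-up, e.g.:
       *
       *     if (mlx5_flow_dv_discover_counter_offset_support(dev))
       *             priv->sh->cmng.counter_fallback = 1;
       *
       * The counter_fallback flag is an assumption here; the counter
       * management code is the authoritative consumer of this probe.
       */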
12069
12070 /**
12071  * Query a devx counter.
12072  *
12073  * @param[in] dev
12074  *   Pointer to the Ethernet device structure.
12075  * @param[in] cnt
12076  *   Index to the flow counter.
12077  * @param[in] clear
12078  *   Set to clear the counter statistics.
12079  * @param[out] pkts
12080  *   The statistics value of packets.
12081  * @param[out] bytes
12082  *   The statistics value of bytes.
12083  *
12084  * @return
12085  *   0 on success, otherwise return -1.
12086  */
12087 static int
12088 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12089                       uint64_t *pkts, uint64_t *bytes)
12090 {
12091         struct mlx5_priv *priv = dev->data->dev_private;
12092         struct mlx5_flow_counter *cnt;
12093         uint64_t inn_pkts, inn_bytes;
12094         int ret;
12095
12096         if (!priv->config.devx)
12097                 return -1;
12098
12099         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
12100         if (ret)
12101                 return -1;
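              /* Report the delta since the last clear and optionally reset it. */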
12102         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
12103         *pkts = inn_pkts - cnt->hits;
12104         *bytes = inn_bytes - cnt->bytes;
12105         if (clear) {
12106                 cnt->hits = inn_pkts;
12107                 cnt->bytes = inn_bytes;
12108         }
12109         return 0;
12110 }
12111
12112 /**
12113  * Get aged-out flows.
12114  *
12115  * @param[in] dev
12116  *   Pointer to the Ethernet device structure.
12117  * @param[in] context
12118  *   The address of an array of pointers to the aged-out flows contexts.
12119  * @param[in] nb_contexts
12120  *   The length of context array pointers.
12121  * @param[out] error
12122  *   Perform verbose error reporting if not NULL. Initialized in case of
12123  *   error only.
12124  *
12125  * @return
12126  *   The number of aged contexts reported on success, otherwise a
12127  *   negative errno value. If nb_contexts is 0, return the total number
12128  *   of aged contexts. If nb_contexts is not 0, return the number of
12129  *   aged flows reported in the context array.
12130  * @note Flow aging is tracked through the flow counters.
12131  */
12132 static int
12133 flow_get_aged_flows(struct rte_eth_dev *dev,
12134                     void **context,
12135                     uint32_t nb_contexts,
12136                     struct rte_flow_error *error)
12137 {
12138         struct mlx5_priv *priv = dev->data->dev_private;
12139         struct mlx5_age_info *age_info;
12140         struct mlx5_age_param *age_param;
12141         struct mlx5_flow_counter *counter;
12142         int nb_flows = 0;
12143
12144         if (nb_contexts && !context)
12145                 return rte_flow_error_set(error, EINVAL,
12146                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12147                                           NULL,
12148                                           "Should assign at least one flow or"
12149                                           " context to get if nb_contexts != 0");
12150         age_info = GET_PORT_AGE_INFO(priv);
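              /* Walk the aged-counter list under its lock. */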
12151         rte_spinlock_lock(&age_info->aged_sl);
12152         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
12153                 nb_flows++;
12154                 if (nb_contexts) {
12155                         age_param = MLX5_CNT_TO_AGE(counter);
12156                         context[nb_flows - 1] = age_param->context;
12157                         if (!(--nb_contexts))
12158                                 break;
12159                 }
12160         }
12161         rte_spinlock_unlock(&age_info->aged_sl);
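              /* Re-arm the aged-flow event for flows that age out later. */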
12162         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
12163         return nb_flows;
12164 }
12165
12166 /*
12167  * Mutex-protected thunk to lock-free __flow_dv_translate().
12168  */
12169 static int
12170 flow_dv_translate(struct rte_eth_dev *dev,
12171                   struct mlx5_flow *dev_flow,
12172                   const struct rte_flow_attr *attr,
12173                   const struct rte_flow_item items[],
12174                   const struct rte_flow_action actions[],
12175                   struct rte_flow_error *error)
12176 {
12177         int ret;
12178
12179         flow_dv_shared_lock(dev);
12180         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
12181         flow_dv_shared_unlock(dev);
12182         return ret;
12183 }
12184
12185 /*
12186  * Mutex-protected thunk to lock-free __flow_dv_apply().
12187  */
12188 static int
12189 flow_dv_apply(struct rte_eth_dev *dev,
12190               struct rte_flow *flow,
12191               struct rte_flow_error *error)
12192 {
12193         int ret;
12194
12195         flow_dv_shared_lock(dev);
12196         ret = __flow_dv_apply(dev, flow, error);
12197         flow_dv_shared_unlock(dev);
12198         return ret;
12199 }
12200
12201 /*
12202  * Mutex-protected thunk to lock-free __flow_dv_remove().
12203  */
12204 static void
12205 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12206 {
12207         flow_dv_shared_lock(dev);
12208         __flow_dv_remove(dev, flow);
12209         flow_dv_shared_unlock(dev);
12210 }
12211
12212 /*
12213  * Mutex-protected thunk to lock-free __flow_dv_destroy().
12214  */
12215 static void
12216 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
12217 {
12218         flow_dv_shared_lock(dev);
12219         __flow_dv_destroy(dev, flow);
12220         flow_dv_shared_unlock(dev);
12221 }
12222
12223 /*
12224  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12225  */
12226 static uint32_t
12227 flow_dv_counter_allocate(struct rte_eth_dev *dev)
12228 {
12229         uint32_t cnt;
12230
12231         flow_dv_shared_lock(dev);
12232         cnt = flow_dv_counter_alloc(dev, 0);
12233         flow_dv_shared_unlock(dev);
12234         return cnt;
12235 }
12236
12237 /*
12238  * Mutex-protected thunk to lock-free flow_dv_counter_release().
12239  */
12240 static void
12241 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
12242 {
12243         flow_dv_shared_lock(dev);
12244         flow_dv_counter_release(dev, cnt);
12245         flow_dv_shared_unlock(dev);
12246 }
12247
12248 /**
12249  * Validate shared action.
12250  * Dispatcher for action type specific validation.
12251  *
12252  * @param[in] dev
12253  *   Pointer to the Ethernet device structure.
12254  * @param[in] conf
12255  *   Shared action configuration.
12256  * @param[in] action
12257  *   The shared action object to validate.
12258  * @param[out] error
12259  *   Perform verbose error reporting if not NULL. Initialized in case of
12260  *   error only.
12261  *
12262  * @return
12263  *   0 on success, otherwise negative errno value.
12264  */
12265 static int
12266 flow_dv_action_validate(struct rte_eth_dev *dev,
12267                         const struct rte_flow_shared_action_conf *conf,
12268                         const struct rte_flow_action *action,
12269                         struct rte_flow_error *error)
12270 {
12271         RTE_SET_USED(conf);
12272         switch (action->type) {
12273         case RTE_FLOW_ACTION_TYPE_RSS:
12274                 return mlx5_validate_action_rss(dev, action, error);
12275         default:
12276                 return rte_flow_error_set(error, ENOTSUP,
12277                                           RTE_FLOW_ERROR_TYPE_ACTION,
12278                                           NULL,
12279                                           "action type not supported");
12280         }
12281 }
12282
12283 /*
12284  * Mutex-protected thunk to lock-free __flow_dv_action_create().
12285  */
12286 static struct rte_flow_shared_action *
12287 flow_dv_action_create(struct rte_eth_dev *dev,
12288                       const struct rte_flow_shared_action_conf *conf,
12289                       const struct rte_flow_action *action,
12290                       struct rte_flow_error *error)
12291 {
12292         struct rte_flow_shared_action *shared_action = NULL;
12293
12294         flow_dv_shared_lock(dev);
12295         shared_action = __flow_dv_action_create(dev, conf, action, error);
12296         flow_dv_shared_unlock(dev);
12297         return shared_action;
12298 }
12299
12300 /*
12301  * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
12302  */
12303 static int
12304 flow_dv_action_destroy(struct rte_eth_dev *dev,
12305                        struct rte_flow_shared_action *action,
12306                        struct rte_flow_error *error)
12307 {
12308         int ret;
12309
12310         flow_dv_shared_lock(dev);
12311         ret = __flow_dv_action_destroy(dev, action, error);
12312         flow_dv_shared_unlock(dev);
12313         return ret;
12314 }
12315
12316 /*
12317  * Mutex-protected thunk to lock-free __flow_dv_action_update().
12318  */
12319 static int
12320 flow_dv_action_update(struct rte_eth_dev *dev,
12321                       struct rte_flow_shared_action *action,
12322                       const void *action_conf,
12323                       struct rte_flow_error *error)
12324 {
12325         int ret;
12326
12327         flow_dv_shared_lock(dev);
12328         ret = __flow_dv_action_update(dev, action, action_conf,
12329                                       error);
12330         flow_dv_shared_unlock(dev);
12331         return ret;
12332 }
12333
12334 static int
12335 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12336 {
12337         struct mlx5_priv *priv = dev->data->dev_private;
12338         int ret = 0;
12339
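              /* Synchronize cached steering rules to HW per requested domain. */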
12340         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12341                 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12342                                                 flags);
12343                 if (ret != 0)
12344                         return ret;
12345         }
12346         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12347                 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12348                 if (ret != 0)
12349                         return ret;
12350         }
12351         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12352                 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
12353                 if (ret != 0)
12354                         return ret;
12355         }
12356         return 0;
12357 }
12358
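      /*
       * DV flow engine callbacks, dispatched from the generic mlx5 flow
       * layer when the DV/DR engine is enabled (dv_flow_en devarg).
       */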
12359 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
12360         .validate = flow_dv_validate,
12361         .prepare = flow_dv_prepare,
12362         .translate = flow_dv_translate,
12363         .apply = flow_dv_apply,
12364         .remove = flow_dv_remove,
12365         .destroy = flow_dv_destroy,
12366         .query = flow_dv_query,
12367         .create_mtr_tbls = flow_dv_create_mtr_tbl,
12368         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
12369         .create_policer_rules = flow_dv_create_policer_rules,
12370         .destroy_policer_rules = flow_dv_destroy_policer_rules,
12371         .counter_alloc = flow_dv_counter_allocate,
12372         .counter_free = flow_dv_counter_free,
12373         .counter_query = flow_dv_counter_query,
12374         .get_aged_flows = flow_get_aged_flows,
12375         .action_validate = flow_dv_action_validate,
12376         .action_create = flow_dv_action_create,
12377         .action_destroy = flow_dv_action_destroy,
12378         .action_update = flow_dv_action_update,
12379         .sync_domain = flow_dv_sync_domain,
12380 };
12381
12382 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
12383