net/mlx5: fix sample register error flow
[dpdk.git] drivers/net/mlx5/mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24
25 #include <mlx5_glue.h>
26 #include <mlx5_devx_cmds.h>
27 #include <mlx5_prm.h>
28 #include <mlx5_malloc.h>
29
30 #include "mlx5_defs.h"
31 #include "mlx5.h"
32 #include "mlx5_common_os.h"
33 #include "mlx5_flow.h"
34 #include "mlx5_flow_os.h"
35 #include "mlx5_rxtx.h"
36 #include "rte_pmd_mlx5.h"
37
38 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39
40 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
41 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #endif
43
44 #ifndef HAVE_MLX5DV_DR_ESWITCH
45 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
46 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
47 #endif
48 #endif
49
50 #ifndef HAVE_MLX5DV_DR
51 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
52 #endif
53
54 /* VLAN header definitions */
55 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
56 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
57 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
58 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
59 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
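
/*
 * Illustrative example (not part of the driver): for PCP 5 and VID 100
 * the TCI is built as
 *   (5 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | (100 & MLX5DV_FLOW_VLAN_VID_MASK)
 * which evaluates to 0xa000 | 0x0064 == 0xa064.
 */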
60
61 union flow_dv_attr {
62         struct {
63                 uint32_t valid:1;
64                 uint32_t ipv4:1;
65                 uint32_t ipv6:1;
66                 uint32_t tcp:1;
67                 uint32_t udp:1;
68                 uint32_t reserved:27;
69         };
70         uint32_t attr;
71 };
72
73 static int
74 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
75                              struct mlx5_flow_tbl_resource *tbl);
76
77 static int
78 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
79                                       uint32_t encap_decap_idx);
80
81 static int
82 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
83                                         uint32_t port_id);
84
85 /**
86  * Initialize flow attributes structure according to flow items' types.
87  *
88  * flow_dv_validate() rejects multiple L3/L4 layers except in tunnel
89  * mode. For tunnel mode, the items to be modified are the outermost ones.
90  *
91  * @param[in] item
92  *   Pointer to the list of item specifications.
93  * @param[out] attr
94  *   Pointer to flow attributes structure.
95  * @param[in] dev_flow
96  *   Pointer to the sub flow.
97  * @param[in] tunnel_decap
98  *   Whether action is after tunnel decapsulation.
99  */
100 static void
101 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
102                   struct mlx5_flow *dev_flow, bool tunnel_decap)
103 {
104         uint64_t layers = dev_flow->handle->layers;
105
106         /*
107          * If layers is already initialized, this dev_flow is the suffix
108          * flow and the layer flags were set by the prefix flow. Use the
109          * layer flags from the prefix flow, as the suffix flow may not
110          * have the user-defined items after the flow split.
111          */
112         if (layers) {
113                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
114                         attr->ipv4 = 1;
115                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
116                         attr->ipv6 = 1;
117                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
118                         attr->tcp = 1;
119                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
120                         attr->udp = 1;
121                 attr->valid = 1;
122                 return;
123         }
124         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
125                 uint8_t next_protocol = 0xff;
126                 switch (item->type) {
127                 case RTE_FLOW_ITEM_TYPE_GRE:
128                 case RTE_FLOW_ITEM_TYPE_NVGRE:
129                 case RTE_FLOW_ITEM_TYPE_VXLAN:
130                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
131                 case RTE_FLOW_ITEM_TYPE_GENEVE:
132                 case RTE_FLOW_ITEM_TYPE_MPLS:
133                         if (tunnel_decap)
134                                 attr->attr = 0;
135                         break;
136                 case RTE_FLOW_ITEM_TYPE_IPV4:
137                         if (!attr->ipv6)
138                                 attr->ipv4 = 1;
139                         if (item->mask != NULL &&
140                             ((const struct rte_flow_item_ipv4 *)
141                             item->mask)->hdr.next_proto_id)
142                                 next_protocol =
143                                     ((const struct rte_flow_item_ipv4 *)
144                                       (item->spec))->hdr.next_proto_id &
145                                     ((const struct rte_flow_item_ipv4 *)
146                                       (item->mask))->hdr.next_proto_id;
147                         if ((next_protocol == IPPROTO_IPIP ||
148                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
149                                 attr->attr = 0;
150                         break;
151                 case RTE_FLOW_ITEM_TYPE_IPV6:
152                         if (!attr->ipv4)
153                                 attr->ipv6 = 1;
154                         if (item->mask != NULL &&
155                             ((const struct rte_flow_item_ipv6 *)
156                             item->mask)->hdr.proto)
157                                 next_protocol =
158                                     ((const struct rte_flow_item_ipv6 *)
159                                       (item->spec))->hdr.proto &
160                                     ((const struct rte_flow_item_ipv6 *)
161                                       (item->mask))->hdr.proto;
162                         if ((next_protocol == IPPROTO_IPIP ||
163                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
164                                 attr->attr = 0;
165                         break;
166                 case RTE_FLOW_ITEM_TYPE_UDP:
167                         if (!attr->tcp)
168                                 attr->udp = 1;
169                         break;
170                 case RTE_FLOW_ITEM_TYPE_TCP:
171                         if (!attr->udp)
172                                 attr->tcp = 1;
173                         break;
174                 default:
175                         break;
176                 }
177         }
178         attr->valid = 1;
179 }
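
/*
 * Illustrative example (not part of the driver): for a pattern of
 * eth / ipv4 / udp / end with no prefix flow, flow_dv_attr_init()
 * leaves attr with ipv4 = 1, udp = 1 and valid = 1, so the
 * modify-header converters below pick the IPv4/UDP field tables.
 */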
180
181 /**
182  * Convert rte_mtr_color to mlx5 color.
183  *
184  * @param[in] rcol
185  *   rte_mtr_color.
186  *
187  * @return
188  *   mlx5 color.
189  */
190 static int
191 rte_col_2_mlx5_col(enum rte_color rcol)
192 {
193         switch (rcol) {
194         case RTE_COLOR_GREEN:
195                 return MLX5_FLOW_COLOR_GREEN;
196         case RTE_COLOR_YELLOW:
197                 return MLX5_FLOW_COLOR_YELLOW;
198         case RTE_COLOR_RED:
199                 return MLX5_FLOW_COLOR_RED;
200         default:
201                 break;
202         }
203         return MLX5_FLOW_COLOR_UNDEFINED;
204 }
205
206 struct field_modify_info {
207         uint32_t size; /* Size of field in protocol header, in bytes. */
208         uint32_t offset; /* Offset of field in protocol header, in bytes. */
209         enum mlx5_modification_field id;
210 };
211
212 struct field_modify_info modify_eth[] = {
213         {4,  0, MLX5_MODI_OUT_DMAC_47_16},
214         {2,  4, MLX5_MODI_OUT_DMAC_15_0},
215         {4,  6, MLX5_MODI_OUT_SMAC_47_16},
216         {2, 10, MLX5_MODI_OUT_SMAC_15_0},
217         {0, 0, 0},
218 };
219
220 struct field_modify_info modify_vlan_out_first_vid[] = {
221         /* Size in bits !!! */
222         {12, 0, MLX5_MODI_OUT_FIRST_VID},
223         {0, 0, 0},
224 };
225
226 struct field_modify_info modify_ipv4[] = {
227         {1,  1, MLX5_MODI_OUT_IP_DSCP},
228         {1,  8, MLX5_MODI_OUT_IPV4_TTL},
229         {4, 12, MLX5_MODI_OUT_SIPV4},
230         {4, 16, MLX5_MODI_OUT_DIPV4},
231         {0, 0, 0},
232 };
233
234 struct field_modify_info modify_ipv6[] = {
235         {1,  0, MLX5_MODI_OUT_IP_DSCP},
236         {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
237         {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
238         {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
239         {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
240         {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
241         {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
242         {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
243         {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
244         {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
245         {0, 0, 0},
246 };
247
248 struct field_modify_info modify_udp[] = {
249         {2, 0, MLX5_MODI_OUT_UDP_SPORT},
250         {2, 2, MLX5_MODI_OUT_UDP_DPORT},
251         {0, 0, 0},
252 };
253
254 struct field_modify_info modify_tcp[] = {
255         {2, 0, MLX5_MODI_OUT_TCP_SPORT},
256         {2, 2, MLX5_MODI_OUT_TCP_DPORT},
257         {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
258         {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
259         {0, 0, 0},
260 };
261
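/**
 * Set tunnel item flags when IP-in-IP encapsulation is detected.
 *
 * @param[in] item
 *   Pointer to the IPv4/IPv6 item (used by assertions only).
 * @param[in] next_protocol
 *   Inner protocol value taken from the item.
 * @param[in,out] item_flags
 *   Item flags to update; MLX5_FLOW_LAYER_IPIP or
 *   MLX5_FLOW_LAYER_IPV6_ENCAP may be set.
 * @param[in,out] tunnel
 *   Set to 1 when an IP-in-IP tunnel is detected.
 */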
262 static void
263 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
264                           uint8_t next_protocol, uint64_t *item_flags,
265                           int *tunnel)
266 {
267         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
268                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
269         if (next_protocol == IPPROTO_IPIP) {
270                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
271                 *tunnel = 1;
272         }
273         if (next_protocol == IPPROTO_IPV6) {
274                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
275                 *tunnel = 1;
276         }
277 }
278
279 /**
280  * Acquire the synchronizing object to protect multithreaded access
281  * to shared dv context. Lock occurs only if context is actually
282  * shared, i.e. we have multiport IB device and representors are
283  * created.
284  *
285  * @param[in] dev
286  *   Pointer to the rte_eth_dev structure.
287  */
288 static void
289 flow_dv_shared_lock(struct rte_eth_dev *dev)
290 {
291         struct mlx5_priv *priv = dev->data->dev_private;
292         struct mlx5_dev_ctx_shared *sh = priv->sh;
293
294         if (sh->refcnt > 1) {
295                 int ret;
296
297                 ret = pthread_mutex_lock(&sh->dv_mutex);
298                 MLX5_ASSERT(!ret);
299                 (void)ret;
300         }
301 }
302
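/**
 * Release the synchronizing object locked by flow_dv_shared_lock().
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */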
303 static void
304 flow_dv_shared_unlock(struct rte_eth_dev *dev)
305 {
306         struct mlx5_priv *priv = dev->data->dev_private;
307         struct mlx5_dev_ctx_shared *sh = priv->sh;
308
309         if (sh->refcnt > 1) {
310                 int ret;
311
312                 ret = pthread_mutex_unlock(&sh->dv_mutex);
313                 MLX5_ASSERT(!ret);
314                 (void)ret;
315         }
316 }
317
318 /**
319  * Update VLAN's VID/PCP based on the input rte_flow_action.
320  * @param[in] action
321  *   Pointer to struct rte_flow_action.
322  * @param[out] vlan
323  *   Pointer to struct rte_vlan_hdr.
324  */
325 static void
326 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
327                          struct rte_vlan_hdr *vlan)
328 {
329         uint16_t vlan_tci;
330         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
331                 vlan_tci =
332                     ((const struct rte_flow_action_of_set_vlan_pcp *)
333                                                action->conf)->vlan_pcp;
334                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
335                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
336                 vlan->vlan_tci |= vlan_tci;
337         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
338                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
339                 vlan->vlan_tci |= rte_be_to_cpu_16
340                     (((const struct rte_flow_action_of_set_vlan_vid *)
341                                              action->conf)->vlan_vid);
342         }
343 }
344
345 /**
346  * Fetch 1, 2, 3 or 4 byte field from the byte array
347  * and return as unsigned integer in host-endian format.
348  *
349  * @param[in] data
350  *   Pointer to data array.
351  * @param[in] size
352  *   Size of field to extract.
353  *
354  * @return
355  *   Converted field in host-endian format.
356  */
357 static inline uint32_t
358 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
359 {
360         uint32_t ret;
361
362         switch (size) {
363         case 1:
364                 ret = *data;
365                 break;
366         case 2:
367                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
368                 break;
369         case 3:
370                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
371                 ret = (ret << 8) | *(data + sizeof(uint16_t));
372                 break;
373         case 4:
374                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
375                 break;
376         default:
377                 MLX5_ASSERT(false);
378                 ret = 0;
379                 break;
380         }
381         return ret;
382 }
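
/*
 * Illustrative example (not part of the driver): for the big-endian
 * byte array {0x12, 0x34, 0x56} and size 3, flow_dv_fetch_field()
 * loads 0x1234 from the first two bytes, then appends 0x56, and
 * returns 0x123456 in host-endian format.
 */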
383
384 /**
385  * Convert modify-header action to DV specification.
386  *
387  * The data length of each action is determined by the provided field
388  * description and the item mask. The data bit offset and width of each
389  * action are determined by the provided item mask.
390  *
391  * @param[in] item
392  *   Pointer to item specification.
393  * @param[in] field
394  *   Pointer to field modification information.
395  *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
396  *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
397  *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
398  * @param[in] dcopy
399  *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
400  *   A negative offset value sets the same offset as the source offset.
401  *   The size field is ignored; the value is taken from the source field.
402  * @param[in,out] resource
403  *   Pointer to the modify-header resource.
404  * @param[in] type
405  *   Type of modification.
406  * @param[out] error
407  *   Pointer to the error structure.
408  *
409  * @return
410  *   0 on success, a negative errno value otherwise and rte_errno is set.
411  */
412 static int
413 flow_dv_convert_modify_action(struct rte_flow_item *item,
414                               struct field_modify_info *field,
415                               struct field_modify_info *dcopy,
416                               struct mlx5_flow_dv_modify_hdr_resource *resource,
417                               uint32_t type, struct rte_flow_error *error)
418 {
419         uint32_t i = resource->actions_num;
420         struct mlx5_modification_cmd *actions = resource->actions;
421
422         /*
423          * The item and mask are provided in big-endian format.
424          * The fields should be presented in big-endian format as well.
425          * The mask must always be present; it defines the actual field width.
426          */
427         MLX5_ASSERT(item->mask);
428         MLX5_ASSERT(field->size);
429         do {
430                 unsigned int size_b;
431                 unsigned int off_b;
432                 uint32_t mask;
433                 uint32_t data;
434
435                 if (i >= MLX5_MAX_MODIFY_NUM)
436                         return rte_flow_error_set(error, EINVAL,
437                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
438                                  "too many items to modify");
439                 /* Fetch variable byte size mask from the array. */
440                 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
441                                            field->offset, field->size);
442                 if (!mask) {
443                         ++field;
444                         continue;
445                 }
446                 /* Deduce actual data width in bits from mask value. */
447                 off_b = rte_bsf32(mask);
448                 size_b = sizeof(uint32_t) * CHAR_BIT -
449                          off_b - __builtin_clz(mask);
450                 MLX5_ASSERT(size_b);
451                 size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
452                 actions[i] = (struct mlx5_modification_cmd) {
453                         .action_type = type,
454                         .field = field->id,
455                         .offset = off_b,
456                         .length = size_b,
457                 };
458                 /* Convert entire record to expected big-endian format. */
459                 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
460                 if (type == MLX5_MODIFICATION_TYPE_COPY) {
461                         MLX5_ASSERT(dcopy);
462                         actions[i].dst_field = dcopy->id;
463                         actions[i].dst_offset =
464                                 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
465                         /* Convert entire record to big-endian format. */
466                         actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
467                 } else {
468                         MLX5_ASSERT(item->spec);
469                         data = flow_dv_fetch_field((const uint8_t *)item->spec +
470                                                    field->offset, field->size);
471                         /* Shift out the trailing masked bits from data. */
472                         data = (data & mask) >> off_b;
473                         actions[i].data1 = rte_cpu_to_be_32(data);
474                 }
475                 ++i;
476                 ++field;
477         } while (field->size);
478         if (resource->actions_num == i)
479                 return rte_flow_error_set(error, EINVAL,
480                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
481                                           "invalid modification flow item");
482         resource->actions_num = i;
483         return 0;
484 }
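
/*
 * Illustrative example (not part of the driver): for a 4-byte field
 * with mask 0x00ffff00, off_b = rte_bsf32(0x00ffff00) = 8 and
 * size_b = 32 - 8 - __builtin_clz(0x00ffff00) = 32 - 8 - 8 = 16,
 * producing a command that patches a 16-bit slice at bit offset 8;
 * the spec data is masked and shifted right by 8 before being stored.
 */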
485
486 /**
487  * Convert modify-header set IPv4 address action to DV specification.
488  *
489  * @param[in,out] resource
490  *   Pointer to the modify-header resource.
491  * @param[in] action
492  *   Pointer to action specification.
493  * @param[out] error
494  *   Pointer to the error structure.
495  *
496  * @return
497  *   0 on success, a negative errno value otherwise and rte_errno is set.
498  */
499 static int
500 flow_dv_convert_action_modify_ipv4
501                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
502                          const struct rte_flow_action *action,
503                          struct rte_flow_error *error)
504 {
505         const struct rte_flow_action_set_ipv4 *conf =
506                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
507         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
508         struct rte_flow_item_ipv4 ipv4;
509         struct rte_flow_item_ipv4 ipv4_mask;
510
511         memset(&ipv4, 0, sizeof(ipv4));
512         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
513         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
514                 ipv4.hdr.src_addr = conf->ipv4_addr;
515                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
516         } else {
517                 ipv4.hdr.dst_addr = conf->ipv4_addr;
518                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
519         }
520         item.spec = &ipv4;
521         item.mask = &ipv4_mask;
522         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
523                                              MLX5_MODIFICATION_TYPE_SET, error);
524 }
525
526 /**
527  * Convert modify-header set IPv6 address action to DV specification.
528  *
529  * @param[in,out] resource
530  *   Pointer to the modify-header resource.
531  * @param[in] action
532  *   Pointer to action specification.
533  * @param[out] error
534  *   Pointer to the error structure.
535  *
536  * @return
537  *   0 on success, a negative errno value otherwise and rte_errno is set.
538  */
539 static int
540 flow_dv_convert_action_modify_ipv6
541                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
542                          const struct rte_flow_action *action,
543                          struct rte_flow_error *error)
544 {
545         const struct rte_flow_action_set_ipv6 *conf =
546                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
547         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
548         struct rte_flow_item_ipv6 ipv6;
549         struct rte_flow_item_ipv6 ipv6_mask;
550
551         memset(&ipv6, 0, sizeof(ipv6));
552         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
553         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
554                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
555                        sizeof(ipv6.hdr.src_addr));
556                 memcpy(&ipv6_mask.hdr.src_addr,
557                        &rte_flow_item_ipv6_mask.hdr.src_addr,
558                        sizeof(ipv6.hdr.src_addr));
559         } else {
560                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
561                        sizeof(ipv6.hdr.dst_addr));
562                 memcpy(&ipv6_mask.hdr.dst_addr,
563                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
564                        sizeof(ipv6.hdr.dst_addr));
565         }
566         item.spec = &ipv6;
567         item.mask = &ipv6_mask;
568         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
569                                              MLX5_MODIFICATION_TYPE_SET, error);
570 }
571
572 /**
573  * Convert modify-header set MAC address action to DV specification.
574  *
575  * @param[in,out] resource
576  *   Pointer to the modify-header resource.
577  * @param[in] action
578  *   Pointer to action specification.
579  * @param[out] error
580  *   Pointer to the error structure.
581  *
582  * @return
583  *   0 on success, a negative errno value otherwise and rte_errno is set.
584  */
585 static int
586 flow_dv_convert_action_modify_mac
587                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
588                          const struct rte_flow_action *action,
589                          struct rte_flow_error *error)
590 {
591         const struct rte_flow_action_set_mac *conf =
592                 (const struct rte_flow_action_set_mac *)(action->conf);
593         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
594         struct rte_flow_item_eth eth;
595         struct rte_flow_item_eth eth_mask;
596
597         memset(&eth, 0, sizeof(eth));
598         memset(&eth_mask, 0, sizeof(eth_mask));
599         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
600                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
601                        sizeof(eth.src.addr_bytes));
602                 memcpy(&eth_mask.src.addr_bytes,
603                        &rte_flow_item_eth_mask.src.addr_bytes,
604                        sizeof(eth_mask.src.addr_bytes));
605         } else {
606                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
607                        sizeof(eth.dst.addr_bytes));
608                 memcpy(&eth_mask.dst.addr_bytes,
609                        &rte_flow_item_eth_mask.dst.addr_bytes,
610                        sizeof(eth_mask.dst.addr_bytes));
611         }
612         item.spec = &eth;
613         item.mask = &eth_mask;
614         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
615                                              MLX5_MODIFICATION_TYPE_SET, error);
616 }
617
618 /**
619  * Convert modify-header set VLAN VID action to DV specification.
620  *
621  * @param[in,out] resource
622  *   Pointer to the modify-header resource.
623  * @param[in] action
624  *   Pointer to action specification.
625  * @param[out] error
626  *   Pointer to the error structure.
627  *
628  * @return
629  *   0 on success, a negative errno value otherwise and rte_errno is set.
630  */
631 static int
632 flow_dv_convert_action_modify_vlan_vid
633                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
634                          const struct rte_flow_action *action,
635                          struct rte_flow_error *error)
636 {
637         const struct rte_flow_action_of_set_vlan_vid *conf =
638                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
639         int i = resource->actions_num;
640         struct mlx5_modification_cmd *actions = resource->actions;
641         struct field_modify_info *field = modify_vlan_out_first_vid;
642
643         if (i >= MLX5_MAX_MODIFY_NUM)
644                 return rte_flow_error_set(error, EINVAL,
645                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
646                          "too many items to modify");
647         actions[i] = (struct mlx5_modification_cmd) {
648                 .action_type = MLX5_MODIFICATION_TYPE_SET,
649                 .field = field->id,
650                 .length = field->size,
651                 .offset = field->offset,
652         };
653         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
654         actions[i].data1 = conf->vlan_vid;
655         actions[i].data1 = actions[i].data1 << 16;
656         resource->actions_num = ++i;
657         return 0;
658 }
659
660 /**
661  * Convert modify-header set TP action to DV specification.
662  *
663  * @param[in,out] resource
664  *   Pointer to the modify-header resource.
665  * @param[in] action
666  *   Pointer to action specification.
667  * @param[in] items
668  *   Pointer to rte_flow_item objects list.
669  * @param[in] attr
670  *   Pointer to flow attributes structure.
671  * @param[in] dev_flow
672  *   Pointer to the sub flow.
673  * @param[in] tunnel_decap
674  *   Whether action is after tunnel decapsulation.
675  * @param[out] error
676  *   Pointer to the error structure.
677  *
678  * @return
679  *   0 on success, a negative errno value otherwise and rte_errno is set.
680  */
681 static int
682 flow_dv_convert_action_modify_tp
683                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
684                          const struct rte_flow_action *action,
685                          const struct rte_flow_item *items,
686                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
687                          bool tunnel_decap, struct rte_flow_error *error)
688 {
689         const struct rte_flow_action_set_tp *conf =
690                 (const struct rte_flow_action_set_tp *)(action->conf);
691         struct rte_flow_item item;
692         struct rte_flow_item_udp udp;
693         struct rte_flow_item_udp udp_mask;
694         struct rte_flow_item_tcp tcp;
695         struct rte_flow_item_tcp tcp_mask;
696         struct field_modify_info *field;
697
698         if (!attr->valid)
699                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
700         if (attr->udp) {
701                 memset(&udp, 0, sizeof(udp));
702                 memset(&udp_mask, 0, sizeof(udp_mask));
703                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
704                         udp.hdr.src_port = conf->port;
705                         udp_mask.hdr.src_port =
706                                         rte_flow_item_udp_mask.hdr.src_port;
707                 } else {
708                         udp.hdr.dst_port = conf->port;
709                         udp_mask.hdr.dst_port =
710                                         rte_flow_item_udp_mask.hdr.dst_port;
711                 }
712                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
713                 item.spec = &udp;
714                 item.mask = &udp_mask;
715                 field = modify_udp;
716         } else {
717                 MLX5_ASSERT(attr->tcp);
718                 memset(&tcp, 0, sizeof(tcp));
719                 memset(&tcp_mask, 0, sizeof(tcp_mask));
720                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
721                         tcp.hdr.src_port = conf->port;
722                         tcp_mask.hdr.src_port =
723                                         rte_flow_item_tcp_mask.hdr.src_port;
724                 } else {
725                         tcp.hdr.dst_port = conf->port;
726                         tcp_mask.hdr.dst_port =
727                                         rte_flow_item_tcp_mask.hdr.dst_port;
728                 }
729                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
730                 item.spec = &tcp;
731                 item.mask = &tcp_mask;
732                 field = modify_tcp;
733         }
734         return flow_dv_convert_modify_action(&item, field, NULL, resource,
735                                              MLX5_MODIFICATION_TYPE_SET, error);
736 }
737
738 /**
739  * Convert modify-header set TTL action to DV specification.
740  *
741  * @param[in,out] resource
742  *   Pointer to the modify-header resource.
743  * @param[in] action
744  *   Pointer to action specification.
745  * @param[in] items
746  *   Pointer to rte_flow_item objects list.
747  * @param[in] attr
748  *   Pointer to flow attributes structure.
749  * @param[in] dev_flow
750  *   Pointer to the sub flow.
751  * @param[in] tunnel_decap
752  *   Whether action is after tunnel decapsulation.
753  * @param[out] error
754  *   Pointer to the error structure.
755  *
756  * @return
757  *   0 on success, a negative errno value otherwise and rte_errno is set.
758  */
759 static int
760 flow_dv_convert_action_modify_ttl
761                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
762                          const struct rte_flow_action *action,
763                          const struct rte_flow_item *items,
764                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
765                          bool tunnel_decap, struct rte_flow_error *error)
766 {
767         const struct rte_flow_action_set_ttl *conf =
768                 (const struct rte_flow_action_set_ttl *)(action->conf);
769         struct rte_flow_item item;
770         struct rte_flow_item_ipv4 ipv4;
771         struct rte_flow_item_ipv4 ipv4_mask;
772         struct rte_flow_item_ipv6 ipv6;
773         struct rte_flow_item_ipv6 ipv6_mask;
774         struct field_modify_info *field;
775
776         if (!attr->valid)
777                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
778         if (attr->ipv4) {
779                 memset(&ipv4, 0, sizeof(ipv4));
780                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
781                 ipv4.hdr.time_to_live = conf->ttl_value;
782                 ipv4_mask.hdr.time_to_live = 0xFF;
783                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
784                 item.spec = &ipv4;
785                 item.mask = &ipv4_mask;
786                 field = modify_ipv4;
787         } else {
788                 MLX5_ASSERT(attr->ipv6);
789                 memset(&ipv6, 0, sizeof(ipv6));
790                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
791                 ipv6.hdr.hop_limits = conf->ttl_value;
792                 ipv6_mask.hdr.hop_limits = 0xFF;
793                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
794                 item.spec = &ipv6;
795                 item.mask = &ipv6_mask;
796                 field = modify_ipv6;
797         }
798         return flow_dv_convert_modify_action(&item, field, NULL, resource,
799                                              MLX5_MODIFICATION_TYPE_SET, error);
800 }
801
802 /**
803  * Convert modify-header decrement TTL action to DV specification.
804  *
805  * @param[in,out] resource
806  *   Pointer to the modify-header resource.
807  * @param[in] action
808  *   Pointer to action specification.
809  * @param[in] items
810  *   Pointer to rte_flow_item objects list.
811  * @param[in] attr
812  *   Pointer to flow attributes structure.
813  * @param[in] dev_flow
814  *   Pointer to the sub flow.
815  * @param[in] tunnel_decap
816  *   Whether action is after tunnel decapsulation.
817  * @param[out] error
818  *   Pointer to the error structure.
819  *
820  * @return
821  *   0 on success, a negative errno value otherwise and rte_errno is set.
822  */
823 static int
824 flow_dv_convert_action_modify_dec_ttl
825                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
826                          const struct rte_flow_item *items,
827                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
828                          bool tunnel_decap, struct rte_flow_error *error)
829 {
830         struct rte_flow_item item;
831         struct rte_flow_item_ipv4 ipv4;
832         struct rte_flow_item_ipv4 ipv4_mask;
833         struct rte_flow_item_ipv6 ipv6;
834         struct rte_flow_item_ipv6 ipv6_mask;
835         struct field_modify_info *field;
836
837         if (!attr->valid)
838                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
839         if (attr->ipv4) {
840                 memset(&ipv4, 0, sizeof(ipv4));
841                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
842                 ipv4.hdr.time_to_live = 0xFF;
843                 ipv4_mask.hdr.time_to_live = 0xFF;
844                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
845                 item.spec = &ipv4;
846                 item.mask = &ipv4_mask;
847                 field = modify_ipv4;
848         } else {
849                 MLX5_ASSERT(attr->ipv6);
850                 memset(&ipv6, 0, sizeof(ipv6));
851                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
852                 ipv6.hdr.hop_limits = 0xFF;
853                 ipv6_mask.hdr.hop_limits = 0xFF;
854                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
855                 item.spec = &ipv6;
856                 item.mask = &ipv6_mask;
857                 field = modify_ipv6;
858         }
859         return flow_dv_convert_modify_action(&item, field, NULL, resource,
860                                              MLX5_MODIFICATION_TYPE_ADD, error);
861 }
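
/*
 * Note (illustrative): the decrement above is expressed as an ADD of
 * 0xff to the 8-bit TTL/hop-limit field; in mod-256 arithmetic adding
 * 255 is equivalent to subtracting 1, the same trick as adding
 * UINT32_MAX for the TCP SEQ/ACK numbers below.
 */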
862
863 /**
864  * Convert modify-header increment/decrement TCP Sequence number
865  * to DV specification.
866  *
867  * @param[in,out] resource
868  *   Pointer to the modify-header resource.
869  * @param[in] action
870  *   Pointer to action specification.
871  * @param[out] error
872  *   Pointer to the error structure.
873  *
874  * @return
875  *   0 on success, a negative errno value otherwise and rte_errno is set.
876  */
877 static int
878 flow_dv_convert_action_modify_tcp_seq
879                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
880                          const struct rte_flow_action *action,
881                          struct rte_flow_error *error)
882 {
883         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
884         uint64_t value = rte_be_to_cpu_32(*conf);
885         struct rte_flow_item item;
886         struct rte_flow_item_tcp tcp;
887         struct rte_flow_item_tcp tcp_mask;
888
889         memset(&tcp, 0, sizeof(tcp));
890         memset(&tcp_mask, 0, sizeof(tcp_mask));
891         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
892                 /*
893                  * The HW has no decrement operation, only an increment one.
894                  * To simulate decrementing Y by X using the increment
895                  * operation, we add UINT32_MAX to Y X times.
896                  * Each addition of UINT32_MAX decrements Y by 1.
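                 *
                 * Worked example (mod 2^32): for X == 3 the addend is
                 * 3 * UINT32_MAX == 3 * 2^32 - 3, which truncates to
                 * (uint32_t)-3, so a single ADD decrements Y by 3.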
897                  */
898                 value *= UINT32_MAX;
899         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
900         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
901         item.type = RTE_FLOW_ITEM_TYPE_TCP;
902         item.spec = &tcp;
903         item.mask = &tcp_mask;
904         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
905                                              MLX5_MODIFICATION_TYPE_ADD, error);
906 }
907
908 /**
909  * Convert modify-header increment/decrement TCP Acknowledgment number
910  * to DV specification.
911  *
912  * @param[in,out] resource
913  *   Pointer to the modify-header resource.
914  * @param[in] action
915  *   Pointer to action specification.
916  * @param[out] error
917  *   Pointer to the error structure.
918  *
919  * @return
920  *   0 on success, a negative errno value otherwise and rte_errno is set.
921  */
922 static int
923 flow_dv_convert_action_modify_tcp_ack
924                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
925                          const struct rte_flow_action *action,
926                          struct rte_flow_error *error)
927 {
928         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
929         uint64_t value = rte_be_to_cpu_32(*conf);
930         struct rte_flow_item item;
931         struct rte_flow_item_tcp tcp;
932         struct rte_flow_item_tcp tcp_mask;
933
934         memset(&tcp, 0, sizeof(tcp));
935         memset(&tcp_mask, 0, sizeof(tcp_mask));
936         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
937                 /*
938                  * The HW has no decrement operation, only an increment one.
939                  * To simulate decrementing Y by X using the increment
940                  * operation, we add UINT32_MAX to Y X times.
941                  * Each addition of UINT32_MAX decrements Y by 1.
942                  */
943                 value *= UINT32_MAX;
944         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
945         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
946         item.type = RTE_FLOW_ITEM_TYPE_TCP;
947         item.spec = &tcp;
948         item.mask = &tcp_mask;
949         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
950                                              MLX5_MODIFICATION_TYPE_ADD, error);
951 }
952
953 static enum mlx5_modification_field reg_to_field[] = {
954         [REG_NON] = MLX5_MODI_OUT_NONE,
955         [REG_A] = MLX5_MODI_META_DATA_REG_A,
956         [REG_B] = MLX5_MODI_META_DATA_REG_B,
957         [REG_C_0] = MLX5_MODI_META_REG_C_0,
958         [REG_C_1] = MLX5_MODI_META_REG_C_1,
959         [REG_C_2] = MLX5_MODI_META_REG_C_2,
960         [REG_C_3] = MLX5_MODI_META_REG_C_3,
961         [REG_C_4] = MLX5_MODI_META_REG_C_4,
962         [REG_C_5] = MLX5_MODI_META_REG_C_5,
963         [REG_C_6] = MLX5_MODI_META_REG_C_6,
964         [REG_C_7] = MLX5_MODI_META_REG_C_7,
965 };
966
967 /**
968  * Convert register set to DV specification.
969  *
970  * @param[in,out] resource
971  *   Pointer to the modify-header resource.
972  * @param[in] action
973  *   Pointer to action specification.
974  * @param[out] error
975  *   Pointer to the error structure.
976  *
977  * @return
978  *   0 on success, a negative errno value otherwise and rte_errno is set.
979  */
980 static int
981 flow_dv_convert_action_set_reg
982                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
983                          const struct rte_flow_action *action,
984                          struct rte_flow_error *error)
985 {
986         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
987         struct mlx5_modification_cmd *actions = resource->actions;
988         uint32_t i = resource->actions_num;
989
990         if (i >= MLX5_MAX_MODIFY_NUM)
991                 return rte_flow_error_set(error, EINVAL,
992                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
993                                           "too many items to modify");
994         MLX5_ASSERT(conf->id != REG_NON);
995         MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
996         actions[i] = (struct mlx5_modification_cmd) {
997                 .action_type = MLX5_MODIFICATION_TYPE_SET,
998                 .field = reg_to_field[conf->id],
999         };
1000         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1001         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1002         ++i;
1003         resource->actions_num = i;
1004         return 0;
1005 }
1006
1007 /**
1008  * Convert SET_TAG action to DV specification.
1009  *
1010  * @param[in] dev
1011  *   Pointer to the rte_eth_dev structure.
1012  * @param[in,out] resource
1013  *   Pointer to the modify-header resource.
1014  * @param[in] conf
1015  *   Pointer to action specification.
1016  * @param[out] error
1017  *   Pointer to the error structure.
1018  *
1019  * @return
1020  *   0 on success, a negative errno value otherwise and rte_errno is set.
1021  */
1022 static int
1023 flow_dv_convert_action_set_tag
1024                         (struct rte_eth_dev *dev,
1025                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1026                          const struct rte_flow_action_set_tag *conf,
1027                          struct rte_flow_error *error)
1028 {
1029         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1030         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1031         struct rte_flow_item item = {
1032                 .spec = &data,
1033                 .mask = &mask,
1034         };
1035         struct field_modify_info reg_c_x[] = {
1036                 [1] = {0, 0, 0},
1037         };
1038         enum mlx5_modification_field reg_type;
1039         int ret;
1040
1041         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1042         if (ret < 0)
1043                 return ret;
1044         MLX5_ASSERT(ret != REG_NON);
1045         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1046         reg_type = reg_to_field[ret];
1047         MLX5_ASSERT(reg_type > 0);
1048         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1049         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1050                                              MLX5_MODIFICATION_TYPE_SET, error);
1051 }
1052
1053 /**
1054  * Convert internal COPY_REG action to DV specification.
1055  *
1056  * @param[in] dev
1057  *   Pointer to the rte_eth_dev structure.
1058  * @param[in,out] res
1059  *   Pointer to the modify-header resource.
1060  * @param[in] action
1061  *   Pointer to action specification.
1062  * @param[out] error
1063  *   Pointer to the error structure.
1064  *
1065  * @return
1066  *   0 on success, a negative errno value otherwise and rte_errno is set.
1067  */
1068 static int
1069 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
1070                                  struct mlx5_flow_dv_modify_hdr_resource *res,
1071                                  const struct rte_flow_action *action,
1072                                  struct rte_flow_error *error)
1073 {
1074         const struct mlx5_flow_action_copy_mreg *conf = action->conf;
1075         rte_be32_t mask = RTE_BE32(UINT32_MAX);
1076         struct rte_flow_item item = {
1077                 .spec = NULL,
1078                 .mask = &mask,
1079         };
1080         struct field_modify_info reg_src[] = {
1081                 {4, 0, reg_to_field[conf->src]},
1082                 {0, 0, 0},
1083         };
1084         struct field_modify_info reg_dst = {
1085                 .offset = 0,
1086                 .id = reg_to_field[conf->dst],
1087         };
1088         /* Adjust reg_c[0] usage according to reported mask. */
1089         if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1090                 struct mlx5_priv *priv = dev->data->dev_private;
1091                 uint32_t reg_c0 = priv->sh->dv_regc0_mask;
1092
1093                 MLX5_ASSERT(reg_c0);
1094                 MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1095                 if (conf->dst == REG_C_0) {
1096                         /* Copy to reg_c[0], within mask only. */
1097                         reg_dst.offset = rte_bsf32(reg_c0);
1098                         /*
1099                          * The mask ignores the endianness, because
1100                          * there is no conversion in the datapath.
1101                          */
1102 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1103                         /* Copy from destination lower bits to reg_c[0]. */
1104                         mask = reg_c0 >> reg_dst.offset;
1105 #else
1106                         /* Copy from destination upper bits to reg_c[0]. */
1107                         mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1108                                           rte_fls_u32(reg_c0));
1109 #endif
1110                 } else {
1111                         mask = rte_cpu_to_be_32(reg_c0);
1112 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1113                         /* Copy from reg_c[0] to destination lower bits. */
1114                         reg_dst.offset = 0;
1115 #else
1116                         /* Copy from reg_c[0] to destination upper bits. */
1117                         reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1118                                          (rte_fls_u32(reg_c0) -
1119                                           rte_bsf32(reg_c0));
1120 #endif
1121                 }
1122         }
1123         return flow_dv_convert_modify_action(&item,
1124                                              reg_src, &reg_dst, res,
1125                                              MLX5_MODIFICATION_TYPE_COPY,
1126                                              error);
1127 }
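
/*
 * Illustrative sketch (mask value assumed for illustration): with
 * dv_regc0_mask == 0x0000ff00 and dst == REG_C_0 on a little-endian
 * host, reg_dst.offset = rte_bsf32(0x0000ff00) = 8 and the mask
 * becomes 0x0000ff00 << (32 - rte_fls_u32(0x0000ff00)) == 0xff000000,
 * so the copy touches only the eight writable bits of reg_c[0].
 */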
1128
1129 /**
1130  * Convert MARK action to DV specification. This routine is used
1131  * in extensive metadata only and requires metadata register to be
1132  * handled. In legacy mode hardware tag resource is engaged.
1133  *
1134  * @param[in] dev
1135  *   Pointer to the rte_eth_dev structure.
1136  * @param[in] conf
1137  *   Pointer to MARK action specification.
1138  * @param[in,out] resource
1139  *   Pointer to the modify-header resource.
1140  * @param[out] error
1141  *   Pointer to the error structure.
1142  *
1143  * @return
1144  *   0 on success, a negative errno value otherwise and rte_errno is set.
1145  */
1146 static int
1147 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1148                             const struct rte_flow_action_mark *conf,
1149                             struct mlx5_flow_dv_modify_hdr_resource *resource,
1150                             struct rte_flow_error *error)
1151 {
1152         struct mlx5_priv *priv = dev->data->dev_private;
1153         rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1154                                            priv->sh->dv_mark_mask);
1155         rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1156         struct rte_flow_item item = {
1157                 .spec = &data,
1158                 .mask = &mask,
1159         };
1160         struct field_modify_info reg_c_x[] = {
1161                 [1] = {0, 0, 0},
1162         };
1163         int reg;
1164
1165         if (!mask)
1166                 return rte_flow_error_set(error, EINVAL,
1167                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1168                                           NULL, "zero mark action mask");
1169         reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1170         if (reg < 0)
1171                 return reg;
1172         MLX5_ASSERT(reg > 0);
1173         if (reg == REG_C_0) {
1174                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1175                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1176
1177                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1178                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1179                 mask = rte_cpu_to_be_32(mask << shl_c0);
1180         }
1181         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1182         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1183                                              MLX5_MODIFICATION_TYPE_SET, error);
1184 }
1185
1186 /**
1187  * Get metadata register index for specified steering domain.
1188  *
1189  * @param[in] dev
1190  *   Pointer to the rte_eth_dev structure.
1191  * @param[in] attr
1192  *   Attributes of flow to determine steering domain.
1193  * @param[out] error
1194  *   Pointer to the error structure.
1195  *
1196  * @return
1197  *   positive index on success, a negative errno value otherwise
1198  *   and rte_errno is set.
1199  */
1200 static enum modify_reg
1201 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1202                          const struct rte_flow_attr *attr,
1203                          struct rte_flow_error *error)
1204 {
1205         int reg =
1206                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1207                                           MLX5_METADATA_FDB :
1208                                             attr->egress ?
1209                                             MLX5_METADATA_TX :
1210                                             MLX5_METADATA_RX, 0, error);
1211         if (reg < 0)
1212                 return rte_flow_error_set(error,
1213                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1214                                           NULL, "unavailable "
1215                                           "metadata register");
1216         return reg;
1217 }
1218
1219 /**
1220  * Convert SET_META action to DV specification.
1221  *
1222  * @param[in] dev
1223  *   Pointer to the rte_eth_dev structure.
1224  * @param[in,out] resource
1225  *   Pointer to the modify-header resource.
1226  * @param[in] attr
1227  *   Attributes of flow that includes this item.
1228  * @param[in] conf
1229  *   Pointer to action specification.
1230  * @param[out] error
1231  *   Pointer to the error structure.
1232  *
1233  * @return
1234  *   0 on success, a negative errno value otherwise and rte_errno is set.
1235  */
1236 static int
1237 flow_dv_convert_action_set_meta
1238                         (struct rte_eth_dev *dev,
1239                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1240                          const struct rte_flow_attr *attr,
1241                          const struct rte_flow_action_set_meta *conf,
1242                          struct rte_flow_error *error)
1243 {
1244         uint32_t data = conf->data;
1245         uint32_t mask = conf->mask;
1246         struct rte_flow_item item = {
1247                 .spec = &data,
1248                 .mask = &mask,
1249         };
1250         struct field_modify_info reg_c_x[] = {
1251                 [1] = {0, 0, 0},
1252         };
1253         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1254
1255         if (reg < 0)
1256                 return reg;
1257         /*
1258          * In the datapath code there are no endianness
1259          * conversions, for performance reasons; all
1260          * pattern conversions are done in rte_flow.
1261          */
1262         if (reg == REG_C_0) {
1263                 struct mlx5_priv *priv = dev->data->dev_private;
1264                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1265                 uint32_t shl_c0;
1266
1267                 MLX5_ASSERT(msk_c0);
1268 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1269                 shl_c0 = rte_bsf32(msk_c0);
1270 #else
1271                 shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
1272 #endif
1273                 mask <<= shl_c0;
1274                 data <<= shl_c0;
1275                 MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
1276         }
1277         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1278         /* The routine expects parameters in memory as big-endian ones. */
1279         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1280                                              MLX5_MODIFICATION_TYPE_SET, error);
1281 }
1282
1283 /**
1284  * Convert modify-header set IPv4 DSCP action to DV specification.
1285  *
1286  * @param[in,out] resource
1287  *   Pointer to the modify-header resource.
1288  * @param[in] action
1289  *   Pointer to action specification.
1290  * @param[out] error
1291  *   Pointer to the error structure.
1292  *
1293  * @return
1294  *   0 on success, a negative errno value otherwise and rte_errno is set.
1295  */
1296 static int
1297 flow_dv_convert_action_modify_ipv4_dscp
1298                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1299                          const struct rte_flow_action *action,
1300                          struct rte_flow_error *error)
1301 {
1302         const struct rte_flow_action_set_dscp *conf =
1303                 (const struct rte_flow_action_set_dscp *)(action->conf);
1304         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1305         struct rte_flow_item_ipv4 ipv4;
1306         struct rte_flow_item_ipv4 ipv4_mask;
1307
1308         memset(&ipv4, 0, sizeof(ipv4));
1309         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1310         ipv4.hdr.type_of_service = conf->dscp;
1311         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
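        /*
         * Illustrative note: RTE_IPV4_HDR_DSCP_MASK is 0xfc (0x3f << 2),
         * so the mask set above is 0x3f, i.e. DSCP in the low six bits
         * as rdma-core expects (see the IPv6 variant below).
         */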
1312         item.spec = &ipv4;
1313         item.mask = &ipv4_mask;
1314         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1315                                              MLX5_MODIFICATION_TYPE_SET, error);
1316 }
1317
1318 /**
1319  * Convert modify-header set IPv6 DSCP action to DV specification.
1320  *
1321  * @param[in,out] resource
1322  *   Pointer to the modify-header resource.
1323  * @param[in] action
1324  *   Pointer to action specification.
1325  * @param[out] error
1326  *   Pointer to the error structure.
1327  *
1328  * @return
1329  *   0 on success, a negative errno value otherwise and rte_errno is set.
1330  */
1331 static int
1332 flow_dv_convert_action_modify_ipv6_dscp
1333                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1334                          const struct rte_flow_action *action,
1335                          struct rte_flow_error *error)
1336 {
1337         const struct rte_flow_action_set_dscp *conf =
1338                 (const struct rte_flow_action_set_dscp *)(action->conf);
1339         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1340         struct rte_flow_item_ipv6 ipv6;
1341         struct rte_flow_item_ipv6 ipv6_mask;
1342
1343         memset(&ipv6, 0, sizeof(ipv6));
1344         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1345         /*
1346          * Even though the DSCP bit offset of IPv6 is not byte aligned,
1347          * rdma-core only accepts byte-aligned DSCP bits, in bits 0 to 5,
1348          * to be compatible with IPv4. No need to shift the bits in the
1349          * IPv6 case, as rdma-core requires a byte-aligned value.
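         *
         * For example, RTE_IPV6_HDR_DSCP_MASK is 0x0fc00000 (0x3f << 22),
         * so the mask used below is 0x3f, the same byte-aligned form as
         * in the IPv4 case.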
1350          */
1351         ipv6.hdr.vtc_flow = conf->dscp;
1352         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1353         item.spec = &ipv6;
1354         item.mask = &ipv6_mask;
1355         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1356                                              MLX5_MODIFICATION_TYPE_SET, error);
1357 }
1358
1359 /**
1360  * Validate MARK item.
1361  *
1362  * @param[in] dev
1363  *   Pointer to the rte_eth_dev structure.
1364  * @param[in] item
1365  *   Item specification.
1366  * @param[in] attr
1367  *   Attributes of flow that includes this item.
1368  * @param[out] error
1369  *   Pointer to error structure.
1370  *
1371  * @return
1372  *   0 on success, a negative errno value otherwise and rte_errno is set.
1373  */
1374 static int
1375 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1376                            const struct rte_flow_item *item,
1377                            const struct rte_flow_attr *attr __rte_unused,
1378                            struct rte_flow_error *error)
1379 {
1380         struct mlx5_priv *priv = dev->data->dev_private;
1381         struct mlx5_dev_config *config = &priv->config;
1382         const struct rte_flow_item_mark *spec = item->spec;
1383         const struct rte_flow_item_mark *mask = item->mask;
1384         const struct rte_flow_item_mark nic_mask = {
1385                 .id = priv->sh->dv_mark_mask,
1386         };
1387         int ret;
1388
1389         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1390                 return rte_flow_error_set(error, ENOTSUP,
1391                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1392                                           "extended metadata feature"
1393                                           " isn't enabled");
1394         if (!mlx5_flow_ext_mreg_supported(dev))
1395                 return rte_flow_error_set(error, ENOTSUP,
1396                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1397                                           "extended metadata register"
1398                                           " isn't supported");
1399         if (!nic_mask.id)
1400                 return rte_flow_error_set(error, ENOTSUP,
1401                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1402                                           "extended metadata register"
1403                                           " isn't available");
1404         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1405         if (ret < 0)
1406                 return ret;
1407         if (!spec)
1408                 return rte_flow_error_set(error, EINVAL,
1409                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1410                                           item->spec,
1411                                           "data cannot be empty");
1412         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1413                 return rte_flow_error_set(error, EINVAL,
1414                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1415                                           &spec->id,
1416                                           "mark id exceeds the limit");
1417         if (!mask)
1418                 mask = &nic_mask;
1419         if (!mask->id)
1420                 return rte_flow_error_set(error, EINVAL,
1421                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1422                                         "mask cannot be zero");
1423
1424         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1425                                         (const uint8_t *)&nic_mask,
1426                                         sizeof(struct rte_flow_item_mark),
1427                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1428         if (ret < 0)
1429                 return ret;
1430         return 0;
1431 }
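
/*
 * Editorial sketch, not part of the driver: matching a previously set MARK
 * value, as validated above, requires the dv_xmeta_en devarg to enable
 * extended metadata. The mark id and queue index are hypothetical; leaving
 * the mask NULL falls back to the device capability mask (dv_mark_mask).
 */
static __rte_unused int
example_validate_mark_item(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        static const struct rte_flow_item_mark mark = { .id = 42 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_MARK, .spec = &mark },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}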
1432
1433 /**
1434  * Validate META item.
1435  *
1436  * @param[in] dev
1437  *   Pointer to the rte_eth_dev structure.
1438  * @param[in] item
1439  *   Item specification.
1440  * @param[in] attr
1441  *   Attributes of flow that includes this item.
1442  * @param[out] error
1443  *   Pointer to error structure.
1444  *
1445  * @return
1446  *   0 on success, a negative errno value otherwise and rte_errno is set.
1447  */
1448 static int
1449 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1450                            const struct rte_flow_item *item,
1451                            const struct rte_flow_attr *attr,
1452                            struct rte_flow_error *error)
1453 {
1454         struct mlx5_priv *priv = dev->data->dev_private;
1455         struct mlx5_dev_config *config = &priv->config;
1456         const struct rte_flow_item_meta *spec = item->spec;
1457         const struct rte_flow_item_meta *mask = item->mask;
1458         struct rte_flow_item_meta nic_mask = {
1459                 .data = UINT32_MAX
1460         };
1461         int reg;
1462         int ret;
1463
1464         if (!spec)
1465                 return rte_flow_error_set(error, EINVAL,
1466                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1467                                           item->spec,
1468                                           "data cannot be empty");
1469         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1470                 if (!mlx5_flow_ext_mreg_supported(dev))
1471                         return rte_flow_error_set(error, ENOTSUP,
1472                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1473                                           "extended metadata register"
1474                                           " isn't supported");
1475                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1476                 if (reg < 0)
1477                         return reg;
1478                 if (reg == REG_B)
1479                         return rte_flow_error_set(error, ENOTSUP,
1480                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1481                                           "match on reg_b "
1482                                           "isn't supported");
1483                 if (reg != REG_A)
1484                         nic_mask.data = priv->sh->dv_meta_mask;
1485         } else if (attr->transfer) {
1486                 return rte_flow_error_set(error, ENOTSUP,
1487                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1488                                         "extended metadata feature "
1489                                         "should be enabled when "
1490                                         "meta item is requested "
1491                                         "with e-switch mode");
1492         }
1493         if (!mask)
1494                 mask = &rte_flow_item_meta_mask;
1495         if (!mask->data)
1496                 return rte_flow_error_set(error, EINVAL,
1497                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1498                                         "mask cannot be zero");
1499
1500         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1501                                         (const uint8_t *)&nic_mask,
1502                                         sizeof(struct rte_flow_item_meta),
1503                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1504         return ret;
1505 }
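
/*
 * Editorial sketch, not part of the driver: a META match as validated
 * above. The data value and the 16-bit mask are hypothetical; the usable
 * mask width is device-dependent outside of REG_A/REG_B.
 */
static __rte_unused int
example_validate_meta_item(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        static const struct rte_flow_item_meta spec = { .data = 0x1234 };
        static const struct rte_flow_item_meta mask = { .data = 0xffff };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_META,
                  .spec = &spec, .mask = &mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}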
1506
1507 /**
1508  * Validate TAG item.
1509  *
1510  * @param[in] dev
1511  *   Pointer to the rte_eth_dev structure.
1512  * @param[in] item
1513  *   Item specification.
1514  * @param[in] attr
1515  *   Attributes of flow that includes this item.
1516  * @param[out] error
1517  *   Pointer to error structure.
1518  *
1519  * @return
1520  *   0 on success, a negative errno value otherwise and rte_errno is set.
1521  */
1522 static int
1523 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1524                           const struct rte_flow_item *item,
1525                           const struct rte_flow_attr *attr __rte_unused,
1526                           struct rte_flow_error *error)
1527 {
1528         const struct rte_flow_item_tag *spec = item->spec;
1529         const struct rte_flow_item_tag *mask = item->mask;
1530         const struct rte_flow_item_tag nic_mask = {
1531                 .data = RTE_BE32(UINT32_MAX),
1532                 .index = 0xff,
1533         };
1534         int ret;
1535
1536         if (!mlx5_flow_ext_mreg_supported(dev))
1537                 return rte_flow_error_set(error, ENOTSUP,
1538                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1539                                           "extended metadata register"
1540                                           " isn't supported");
1541         if (!spec)
1542                 return rte_flow_error_set(error, EINVAL,
1543                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1544                                           item->spec,
1545                                           "data cannot be empty");
1546         if (!mask)
1547                 mask = &rte_flow_item_tag_mask;
1548         if (!mask->data)
1549                 return rte_flow_error_set(error, EINVAL,
1550                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1551                                         "mask cannot be zero");
1552
1553         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1554                                         (const uint8_t *)&nic_mask,
1555                                         sizeof(struct rte_flow_item_tag),
1556                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1557         if (ret < 0)
1558                 return ret;
1559         if (mask->index != 0xff)
1560                 return rte_flow_error_set(error, EINVAL,
1561                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1562                                           "partial mask for tag index"
1563                                           " is not supported");
1564         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1565         if (ret < 0)
1566                 return ret;
1567         MLX5_ASSERT(ret != REG_NON);
1568         return 0;
1569 }
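
/*
 * Editorial sketch, not part of the driver: a TAG match as validated
 * above. The index selects the application register; note the full 0xff
 * index mask is mandatory. Data value, index and group are hypothetical.
 */
static __rte_unused int
example_validate_tag_item(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
        static const struct rte_flow_item_tag spec = {
                .data = 0xbeef,
                .index = 0,
        };
        static const struct rte_flow_item_tag mask = {
                .data = UINT32_MAX,
                .index = 0xff,
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_TAG,
                  .spec = &spec, .mask = &mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}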
1570
1571 /**
1572  * Validate port_id item.
1573  *
1574  * @param[in] dev
1575  *   Pointer to the rte_eth_dev structure.
1576  * @param[in] item
1577  *   Item specification.
1578  * @param[in] attr
1579  *   Attributes of flow that includes this item.
1580  * @param[in] item_flags
1581  *   Bit-fields that holds the items detected until now.
1582  * @param[out] error
1583  *   Pointer to error structure.
1584  *
1585  * @return
1586  *   0 on success, a negative errno value otherwise and rte_errno is set.
1587  */
1588 static int
1589 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1590                               const struct rte_flow_item *item,
1591                               const struct rte_flow_attr *attr,
1592                               uint64_t item_flags,
1593                               struct rte_flow_error *error)
1594 {
1595         const struct rte_flow_item_port_id *spec = item->spec;
1596         const struct rte_flow_item_port_id *mask = item->mask;
1597         const struct rte_flow_item_port_id switch_mask = {
1598                         .id = 0xffffffff,
1599         };
1600         struct mlx5_priv *esw_priv;
1601         struct mlx5_priv *dev_priv;
1602         int ret;
1603
1604         if (!attr->transfer)
1605                 return rte_flow_error_set(error, EINVAL,
1606                                           RTE_FLOW_ERROR_TYPE_ITEM,
1607                                           NULL,
1608                                           "match on port id is valid only"
1609                                           " when transfer flag is enabled");
1610         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1611                 return rte_flow_error_set(error, ENOTSUP,
1612                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1613                                           "multiple source ports are not"
1614                                           " supported");
1615         if (!mask)
1616                 mask = &switch_mask;
1617         if (mask->id != 0xffffffff)
1618                 return rte_flow_error_set(error, ENOTSUP,
1619                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1620                                            mask,
1621                                            "no support for partial mask on"
1622                                            " \"id\" field");
1623         ret = mlx5_flow_item_acceptable
1624                                 (item, (const uint8_t *)mask,
1625                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1626                                  sizeof(struct rte_flow_item_port_id),
1627                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1628         if (ret)
1629                 return ret;
1630         if (!spec)
1631                 return 0;
1632         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1633         if (!esw_priv)
1634                 return rte_flow_error_set(error, rte_errno,
1635                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1636                                           "failed to obtain E-Switch info for"
1637                                           " port");
1638         dev_priv = mlx5_dev_to_eswitch_info(dev);
1639         if (!dev_priv)
1640                 return rte_flow_error_set(error, rte_errno,
1641                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1642                                           NULL,
1643                                           "failed to obtain E-Switch info");
1644         if (esw_priv->domain_id != dev_priv->domain_id)
1645                 return rte_flow_error_set(error, EINVAL,
1646                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1647                                           "cannot match on a port from a"
1648                                           " different E-Switch");
1649         return 0;
1650 }
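
/*
 * Editorial sketch, not part of the driver: a transfer (E-Switch) rule
 * matching traffic from a given DPDK port, as validated above. The port
 * ids are hypothetical; both ports must share the same E-Switch domain.
 */
static __rte_unused int
example_validate_port_id_item(uint16_t proxy_port, uint16_t peer_port,
                              struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = {
                .ingress = 1,
                .transfer = 1,
        };
        const struct rte_flow_item_port_id pid = { .id = peer_port };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_PORT_ID, .spec = &pid },
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(proxy_port, &attr, pattern, actions, err);
}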
1651
1652 /**
1653  * Validate VLAN item.
1654  *
1655  * @param[in] item
1656  *   Item specification.
1657  * @param[in] item_flags
1658  *   Bit-fields that holds the items detected until now.
1659  * @param[in] dev
1660  *   Ethernet device flow is being created on.
1661  * @param[out] error
1662  *   Pointer to error structure.
1663  *
1664  * @return
1665  *   0 on success, a negative errno value otherwise and rte_errno is set.
1666  */
1667 static int
1668 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1669                            uint64_t item_flags,
1670                            struct rte_eth_dev *dev,
1671                            struct rte_flow_error *error)
1672 {
1673         const struct rte_flow_item_vlan *mask = item->mask;
1674         const struct rte_flow_item_vlan nic_mask = {
1675                 .tci = RTE_BE16(UINT16_MAX),
1676                 .inner_type = RTE_BE16(UINT16_MAX),
1677                 .has_more_vlan = 1,
1678         };
1679         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1680         int ret;
1681         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1682                                         MLX5_FLOW_LAYER_INNER_L4) :
1683                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1684                                         MLX5_FLOW_LAYER_OUTER_L4);
1685         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1686                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1687
1688         if (item_flags & vlanm)
1689                 return rte_flow_error_set(error, EINVAL,
1690                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1691                                           "multiple VLAN layers not supported");
1692         else if ((item_flags & l34m) != 0)
1693                 return rte_flow_error_set(error, EINVAL,
1694                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1695                                           "VLAN cannot follow L3/L4 layer");
1696         if (!mask)
1697                 mask = &rte_flow_item_vlan_mask;
1698         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1699                                         (const uint8_t *)&nic_mask,
1700                                         sizeof(struct rte_flow_item_vlan),
1701                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1702         if (ret)
1703                 return ret;
1704         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1705                 struct mlx5_priv *priv = dev->data->dev_private;
1706
1707                 if (priv->vmwa_context) {
1708                         /*
1709                          * A non-NULL context means we have a virtual machine
1710                          * with SR-IOV enabled, so we have to create a VLAN
1711                          * interface to make the hypervisor set up the E-Switch
1712                          * vport context correctly. We avoid creating multiple
1713                          * VLAN interfaces, so we cannot support a VLAN tag mask.
1714                          */
1715                         return rte_flow_error_set(error, EINVAL,
1716                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1717                                                   item,
1718                                                   "VLAN tag mask is not"
1719                                                   " supported in virtual"
1720                                                   " environment");
1721                 }
1722         }
1723         return 0;
1724 }
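
/*
 * Editorial sketch, not part of the driver: matching VLAN ID 100 with the
 * full VID mask, which also keeps the rule acceptable under the SR-IOV/VM
 * workaround above that rejects other TCI masks. Values are hypothetical.
 */
static __rte_unused int
example_validate_vlan_item(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        static const struct rte_flow_item_vlan spec = {
                .tci = RTE_BE16(100),
        };
        static const struct rte_flow_item_vlan mask = {
                .tci = RTE_BE16(0x0fff),
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_VLAN,
                  .spec = &spec, .mask = &mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}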
1725
1726 /*
1727  * GTP flags are contained in 1 byte of the format:
1728  * -------------------------------------------
1729  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1730  * |-----------------------------------------|
1731  * | value | Version | PT | Res | E | S | PN |
1732  * -------------------------------------------
1733  *
1734  * Matching is supported only for GTP flags E, S, PN.
1735  */
1736 #define MLX5_GTP_FLAGS_MASK     0x07
1737
1738 /**
1739  * Validate GTP item.
1740  *
1741  * @param[in] dev
1742  *   Pointer to the rte_eth_dev structure.
1743  * @param[in] item
1744  *   Item specification.
1745  * @param[in] item_flags
1746  *   Bit-fields that holds the items detected until now.
1747  * @param[out] error
1748  *   Pointer to error structure.
1749  *
1750  * @return
1751  *   0 on success, a negative errno value otherwise and rte_errno is set.
1752  */
1753 static int
1754 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1755                           const struct rte_flow_item *item,
1756                           uint64_t item_flags,
1757                           struct rte_flow_error *error)
1758 {
1759         struct mlx5_priv *priv = dev->data->dev_private;
1760         const struct rte_flow_item_gtp *spec = item->spec;
1761         const struct rte_flow_item_gtp *mask = item->mask;
1762         const struct rte_flow_item_gtp nic_mask = {
1763                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1764                 .msg_type = 0xff,
1765                 .teid = RTE_BE32(0xffffffff),
1766         };
1767
1768         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1769                 return rte_flow_error_set(error, ENOTSUP,
1770                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1771                                           "GTP support is not enabled");
1772         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1773                 return rte_flow_error_set(error, ENOTSUP,
1774                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1775                                           "multiple tunnel layers not"
1776                                           " supported");
1777         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1778                 return rte_flow_error_set(error, EINVAL,
1779                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1780                                           "no outer UDP layer found");
1781         if (!mask)
1782                 mask = &rte_flow_item_gtp_mask;
1783         if (spec && (spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK))
1784                 return rte_flow_error_set(error, ENOTSUP,
1785                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1786                                           "Match is supported for GTP"
1787                                           " flags only");
1788         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1789                                          (const uint8_t *)&nic_mask,
1790                                          sizeof(struct rte_flow_item_gtp),
1791                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1792 }
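
/*
 * Editorial sketch, not part of the driver: a GTP TEID match as validated
 * above; it requires the tunnel_stateless_gtp HCA capability and an outer
 * UDP layer. The TEID value is hypothetical.
 */
static __rte_unused int
example_validate_gtp_item(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        static const struct rte_flow_item_gtp spec = {
                .teid = RTE_BE32(1234),
        };
        static const struct rte_flow_item_gtp mask = {
                .teid = RTE_BE32(UINT32_MAX),
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_GTP,
                  .spec = &spec, .mask = &mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}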
1793
1794 /**
1795  * Validate IPV4 item.
1796  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1797  * add specific validation of the fragment_offset field.
1798  *
1799  * @param[in] item
1800  *   Item specification.
1801  * @param[in] item_flags
1802  *   Bit-fields that holds the items detected until now.
1803  * @param[in] last_item
1804  *   Previous validated item in the pattern items.
1805  * @param[in] ether_type
1806  *   Type in the ethernet layer header (including dot1q).
1807  * @param[out] error
1808  *   Pointer to error structure.
1805  *
1806  * @return
1807  *   0 on success, a negative errno value otherwise and rte_errno is set.
1808  */
1809 static int
1810 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1811                            uint64_t item_flags,
1812                            uint64_t last_item,
1813                            uint16_t ether_type,
1814                            struct rte_flow_error *error)
1815 {
1816         int ret;
1817         const struct rte_flow_item_ipv4 *spec = item->spec;
1818         const struct rte_flow_item_ipv4 *last = item->last;
1819         const struct rte_flow_item_ipv4 *mask = item->mask;
1820         rte_be16_t fragment_offset_spec = 0;
1821         rte_be16_t fragment_offset_last = 0;
1822         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1823                 .hdr = {
1824                         .src_addr = RTE_BE32(0xffffffff),
1825                         .dst_addr = RTE_BE32(0xffffffff),
1826                         .type_of_service = 0xff,
1827                         .fragment_offset = RTE_BE16(0xffff),
1828                         .next_proto_id = 0xff,
1829                         .time_to_live = 0xff,
1830                 },
1831         };
1832
1833         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1834                                            ether_type, &nic_ipv4_mask,
1835                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1836         if (ret < 0)
1837                 return ret;
1838         if (spec && mask)
1839                 fragment_offset_spec = spec->hdr.fragment_offset &
1840                                        mask->hdr.fragment_offset;
1841         if (!fragment_offset_spec)
1842                 return 0;
1843         /*
1844          * spec and mask are valid, enforce using full mask to make sure the
1845          * complete value is used correctly.
1846          */
1847         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1848                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1849                 return rte_flow_error_set(error, EINVAL,
1850                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1851                                           item, "must use full mask for"
1852                                           " fragment_offset");
1853         /*
1854          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1855          * indicating this is the first fragment of a fragmented packet.
1856          * This is not yet supported in MLX5, return appropriate error message.
1857          */
1858         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1859                 return rte_flow_error_set(error, ENOTSUP,
1860                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1861                                           "match on first fragment not "
1862                                           "supported");
1863         if (fragment_offset_spec && !last)
1864                 return rte_flow_error_set(error, ENOTSUP,
1865                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1866                                           "specified value not supported");
1867         /* spec and last are valid, validate the specified range. */
1868         fragment_offset_last = last->hdr.fragment_offset &
1869                                mask->hdr.fragment_offset;
1870         /*
1871          * Match on fragment_offset spec 0x2001 and last 0x3fff
1872          * means MF is 1 and frag-offset is > 0.
1873          * Such a packet is a 2nd or later fragment, excluding the last one.
1874          * This is not yet supported in MLX5, return appropriate
1875          * error message.
1876          */
1877         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1878             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1879                 return rte_flow_error_set(error, ENOTSUP,
1880                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1881                                           last, "match on following "
1882                                           "fragments not supported");
1883         /*
1884          * Match on fragment_offset spec 0x0001 and last 0x1fff
1885          * means MF is 0 and frag-offset is > 0.
1886          * Such a packet is the last fragment of a fragmented packet.
1887          * This is not yet supported in MLX5, return appropriate
1888          * error message.
1889          */
1890         if (fragment_offset_spec == RTE_BE16(1) &&
1891             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1892                 return rte_flow_error_set(error, ENOTSUP,
1893                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1894                                           last, "match on last "
1895                                           "fragment not supported");
1896         /*
1897          * Match on fragment_offset spec 0x0001 and last 0x3fff
1898          * means MF and/or frag-offset is not 0.
1899          * This is a fragmented packet.
1900          * Other range values are invalid and rejected.
1901          */
1902         if (!(fragment_offset_spec == RTE_BE16(1) &&
1903               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1904                 return rte_flow_error_set(error, ENOTSUP,
1905                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1906                                           "specified range not supported");
1907         return 0;
1908 }
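
/*
 * Editorial sketch, not part of the driver: per the checks above, the only
 * supported fragment_offset range is spec 0x0001 with last 0x3fff under a
 * full mask, i.e. "any fragment". A rule dropping all IPv4 fragments:
 */
static __rte_unused int
example_validate_ipv4_any_frag(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        static const struct rte_flow_item_ipv4 spec = {
                .hdr = { .fragment_offset = RTE_BE16(1) },
        };
        static const struct rte_flow_item_ipv4 last = {
                .hdr = {
                        .fragment_offset = RTE_BE16(RTE_IPV4_HDR_MF_FLAG |
                                                    RTE_IPV4_HDR_OFFSET_MASK),
                },
        };
        static const struct rte_flow_item_ipv4 mask = {
                .hdr = {
                        .fragment_offset = RTE_BE16(RTE_IPV4_HDR_MF_FLAG |
                                                    RTE_IPV4_HDR_OFFSET_MASK),
                },
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &spec, .last = &last, .mask = &mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}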
1909
1910 /**
1911  * Validate IPV6 fragment extension item.
1912  *
1913  * @param[in] item
1914  *   Item specification.
1915  * @param[in] item_flags
1916  *   Bit-fields that holds the items detected until now.
1917  * @param[out] error
1918  *   Pointer to error structure.
1919  *
1920  * @return
1921  *   0 on success, a negative errno value otherwise and rte_errno is set.
1922  */
1923 static int
1924 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1925                                     uint64_t item_flags,
1926                                     struct rte_flow_error *error)
1927 {
1928         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1929         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1930         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1931         rte_be16_t frag_data_spec = 0;
1932         rte_be16_t frag_data_last = 0;
1933         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1934         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1935                                       MLX5_FLOW_LAYER_OUTER_L4;
1936         int ret = 0;
1937         struct rte_flow_item_ipv6_frag_ext nic_mask = {
1938                 .hdr = {
1939                         .next_header = 0xff,
1940                         .frag_data = RTE_BE16(0xffff),
1941                 },
1942         };
1943
1944         if (item_flags & l4m)
1945                 return rte_flow_error_set(error, EINVAL,
1946                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1947                                           "ipv6 fragment extension item cannot "
1948                                           "follow L4 item.");
1949         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1950             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1951                 return rte_flow_error_set(error, EINVAL,
1952                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1953                                           "ipv6 fragment extension item must "
1954                                           "follow ipv6 item");
1955         if (spec && mask)
1956                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1957         if (!frag_data_spec)
1958                 return 0;
1959         /*
1960          * spec and mask are valid, enforce using full mask to make sure the
1961          * complete value is used correctly.
1962          */
1963         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1964                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1965                 return rte_flow_error_set(error, EINVAL,
1966                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1967                                           item, "must use full mask for"
1968                                           " frag_data");
1969         /*
1970          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0,
1971          * i.e. this is the first fragment of a fragmented packet.
1972          */
1973         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
1974                 return rte_flow_error_set(error, ENOTSUP,
1975                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1976                                           "match on first fragment not "
1977                                           "supported");
1978         if (frag_data_spec && !last)
1979                 return rte_flow_error_set(error, EINVAL,
1980                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1981                                           "specified value not supported");
1982         ret = mlx5_flow_item_acceptable
1983                                 (item, (const uint8_t *)mask,
1984                                  (const uint8_t *)&nic_mask,
1985                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
1986                                  MLX5_ITEM_RANGE_ACCEPTED, error);
1987         if (ret)
1988                 return ret;
1989         /* spec and last are valid, validate the specified range. */
1990         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
1991         /*
1992          * Match on frag_data spec 0x0009 and last 0xfff9
1993          * means M is 1 and frag-offset is > 0.
1994          * Such a packet is a 2nd or later fragment, excluding the last one.
1995          * This is not yet supported in MLX5, return appropriate
1996          * error message.
1997          */
1998         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
1999                                        RTE_IPV6_EHDR_MF_MASK) &&
2000             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2001                 return rte_flow_error_set(error, ENOTSUP,
2002                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2003                                           last, "match on following "
2004                                           "fragments not supported");
2005         /*
2006          * Match on frag_data spec 0x0008 and last 0xfff8
2007          * means M is 0 and frag-offset is > 0.
2008          * Such a packet is the last fragment of a fragmented packet.
2009          * This is not yet supported in MLX5, return appropriate
2010          * error message.
2011          */
2012         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2013             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2014                 return rte_flow_error_set(error, ENOTSUP,
2015                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2016                                           last, "match on last "
2017                                           "fragment not supported");
2018         /*
2019          * Match on frag_data spec 0x0001 and last 0xfff9
2020          * means MF and/or frag-offset is not 0.
2021          * This is a fragmented packet.
2022          * Other range values are invalid and rejected.
2023          */
2024         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK) &&
2025             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2026                 return 0;
2027         return rte_flow_error_set(error, EINVAL,
2028                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2029                                   "specified range not supported");
2022 }
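
/*
 * Editorial sketch, not part of the driver: given the restrictions above,
 * the simplest supported use of this item is presence-only matching, with
 * no frag_data spec, to catch any fragmented IPv6 packet:
 */
static __rte_unused int
example_validate_ipv6_any_frag(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
                { .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}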
2023
2024 /**
2025  * Validate the pop VLAN action.
2026  *
2027  * @param[in] dev
2028  *   Pointer to the rte_eth_dev structure.
2029  * @param[in] action_flags
2030  *   Holds the actions detected until now.
2031  * @param[in] action
2032  *   Pointer to the pop vlan action.
2033  * @param[in] item_flags
2034  *   The items found in this flow rule.
2035  * @param[in] attr
2036  *   Pointer to flow attributes.
2037  * @param[out] error
2038  *   Pointer to error structure.
2039  *
2040  * @return
2041  *   0 on success, a negative errno value otherwise and rte_errno is set.
2042  */
2043 static int
2044 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2045                                  uint64_t action_flags,
2046                                  const struct rte_flow_action *action,
2047                                  uint64_t item_flags,
2048                                  const struct rte_flow_attr *attr,
2049                                  struct rte_flow_error *error)
2050 {
2051         const struct mlx5_priv *priv = dev->data->dev_private;
2052
2055         if (!priv->sh->pop_vlan_action)
2056                 return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2058                                           NULL,
2059                                           "pop vlan action is not supported");
2060         if (attr->egress)
2061                 return rte_flow_error_set(error, ENOTSUP,
2062                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2063                                           NULL,
2064                                           "pop vlan action not supported for "
2065                                           "egress");
2066         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2067                 return rte_flow_error_set(error, ENOTSUP,
2068                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2069                                           "no support for multiple VLAN "
2070                                           "actions");
2071         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2072         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2073             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2074                 return rte_flow_error_set(error, ENOTSUP,
2075                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2076                                           NULL,
2077                                           "cannot pop vlan after decap without "
2078                                           "match on inner vlan in the flow");
2079         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2080         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2081             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2082                 return rte_flow_error_set(error, ENOTSUP,
2083                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2084                                           NULL,
2085                                           "cannot pop vlan without a "
2086                                           "match on (outer) vlan in the flow");
2087         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2088                 return rte_flow_error_set(error, EINVAL,
2089                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2090                                           "wrong action order, port_id should "
2091                                           "be after pop VLAN action");
2092         if (!attr->transfer && priv->representor)
2093                 return rte_flow_error_set(error, ENOTSUP,
2094                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2095                                           "pop vlan action for VF representor "
2096                                           "not supported on NIC table");
2097         return 0;
2098 }
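
/*
 * Editorial sketch, not part of the driver: an ingress rule stripping the
 * outer VLAN header. Per the validation above, the pattern must match on
 * the (outer) VLAN being popped. The queue index is hypothetical.
 */
static __rte_unused int
example_validate_pop_vlan(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_VLAN },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}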
2099
2100 /**
2101  * Get the default VLAN info from the VLAN match info in the items.
2102  *
2103  * @param[in] items
2104  *   The list of item specifications.
2105  * @param[out] vlan
2106  *   Pointer to the VLAN info to fill in.
2110  */
2111 static void
2112 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2113                                   struct rte_vlan_hdr *vlan)
2114 {
2115         const struct rte_flow_item_vlan nic_mask = {
2116                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2117                                 MLX5DV_FLOW_VLAN_VID_MASK),
2118                 .inner_type = RTE_BE16(0xffff),
2119         };
2120
2121         if (items == NULL)
2122                 return;
2123         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2124                 int type = items->type;
2125
2126                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2127                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2128                         break;
2129         }
2130         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2131                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2132                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2133
2134                 /* If VLAN item in pattern doesn't contain data, return here. */
2135                 if (!vlan_v)
2136                         return;
2137                 if (!vlan_m)
2138                         vlan_m = &nic_mask;
2139                 /* Only full match values are accepted */
2140                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2141                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2142                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2143                         vlan->vlan_tci |=
2144                                 rte_be_to_cpu_16(vlan_v->tci &
2145                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2146                 }
2147                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2148                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2149                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2150                         vlan->vlan_tci |=
2151                                 rte_be_to_cpu_16(vlan_v->tci &
2152                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2153                 }
2154                 if (vlan_m->inner_type == nic_mask.inner_type)
2155                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2156                                                            vlan_m->inner_type);
2157         }
2158 }
2159
2160 /**
2161  * Validate the push VLAN action.
2162  *
2163  * @param[in] dev
2164  *   Pointer to the rte_eth_dev structure.
2165  * @param[in] action_flags
2166  *   Holds the actions detected until now.
2167  * @param[in] vlan_m
2168  *   The VLAN item mask from the flow pattern, can be NULL.
2169  * @param[in] action
2170  *   Pointer to the action structure.
2171  * @param[in] attr
2172  *   Pointer to flow attributes
2173  * @param[out] error
2174  *   Pointer to error structure.
2175  *
2176  * @return
2177  *   0 on success, a negative errno value otherwise and rte_errno is set.
2178  */
2179 static int
2180 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2181                                   uint64_t action_flags,
2182                                   const struct rte_flow_item_vlan *vlan_m,
2183                                   const struct rte_flow_action *action,
2184                                   const struct rte_flow_attr *attr,
2185                                   struct rte_flow_error *error)
2186 {
2187         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2188         const struct mlx5_priv *priv = dev->data->dev_private;
2189
2190         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2191             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2192                 return rte_flow_error_set(error, EINVAL,
2193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2194                                           "invalid vlan ethertype");
2195         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2196                 return rte_flow_error_set(error, EINVAL,
2197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2198                                           "wrong action order, port_id should "
2199                                           "be after push VLAN");
2200         if (!attr->transfer && priv->representor)
2201                 return rte_flow_error_set(error, ENOTSUP,
2202                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2203                                           "push vlan action for VF representor "
2204                                           "not supported on NIC table");
2205         if (vlan_m &&
2206             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2207             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2208                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2209             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2210             !(mlx5_flow_find_action
2211                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2212                 return rte_flow_error_set(error, EINVAL,
2213                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2214                                           "not full match mask on VLAN PCP and "
2215                                           "there is no of_set_vlan_pcp action, "
2216                                           "push VLAN action cannot figure out "
2217                                           "PCP value");
2218         if (vlan_m &&
2219             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2220             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2221                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2222             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2223             !(mlx5_flow_find_action
2224                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2225                 return rte_flow_error_set(error, EINVAL,
2226                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2227                                           "not full match mask on VLAN VID and "
2228                                           "there is no of_set_vlan_vid action, "
2229                                           "push VLAN action cannot figure out "
2230                                           "VID value");
2232         return 0;
2233 }
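
/*
 * Editorial sketch, not part of the driver: an egress rule pushing a VLAN
 * header. Per the validation above and the set VID/PCP checks below, the
 * VID and PCP values must be supplied explicitly when the pattern cannot
 * provide them. VID 100 and PCP 3 are hypothetical.
 */
static __rte_unused int
example_validate_push_vlan(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .egress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_of_push_vlan push = {
                .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
        };
        static const struct rte_flow_action_of_set_vlan_vid vid = {
                .vlan_vid = RTE_BE16(100),
        };
        static const struct rte_flow_action_of_set_vlan_pcp pcp = {
                .vlan_pcp = 3,
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}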
2234
2235 /**
2236  * Validate the set VLAN PCP.
2237  *
2238  * @param[in] action_flags
2239  *   Holds the actions detected until now.
2240  * @param[in] actions
2241  *   Pointer to the list of actions remaining in the flow rule.
2242  * @param[out] error
2243  *   Pointer to error structure.
2244  *
2245  * @return
2246  *   0 on success, a negative errno value otherwise and rte_errno is set.
2247  */
2248 static int
2249 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2250                                      const struct rte_flow_action actions[],
2251                                      struct rte_flow_error *error)
2252 {
2253         const struct rte_flow_action *action = actions;
2254         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2255
2256         if (conf->vlan_pcp > 7)
2257                 return rte_flow_error_set(error, EINVAL,
2258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2259                                           "VLAN PCP value is too big");
2260         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2261                 return rte_flow_error_set(error, ENOTSUP,
2262                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2263                                           "set VLAN PCP action must follow "
2264                                           "the push VLAN action");
2265         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2266                 return rte_flow_error_set(error, ENOTSUP,
2267                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2268                                           "Multiple VLAN PCP modifications are "
2269                                           "not supported");
2270         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2271                 return rte_flow_error_set(error, EINVAL,
2272                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2273                                           "wrong action order, port_id should "
2274                                           "be after set VLAN PCP");
2275         return 0;
2276 }
2277
2278 /**
2279  * Validate the set VLAN VID.
2280  *
2281  * @param[in] item_flags
2282  *   Holds the items detected in this rule.
2283  * @param[in] action_flags
2284  *   Holds the actions detected until now.
2285  * @param[in] actions
2286  *   Pointer to the list of actions remaining in the flow rule.
2287  * @param[out] error
2288  *   Pointer to error structure.
2289  *
2290  * @return
2291  *   0 on success, a negative errno value otherwise and rte_errno is set.
2292  */
2293 static int
2294 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2295                                      uint64_t action_flags,
2296                                      const struct rte_flow_action actions[],
2297                                      struct rte_flow_error *error)
2298 {
2299         const struct rte_flow_action *action = actions;
2300         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2301
2302         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2303                 return rte_flow_error_set(error, EINVAL,
2304                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2305                                           "VLAN VID value is too big");
2306         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2307             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2308                 return rte_flow_error_set(error, ENOTSUP,
2309                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2310                                           "set VLAN VID action must follow push"
2311                                           " VLAN action or match on VLAN item");
2312         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2313                 return rte_flow_error_set(error, ENOTSUP,
2314                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2315                                           "Multiple VLAN VID modifications are "
2316                                           "not supported");
2317         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2318                 return rte_flow_error_set(error, EINVAL,
2319                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2320                                           "wrong action order, port_id should "
2321                                           "be after set VLAN VID");
2322         return 0;
2323 }
2324
2325 /**
2326  * Validate the FLAG action.
2327  *
2328  * @param[in] dev
2329  *   Pointer to the rte_eth_dev structure.
2330  * @param[in] action_flags
2331  *   Holds the actions detected until now.
2332  * @param[in] attr
2333  *   Pointer to flow attributes
2334  * @param[out] error
2335  *   Pointer to error structure.
2336  *
2337  * @return
2338  *   0 on success, a negative errno value otherwise and rte_errno is set.
2339  */
2340 static int
2341 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2342                              uint64_t action_flags,
2343                              const struct rte_flow_attr *attr,
2344                              struct rte_flow_error *error)
2345 {
2346         struct mlx5_priv *priv = dev->data->dev_private;
2347         struct mlx5_dev_config *config = &priv->config;
2348         int ret;
2349
2350         /* Fall back if no extended metadata register support. */
2351         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2352                 return mlx5_flow_validate_action_flag(action_flags, attr,
2353                                                       error);
2354         /* Extensive metadata mode requires registers. */
2355         if (!mlx5_flow_ext_mreg_supported(dev))
2356                 return rte_flow_error_set(error, ENOTSUP,
2357                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2358                                           "no metadata registers "
2359                                           "to support flag action");
2360         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2361                 return rte_flow_error_set(error, ENOTSUP,
2362                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2363                                           "extended metadata register"
2364                                           " isn't available");
2365         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2366         if (ret < 0)
2367                 return ret;
2368         MLX5_ASSERT(ret > 0);
2369         if (action_flags & MLX5_FLOW_ACTION_MARK)
2370                 return rte_flow_error_set(error, EINVAL,
2371                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2372                                           "can't mark and flag in same flow");
2373         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2374                 return rte_flow_error_set(error, EINVAL,
2375                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2376                                           "can't have 2 flag"
2377                                           " actions in same flow");
2378         return 0;
2379 }
2380
2381 /**
2382  * Validate MARK action.
2383  *
2384  * @param[in] dev
2385  *   Pointer to the rte_eth_dev structure.
2386  * @param[in] action
2387  *   Pointer to action.
2388  * @param[in] action_flags
2389  *   Holds the actions detected until now.
2390  * @param[in] attr
2391  *   Pointer to flow attributes
2392  * @param[out] error
2393  *   Pointer to error structure.
2394  *
2395  * @return
2396  *   0 on success, a negative errno value otherwise and rte_errno is set.
2397  */
2398 static int
2399 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2400                              const struct rte_flow_action *action,
2401                              uint64_t action_flags,
2402                              const struct rte_flow_attr *attr,
2403                              struct rte_flow_error *error)
2404 {
2405         struct mlx5_priv *priv = dev->data->dev_private;
2406         struct mlx5_dev_config *config = &priv->config;
2407         const struct rte_flow_action_mark *mark = action->conf;
2408         int ret;
2409
2410         /* Fall back if no extended metadata register support. */
2411         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2412                 return mlx5_flow_validate_action_mark(action, action_flags,
2413                                                       attr, error);
2414         /* Extensive metadata mode requires registers. */
2415         if (!mlx5_flow_ext_mreg_supported(dev))
2416                 return rte_flow_error_set(error, ENOTSUP,
2417                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2418                                           "no metadata registers "
2419                                           "to support mark action");
2420         if (!priv->sh->dv_mark_mask)
2421                 return rte_flow_error_set(error, ENOTSUP,
2422                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2423                                           "extended metadata register"
2424                                           " isn't available");
2425         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2426         if (ret < 0)
2427                 return ret;
2428         MLX5_ASSERT(ret > 0);
2429         if (!mark)
2430                 return rte_flow_error_set(error, EINVAL,
2431                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2432                                           "configuration cannot be null");
2433         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2434                 return rte_flow_error_set(error, EINVAL,
2435                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2436                                           &mark->id,
2437                                           "mark id exceeds the limit");
2438         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2439                 return rte_flow_error_set(error, EINVAL,
2440                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2441                                           "can't flag and mark in same flow");
2442         if (action_flags & MLX5_FLOW_ACTION_MARK)
2443                 return rte_flow_error_set(error, EINVAL,
2444                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2445                                           "can't have 2 mark actions in same"
2446                                           " flow");
2447         return 0;
2448 }
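
/*
 * Editorial sketch, not part of the driver: marking matched packets so the
 * value is delivered in the mbuf. FLAG is the fixed-value sibling; per the
 * checks above a single flow may carry one of them, not both. The mark id
 * and queue index are hypothetical.
 */
static __rte_unused int
example_validate_mark_action(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_mark mark = { .id = 42 };
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}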
2449
2450 /**
2451  * Validate SET_META action.
2452  *
2453  * @param[in] dev
2454  *   Pointer to the rte_eth_dev structure.
2455  * @param[in] action
2456  *   Pointer to the action structure.
2457  * @param[in] action_flags
2458  *   Holds the actions detected until now.
2459  * @param[in] attr
2460  *   Pointer to flow attributes
2461  * @param[out] error
2462  *   Pointer to error structure.
2463  *
2464  * @return
2465  *   0 on success, a negative errno value otherwise and rte_errno is set.
2466  */
2467 static int
2468 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2469                                  const struct rte_flow_action *action,
2470                                  uint64_t action_flags __rte_unused,
2471                                  const struct rte_flow_attr *attr,
2472                                  struct rte_flow_error *error)
2473 {
2474         const struct rte_flow_action_set_meta *conf;
2475         uint32_t nic_mask = UINT32_MAX;
2476         int reg;
2477
2478         if (!mlx5_flow_ext_mreg_supported(dev))
2479                 return rte_flow_error_set(error, ENOTSUP,
2480                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2481                                           "extended metadata register"
2482                                           " isn't supported");
2483         reg = flow_dv_get_metadata_reg(dev, attr, error);
2484         if (reg < 0)
2485                 return reg;
2486         if (reg != REG_A && reg != REG_B) {
2487                 struct mlx5_priv *priv = dev->data->dev_private;
2488
2489                 nic_mask = priv->sh->dv_meta_mask;
2490         }
2491         if (!(action->conf))
2492                 return rte_flow_error_set(error, EINVAL,
2493                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2494                                           "configuration cannot be null");
2495         conf = (const struct rte_flow_action_set_meta *)action->conf;
2496         if (!conf->mask)
2497                 return rte_flow_error_set(error, EINVAL,
2498                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2499                                           "zero mask doesn't have any effect");
2500         if (conf->mask & ~nic_mask)
2501                 return rte_flow_error_set(error, EINVAL,
2502                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2503                                           "meta data must be within reg C0");
2504         return 0;
2505 }
2506
2507 /**
2508  * Validate SET_TAG action.
2509  *
2510  * @param[in] dev
2511  *   Pointer to the rte_eth_dev structure.
2512  * @param[in] action
2513  *   Pointer to the action structure.
2514  * @param[in] action_flags
2515  *   Holds the actions detected until now.
2516  * @param[in] attr
2517  *   Pointer to flow attributes
2518  * @param[out] error
2519  *   Pointer to error structure.
2520  *
2521  * @return
2522  *   0 on success, a negative errno value otherwise and rte_errno is set.
2523  */
2524 static int
2525 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2526                                 const struct rte_flow_action *action,
2527                                 uint64_t action_flags,
2528                                 const struct rte_flow_attr *attr,
2529                                 struct rte_flow_error *error)
2530 {
2531         const struct rte_flow_action_set_tag *conf;
2532         const uint64_t terminal_action_flags =
2533                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2534                 MLX5_FLOW_ACTION_RSS;
2535         int ret;
2536
2537         if (!mlx5_flow_ext_mreg_supported(dev))
2538                 return rte_flow_error_set(error, ENOTSUP,
2539                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2540                                           "extended metadata register"
2541                                           " isn't supported");
2542         if (!(action->conf))
2543                 return rte_flow_error_set(error, EINVAL,
2544                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2545                                           "configuration cannot be null");
2546         conf = (const struct rte_flow_action_set_tag *)action->conf;
2547         if (!conf->mask)
2548                 return rte_flow_error_set(error, EINVAL,
2549                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2550                                           "zero mask doesn't have any effect");
2551         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2552         if (ret < 0)
2553                 return ret;
2554         if (!attr->transfer && attr->ingress &&
2555             (action_flags & terminal_action_flags))
2556                 return rte_flow_error_set(error, EINVAL,
2557                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2558                                           "set_tag has no effect"
2559                                           " with terminal actions");
2560         return 0;
2561 }
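
/*
 * Illustrative sketch (values hypothetical): a SET_TAG action writing to
 * application tag index 0 that the validator above accepts, provided the
 * index maps to an available metadata register and the flow does not
 * combine it with a terminal action on an ingress NIC table.
 *
 *   struct rte_flow_action_set_tag tag_conf = {
 *           .data = 0xbeef,
 *           .mask = 0xffff,
 *           .index = 0,
 *   };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_SET_TAG,
 *           .conf = &tag_conf,
 *   };
 */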
2562
2563 /**
2564  * Validate count action.
2565  *
2566  * @param[in] dev
2567  *   Pointer to rte_eth_dev structure.
2568  * @param[out] error
2569  *   Pointer to error structure.
2570  *
2571  * @return
2572  *   0 on success, a negative errno value otherwise and rte_errno is set.
2573  */
2574 static int
2575 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2576                               struct rte_flow_error *error)
2577 {
2578         struct mlx5_priv *priv = dev->data->dev_private;
2579
2580         if (!priv->config.devx)
2581                 goto notsup_err;
2582 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2583         return 0;
2584 #endif
2585 notsup_err:
2586         return rte_flow_error_set
2587                       (error, ENOTSUP,
2588                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2589                        NULL,
2590                        "count action not supported");
2591 }
2592
2593 /**
2594  * Validate the L2 encap action.
2595  *
2596  * @param[in] dev
2597  *   Pointer to the rte_eth_dev structure.
2598  * @param[in] action_flags
2599  *   Holds the actions detected until now.
2600  * @param[in] action
2601  *   Pointer to the action structure.
2602  * @param[in] attr
2603  *   Pointer to flow attributes.
2604  * @param[out] error
2605  *   Pointer to error structure.
2606  *
2607  * @return
2608  *   0 on success, a negative errno value otherwise and rte_errno is set.
2609  */
2610 static int
2611 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2612                                  uint64_t action_flags,
2613                                  const struct rte_flow_action *action,
2614                                  const struct rte_flow_attr *attr,
2615                                  struct rte_flow_error *error)
2616 {
2617         const struct mlx5_priv *priv = dev->data->dev_private;
2618
2619         if (!(action->conf))
2620                 return rte_flow_error_set(error, EINVAL,
2621                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2622                                           "configuration cannot be null");
2623         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2624                 return rte_flow_error_set(error, EINVAL,
2625                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2626                                           "can only have a single encap action "
2627                                           "in a flow");
2628         if (!attr->transfer && priv->representor)
2629                 return rte_flow_error_set(error, ENOTSUP,
2630                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2631                                           "encap action for VF representor "
2632                                           "not supported on NIC table");
2633         return 0;
2634 }
2635
2636 /**
2637  * Validate a decap action.
2638  *
2639  * @param[in] dev
2640  *   Pointer to the rte_eth_dev structure.
2641  * @param[in] action_flags
2642  *   Holds the actions detected until now.
2643  * @param[in] attr
2644  *   Pointer to flow attributes
2645  * @param[out] error
2646  *   Pointer to error structure.
2647  *
2648  * @return
2649  *   0 on success, a negative errno value otherwise and rte_errno is set.
2650  */
2651 static int
2652 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2653                               uint64_t action_flags,
2654                               const struct rte_flow_attr *attr,
2655                               struct rte_flow_error *error)
2656 {
2657         const struct mlx5_priv *priv = dev->data->dev_private;
2658
2659         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2660             !priv->config.decap_en)
2661                 return rte_flow_error_set(error, ENOTSUP,
2662                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2663                                           "decap is not enabled");
2664         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2665                 return rte_flow_error_set(error, ENOTSUP,
2666                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2667                                           action_flags &
2668                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2669                                           "have a single decap action" : "decap "
2670                                           "after encap is not supported");
2671         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2672                 return rte_flow_error_set(error, EINVAL,
2673                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2674                                           "can't have decap action after"
2675                                           " modify action");
2676         if (attr->egress)
2677                 return rte_flow_error_set(error, ENOTSUP,
2678                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2679                                           NULL,
2680                                           "decap action not supported for "
2681                                           "egress");
2682         if (!attr->transfer && priv->representor)
2683                 return rte_flow_error_set(error, ENOTSUP,
2684                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2685                                           "decap action for VF representor "
2686                                           "not supported on NIC table");
2687         return 0;
2688 }
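
/*
 * Note: MLX5_FLOW_XCAP_ACTIONS combines the encap and decap action flags,
 * so the check above rejects both a duplicate decap and a decap that
 * follows an encap in the same flow, picking the error message from the
 * flag that is already set.
 */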
2689
2690 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2691
2692 /**
2693  * Validate the raw encap and decap actions.
2694  *
2695  * @param[in] dev
2696  *   Pointer to the rte_eth_dev structure.
2697  * @param[in] decap
2698  *   Pointer to the decap action.
2699  * @param[in] encap
2700  *   Pointer to the encap action.
2701  * @param[in] attr
2702  *   Pointer to flow attributes
2703  * @param[in, out] action_flags
2704  *   Holds the actions detected until now.
2705  * @param[out] actions_n
2706  *   Pointer to the number of actions counter.
2707  * @param[out] error
2708  *   Pointer to error structure.
2709  *
2710  * @return
2711  *   0 on success, a negative errno value otherwise and rte_errno is set.
2712  */
2713 static int
2714 flow_dv_validate_action_raw_encap_decap
2715         (struct rte_eth_dev *dev,
2716          const struct rte_flow_action_raw_decap *decap,
2717          const struct rte_flow_action_raw_encap *encap,
2718          const struct rte_flow_attr *attr, uint64_t *action_flags,
2719          int *actions_n, struct rte_flow_error *error)
2720 {
2721         const struct mlx5_priv *priv = dev->data->dev_private;
2722         int ret;
2723
2724         if (encap && (!encap->size || !encap->data))
2725                 return rte_flow_error_set(error, EINVAL,
2726                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2727                                           "raw encap data cannot be empty");
2728         if (decap && encap) {
2729                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2730                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2731                         /* L3 encap. */
2732                         decap = NULL;
2733                 else if (encap->size <=
2734                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2735                            decap->size >
2736                            MLX5_ENCAPSULATION_DECISION_SIZE)
2737                         /* L3 decap. */
2738                         encap = NULL;
2739                 else if (encap->size >
2740                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2741                            decap->size >
2742                            MLX5_ENCAPSULATION_DECISION_SIZE)
2743                         /* 2 L2 actions: encap and decap. */
2744                         ;
2745                 else
2746                         return rte_flow_error_set(error,
2747                                 ENOTSUP,
2748                                 RTE_FLOW_ERROR_TYPE_ACTION,
2749                                 NULL, "unsupported combination: "
2750                                 "both raw decap and raw encap "
2751                                 "sizes are too small");
2752         }
2753         if (decap) {
2754                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2755                                                     error);
2756                 if (ret < 0)
2757                         return ret;
2758                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2759                 ++(*actions_n);
2760         }
2761         if (encap) {
2762                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2763                         return rte_flow_error_set(error, ENOTSUP,
2764                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2765                                                   NULL,
2766                                                   "raw encap size too small");
2767                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2768                         return rte_flow_error_set(error, EINVAL,
2769                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2770                                                   NULL,
2771                                                   "more than one encap action");
2772                 if (!attr->transfer && priv->representor)
2773                         return rte_flow_error_set
2774                                         (error, ENOTSUP,
2775                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2776                                          "encap action for VF representor "
2777                                          "not supported on NIC table");
2778                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2779                 ++(*actions_n);
2780         }
2781         return 0;
2782 }
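
/*
 * Decision table for a raw decap/encap pair, derived from the checks
 * above (D = MLX5_ENCAPSULATION_DECISION_SIZE):
 *
 *   decap->size <= D && encap->size >  D   -> L3 encap, decap ignored
 *   decap->size >  D && encap->size <= D   -> L3 decap, encap ignored
 *   decap->size >  D && encap->size >  D   -> two L2 actions
 *   decap->size <= D && encap->size <= D   -> rejected (ENOTSUP)
 */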
2783
2784 /**
2785  * Match encap_decap resource.
2786  *
2787  * @param list
2788  *   Pointer to the hash list.
2789  * @param entry
2790  *   Pointer to the existing resource entry object.
2791  * @param key
2792  *   Key of the new entry.
2793  * @param cb_ctx
2794  *   Pointer to the flow callback context holding the new encap_decap resource.
2795  *
2796  * @return
2797  *   0 on matching, non-zero otherwise.
2798  */
2799 int
2800 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2801                              struct mlx5_hlist_entry *entry,
2802                              uint64_t key __rte_unused, void *cb_ctx)
2803 {
2804         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2805         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2806         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2807
2808         cache_resource = container_of(entry,
2809                                       struct mlx5_flow_dv_encap_decap_resource,
2810                                       entry);
2811         if (resource->entry.key == cache_resource->entry.key &&
2812             resource->reformat_type == cache_resource->reformat_type &&
2813             resource->ft_type == cache_resource->ft_type &&
2814             resource->flags == cache_resource->flags &&
2815             resource->size == cache_resource->size &&
2816             !memcmp((const void *)resource->buf,
2817                     (const void *)cache_resource->buf,
2818                     resource->size))
2819                 return 0;
2820         return -1;
2821 }
2822
2823 /**
2824  * Allocate encap_decap resource.
2825  *
2826  * @param list
2827  *   Pointer to the hash list.
2828  * @param key
2829  *   Key of the new entry.
2830  * @param cb_ctx
2831  *   Pointer to the flow callback context holding the new encap_decap resource.
2832  *
2833  * @return
2834  *   Pointer to the created entry on success, NULL otherwise.
2835  */
2836 struct mlx5_hlist_entry *
2837 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2838                               uint64_t key __rte_unused,
2839                               void *cb_ctx)
2840 {
2841         struct mlx5_dev_ctx_shared *sh = list->ctx;
2842         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2843         struct mlx5dv_dr_domain *domain;
2844         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2845         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2846         uint32_t idx;
2847         int ret;
2848
2849         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2850                 domain = sh->fdb_domain;
2851         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2852                 domain = sh->rx_domain;
2853         else
2854                 domain = sh->tx_domain;
2855         /* Register new encap/decap resource. */
2856         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2857                                        &idx);
2858         if (!cache_resource) {
2859                 rte_flow_error_set(ctx->error, ENOMEM,
2860                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2861                                    "cannot allocate resource memory");
2862                 return NULL;
2863         }
2864         *cache_resource = *resource;
2865         cache_resource->idx = idx;
2866         ret = mlx5_flow_os_create_flow_action_packet_reformat
2867                                         (sh->ctx, domain, cache_resource,
2868                                          &cache_resource->action);
2869         if (ret) {
2870                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2871                 rte_flow_error_set(ctx->error, ENOMEM,
2872                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2873                                    NULL, "cannot create action");
2874                 return NULL;
2875         }
2876
2877         return &cache_resource->entry;
2878 }
2879
2880 /**
2881  * Find existing encap/decap resource or create and register a new one.
2882  *
2883  * @param[in, out] dev
2884  *   Pointer to rte_eth_dev structure.
2885  * @param[in, out] resource
2886  *   Pointer to encap/decap resource.
2887  * @param[in, out] dev_flow
2888  *   Pointer to the dev_flow.
2889  * @param[out] error
2890  *   Pointer to error structure.
2891  *
2892  * @return
2893  *   0 on success, a negative errno value otherwise and rte_errno is set.
2894  */
2895 static int
2896 flow_dv_encap_decap_resource_register
2897                         (struct rte_eth_dev *dev,
2898                          struct mlx5_flow_dv_encap_decap_resource *resource,
2899                          struct mlx5_flow *dev_flow,
2900                          struct rte_flow_error *error)
2901 {
2902         struct mlx5_priv *priv = dev->data->dev_private;
2903         struct mlx5_dev_ctx_shared *sh = priv->sh;
2904         struct mlx5_hlist_entry *entry;
2905         union mlx5_flow_encap_decap_key encap_decap_key = {
2906                 {
2907                         .ft_type = resource->ft_type,
2908                         .refmt_type = resource->reformat_type,
2909                         .buf_size = resource->size,
2910                         .table_level = !!dev_flow->dv.group,
2911                         .cksum = 0,
2912                 }
2913         };
2914         struct mlx5_flow_cb_ctx ctx = {
2915                 .error = error,
2916                 .data = resource,
2917         };
2918
2919         resource->flags = dev_flow->dv.group ? 0 : 1;
2920         encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2921                                                 resource->size, 0);
2922         resource->entry.key = encap_decap_key.v64;
2923         entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
2924                                     &ctx);
2925         if (!entry)
2926                 return -rte_errno;
2927         resource = container_of(entry, typeof(*resource), entry);
2928         dev_flow->dv.encap_decap = resource;
2929         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
2930         return 0;
2931 }
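
/*
 * Key composition sketch: the 64-bit hash key above packs ft_type,
 * reformat type, buffer size and table level together with a raw
 * checksum of the reformat buffer. The checksum only narrows the
 * search; an actual hit still requires the full field and buffer
 * comparison performed by flow_dv_encap_decap_match_cb().
 */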
2932
2933 /**
2934  * Find existing table jump resource or create and register a new one.
2935  *
2936  * @param[in, out] dev
2937  *   Pointer to rte_eth_dev structure.
2938  * @param[in, out] tbl
2939  *   Pointer to flow table resource.
2940  * @param[in, out] dev_flow
2941  *   Pointer to the dev_flow.
2942  * @param[out] error
2943  *   Pointer to error structure.
2944  *
2945  * @return
2946  *   0 on success, a negative errno value otherwise and rte_errno is set.
2947  */
2948 static int
2949 flow_dv_jump_tbl_resource_register
2950                         (struct rte_eth_dev *dev __rte_unused,
2951                          struct mlx5_flow_tbl_resource *tbl,
2952                          struct mlx5_flow *dev_flow,
2953                          struct rte_flow_error *error __rte_unused)
2954 {
2955         struct mlx5_flow_tbl_data_entry *tbl_data =
2956                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2957
2958         MLX5_ASSERT(tbl);
2959         MLX5_ASSERT(tbl_data->jump.action);
2960         dev_flow->handle->rix_jump = tbl_data->idx;
2961         dev_flow->dv.jump = &tbl_data->jump;
2962         return 0;
2963 }
2964
2965 int
2966 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2967                          struct mlx5_cache_entry *entry, void *cb_ctx)
2968 {
2969         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2970         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2971         struct mlx5_flow_dv_port_id_action_resource *res =
2972                         container_of(entry, typeof(*res), entry);
2973
2974         return ref->port_id != res->port_id;
2975 }
2976
2977 struct mlx5_cache_entry *
2978 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
2979                           struct mlx5_cache_entry *entry __rte_unused,
2980                           void *cb_ctx)
2981 {
2982         struct mlx5_dev_ctx_shared *sh = list->ctx;
2983         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2984         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2985         struct mlx5_flow_dv_port_id_action_resource *cache;
2986         uint32_t idx;
2987         int ret;
2988
2989         /* Register new port id action resource. */
2990         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
2991         if (!cache) {
2992                 rte_flow_error_set(ctx->error, ENOMEM,
2993                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2994                                    "cannot allocate port_id action cache memory");
2995                 return NULL;
2996         }
2997         *cache = *ref;
2998         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
2999                                                         ref->port_id,
3000                                                         &cache->action);
3001         if (ret) {
3002                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3003                 rte_flow_error_set(ctx->error, ENOMEM,
3004                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3005                                    "cannot create action");
3006                 return NULL;
3007         }
3008         return &cache->entry;
3009 }
3010
3011 /**
3012  * Find existing table port ID resource or create and register a new one.
3013  *
3014  * @param[in, out] dev
3015  *   Pointer to rte_eth_dev structure.
3016  * @param[in, out] resource
3017  *   Pointer to port ID action resource.
3018  * @param[in, out] dev_flow
3019  *   Pointer to the dev_flow.
3020  * @param[out] error
3021  *   Pointer to error structure.
3022  *
3023  * @return
3024  *   0 on success, a negative errno value otherwise and rte_errno is set.
3025  */
3026 static int
3027 flow_dv_port_id_action_resource_register
3028                         (struct rte_eth_dev *dev,
3029                          struct mlx5_flow_dv_port_id_action_resource *resource,
3030                          struct mlx5_flow *dev_flow,
3031                          struct rte_flow_error *error)
3032 {
3033         struct mlx5_priv *priv = dev->data->dev_private;
3034         struct mlx5_cache_entry *entry;
3035         struct mlx5_flow_dv_port_id_action_resource *cache;
3036         struct mlx5_flow_cb_ctx ctx = {
3037                 .error = error,
3038                 .data = resource,
3039         };
3040
3041         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3042         if (!entry)
3043                 return -rte_errno;
3044         cache = container_of(entry, typeof(*cache), entry);
3045         dev_flow->dv.port_id_action = cache;
3046         dev_flow->handle->rix_port_id_action = cache->idx;
3047         return 0;
3048 }
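
/*
 * Reuse pattern sketch: mlx5_cache_register() scans the shared
 * port_id_action_list with flow_dv_port_id_match_cb(); a matching entry
 * is reused and reference counted, otherwise flow_dv_port_id_create_cb()
 * allocates a new entry and creates the destination-port DR action once
 * for all flows targeting the same port.
 */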
3049
3050 int
3051 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3052                          struct mlx5_cache_entry *entry, void *cb_ctx)
3053 {
3054         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3055         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3056         struct mlx5_flow_dv_push_vlan_action_resource *res =
3057                         container_of(entry, typeof(*res), entry);
3058
3059         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3060 }
3061
3062 struct mlx5_cache_entry *
3063 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3064                           struct mlx5_cache_entry *entry __rte_unused,
3065                           void *cb_ctx)
3066 {
3067         struct mlx5_dev_ctx_shared *sh = list->ctx;
3068         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3069         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3070         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3071         struct mlx5dv_dr_domain *domain;
3072         uint32_t idx;
3073         int ret;
3074
3075         /* Register new push VLAN action resource. */
3076         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3077         if (!cache) {
3078                 rte_flow_error_set(ctx->error, ENOMEM,
3079                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3080                                    "cannot allocate push_vlan action cache memory");
3081                 return NULL;
3082         }
3083         *cache = *ref;
3084         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3085                 domain = sh->fdb_domain;
3086         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3087                 domain = sh->rx_domain;
3088         else
3089                 domain = sh->tx_domain;
3090         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3091                                                         &cache->action);
3092         if (ret) {
3093                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3094                 rte_flow_error_set(ctx->error, ENOMEM,
3095                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3096                                    "cannot create push vlan action");
3097                 return NULL;
3098         }
3099         return &cache->entry;
3100 }
3101
3102 /**
3103  * Find existing push vlan resource or create and register a new one.
3104  *
3105  * @param[in, out] dev
3106  *   Pointer to rte_eth_dev structure.
3107  * @param[in, out] resource
3108  *   Pointer to push VLAN action resource.
3109  * @param[in, out] dev_flow
3110  *   Pointer to the dev_flow.
3111  * @param[out] error
3112  *   Pointer to error structure.
3113  *
3114  * @return
3115  *   0 on success, a negative errno value otherwise and rte_errno is set.
3116  */
3117 static int
3118 flow_dv_push_vlan_action_resource_register
3119                        (struct rte_eth_dev *dev,
3120                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3121                         struct mlx5_flow *dev_flow,
3122                         struct rte_flow_error *error)
3123 {
3124         struct mlx5_priv *priv = dev->data->dev_private;
3125         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3126         struct mlx5_cache_entry *entry;
3127         struct mlx5_flow_cb_ctx ctx = {
3128                 .error = error,
3129                 .data = resource,
3130         };
3131
3132         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3133         if (!entry)
3134                 return -rte_errno;
3135         cache = container_of(entry, typeof(*cache), entry);
3136
3137         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3138         dev_flow->dv.push_vlan_res = cache;
3139         return 0;
3140 }
3141
3142 /**
3143  * Get the header size of a specific rte_flow_item_type.
3144  *
3145  * @param[in] item_type
3146  *   Tested rte_flow_item_type.
3147  *
3148  * @return
3149  *   Size of the item type header in bytes, 0 if void or irrelevant.
3150  */
3151 static size_t
3152 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3153 {
3154         size_t retval;
3155
3156         switch (item_type) {
3157         case RTE_FLOW_ITEM_TYPE_ETH:
3158                 retval = sizeof(struct rte_ether_hdr);
3159                 break;
3160         case RTE_FLOW_ITEM_TYPE_VLAN:
3161                 retval = sizeof(struct rte_vlan_hdr);
3162                 break;
3163         case RTE_FLOW_ITEM_TYPE_IPV4:
3164                 retval = sizeof(struct rte_ipv4_hdr);
3165                 break;
3166         case RTE_FLOW_ITEM_TYPE_IPV6:
3167                 retval = sizeof(struct rte_ipv6_hdr);
3168                 break;
3169         case RTE_FLOW_ITEM_TYPE_UDP:
3170                 retval = sizeof(struct rte_udp_hdr);
3171                 break;
3172         case RTE_FLOW_ITEM_TYPE_TCP:
3173                 retval = sizeof(struct rte_tcp_hdr);
3174                 break;
3175         case RTE_FLOW_ITEM_TYPE_VXLAN:
3176         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3177                 retval = sizeof(struct rte_vxlan_hdr);
3178                 break;
3179         case RTE_FLOW_ITEM_TYPE_GRE:
3180         case RTE_FLOW_ITEM_TYPE_NVGRE:
3181                 retval = sizeof(struct rte_gre_hdr);
3182                 break;
3183         case RTE_FLOW_ITEM_TYPE_MPLS:
3184                 retval = sizeof(struct rte_mpls_hdr);
3185                 break;
3186         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3187         default:
3188                 retval = 0;
3189                 break;
3190         }
3191         return retval;
3192 }
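
/*
 * Usage sketch: summing header lengths over an encap item list, as
 * flow_dv_convert_encap_data() does below.
 *
 *   size_t total = 0;
 *   const struct rte_flow_item *it;
 *
 *   for (it = items; it->type != RTE_FLOW_ITEM_TYPE_END; it++)
 *           total += flow_dv_get_item_hdr_len(it->type);
 */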
3193
3194 #define MLX5_ENCAP_IPV4_VERSION         0x40
3195 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3196 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3197 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3198 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3199 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3200 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3201
3202 /**
3203  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
3204  *
3205  * @param[in] items
3206  *   Pointer to rte_flow_item objects list.
3207  * @param[out] buf
3208  *   Pointer to the output buffer.
3209  * @param[out] size
3210  *   Pointer to the output buffer size.
3211  * @param[out] error
3212  *   Pointer to the error structure.
3213  *
3214  * @return
3215  *   0 on success, a negative errno value otherwise and rte_errno is set.
3216  */
3217 static int
3218 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3219                            size_t *size, struct rte_flow_error *error)
3220 {
3221         struct rte_ether_hdr *eth = NULL;
3222         struct rte_vlan_hdr *vlan = NULL;
3223         struct rte_ipv4_hdr *ipv4 = NULL;
3224         struct rte_ipv6_hdr *ipv6 = NULL;
3225         struct rte_udp_hdr *udp = NULL;
3226         struct rte_vxlan_hdr *vxlan = NULL;
3227         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3228         struct rte_gre_hdr *gre = NULL;
3229         size_t len;
3230         size_t temp_size = 0;
3231
3232         if (!items)
3233                 return rte_flow_error_set(error, EINVAL,
3234                                           RTE_FLOW_ERROR_TYPE_ACTION,
3235                                           NULL, "invalid empty data");
3236         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3237                 len = flow_dv_get_item_hdr_len(items->type);
3238                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3239                         return rte_flow_error_set(error, EINVAL,
3240                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3241                                                   (void *)items->type,
3242                                                   "items total size is too big"
3243                                                   " for encap action");
3244                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3245                 switch (items->type) {
3246                 case RTE_FLOW_ITEM_TYPE_ETH:
3247                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3248                         break;
3249                 case RTE_FLOW_ITEM_TYPE_VLAN:
3250                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3251                         if (!eth)
3252                                 return rte_flow_error_set(error, EINVAL,
3253                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3254                                                 (void *)items->type,
3255                                                 "eth header not found");
3256                         if (!eth->ether_type)
3257                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3258                         break;
3259                 case RTE_FLOW_ITEM_TYPE_IPV4:
3260                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3261                         if (!vlan && !eth)
3262                                 return rte_flow_error_set(error, EINVAL,
3263                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3264                                                 (void *)items->type,
3265                                                 "neither eth nor vlan"
3266                                                 " header found");
3267                         if (vlan && !vlan->eth_proto)
3268                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3269                         else if (eth && !eth->ether_type)
3270                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3271                         if (!ipv4->version_ihl)
3272                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3273                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3274                         if (!ipv4->time_to_live)
3275                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3276                         break;
3277                 case RTE_FLOW_ITEM_TYPE_IPV6:
3278                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3279                         if (!vlan && !eth)
3280                                 return rte_flow_error_set(error, EINVAL,
3281                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3282                                                 (void *)items->type,
3283                                                 "neither eth nor vlan"
3284                                                 " header found");
3285                         if (vlan && !vlan->eth_proto)
3286                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3287                         else if (eth && !eth->ether_type)
3288                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3289                         if (!ipv6->vtc_flow)
3290                                 ipv6->vtc_flow =
3291                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3292                         if (!ipv6->hop_limits)
3293                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3294                         break;
3295                 case RTE_FLOW_ITEM_TYPE_UDP:
3296                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3297                         if (!ipv4 && !ipv6)
3298                                 return rte_flow_error_set(error, EINVAL,
3299                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3300                                                 (void *)items->type,
3301                                                 "ip header not found");
3302                         if (ipv4 && !ipv4->next_proto_id)
3303                                 ipv4->next_proto_id = IPPROTO_UDP;
3304                         else if (ipv6 && !ipv6->proto)
3305                                 ipv6->proto = IPPROTO_UDP;
3306                         break;
3307                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3308                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3309                         if (!udp)
3310                                 return rte_flow_error_set(error, EINVAL,
3311                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3312                                                 (void *)items->type,
3313                                                 "udp header not found");
3314                         if (!udp->dst_port)
3315                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3316                         if (!vxlan->vx_flags)
3317                                 vxlan->vx_flags =
3318                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3319                         break;
3320                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3321                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3322                         if (!udp)
3323                                 return rte_flow_error_set(error, EINVAL,
3324                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3325                                                 (void *)items->type,
3326                                                 "udp header not found");
3327                         if (!vxlan_gpe->proto)
3328                                 return rte_flow_error_set(error, EINVAL,
3329                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3330                                                 (void *)items->type,
3331                                                 "next protocol not found");
3332                         if (!udp->dst_port)
3333                                 udp->dst_port =
3334                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3335                         if (!vxlan_gpe->vx_flags)
3336                                 vxlan_gpe->vx_flags =
3337                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3338                         break;
3339                 case RTE_FLOW_ITEM_TYPE_GRE:
3340                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3341                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3342                         if (!gre->proto)
3343                                 return rte_flow_error_set(error, EINVAL,
3344                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3345                                                 (void *)items->type,
3346                                                 "next protocol not found");
3347                         if (!ipv4 && !ipv6)
3348                                 return rte_flow_error_set(error, EINVAL,
3349                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3350                                                 (void *)items->type,
3351                                                 "ip header not found");
3352                         if (ipv4 && !ipv4->next_proto_id)
3353                                 ipv4->next_proto_id = IPPROTO_GRE;
3354                         else if (ipv6 && !ipv6->proto)
3355                                 ipv6->proto = IPPROTO_GRE;
3356                         break;
3357                 case RTE_FLOW_ITEM_TYPE_VOID:
3358                         break;
3359                 default:
3360                         return rte_flow_error_set(error, EINVAL,
3361                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3362                                                   (void *)items->type,
3363                                                   "unsupported item type");
3364                         break;
3365                 }
3366                 temp_size += len;
3367         }
3368         *size = temp_size;
3369         return 0;
3370 }
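
/*
 * Illustrative input (specs hypothetical): a VXLAN encapsulation item
 * list that this converter serializes into a contiguous raw header
 * buffer, filling defaults (ether_type, IPv4 version/TTL, VXLAN UDP
 * destination port and flags) for fields the user left zero.
 *
 *   struct rte_flow_item items[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */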
3371
3372 static int
3373 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3374 {
3375         struct rte_ether_hdr *eth = NULL;
3376         struct rte_vlan_hdr *vlan = NULL;
3377         struct rte_ipv6_hdr *ipv6 = NULL;
3378         struct rte_udp_hdr *udp = NULL;
3379         char *next_hdr;
3380         uint16_t proto;
3381
3382         eth = (struct rte_ether_hdr *)data;
3383         next_hdr = (char *)(eth + 1);
3384         proto = RTE_BE16(eth->ether_type);
3385
3386         /* VLAN skipping */
3387         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3388                 vlan = (struct rte_vlan_hdr *)next_hdr;
3389                 proto = RTE_BE16(vlan->eth_proto);
3390                 next_hdr += sizeof(struct rte_vlan_hdr);
3391         }
3392
3393         /* HW calculates the IPv4 checksum. No need to proceed. */
3394         if (proto == RTE_ETHER_TYPE_IPV4)
3395                 return 0;
3396
3397         /* Non IPv4/IPv6 header. Not supported. */
3398         if (proto != RTE_ETHER_TYPE_IPV6) {
3399                 return rte_flow_error_set(error, ENOTSUP,
3400                                           RTE_FLOW_ERROR_TYPE_ACTION,
3401                                           NULL, "Cannot offload non IPv4/IPv6");
3402         }
3403
3404         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3405
3406         /* ignore non UDP */
3407         if (ipv6->proto != IPPROTO_UDP)
3408                 return 0;
3409
3410         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3411         udp->dgram_cksum = 0;
3412
3413         return 0;
3414 }
3415
3416 /**
3417  * Convert L2 encap action to DV specification.
3418  *
3419  * @param[in] dev
3420  *   Pointer to rte_eth_dev structure.
3421  * @param[in] action
3422  *   Pointer to action structure.
3423  * @param[in, out] dev_flow
3424  *   Pointer to the mlx5_flow.
3425  * @param[in] transfer
3426  *   Mark if the flow is E-Switch flow.
3427  * @param[out] error
3428  *   Pointer to the error structure.
3429  *
3430  * @return
3431  *   0 on success, a negative errno value otherwise and rte_errno is set.
3432  */
3433 static int
3434 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3435                                const struct rte_flow_action *action,
3436                                struct mlx5_flow *dev_flow,
3437                                uint8_t transfer,
3438                                struct rte_flow_error *error)
3439 {
3440         const struct rte_flow_item *encap_data;
3441         const struct rte_flow_action_raw_encap *raw_encap_data;
3442         struct mlx5_flow_dv_encap_decap_resource res = {
3443                 .reformat_type =
3444                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3445                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3446                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3447         };
3448
3449         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3450                 raw_encap_data =
3451                         (const struct rte_flow_action_raw_encap *)action->conf;
3452                 res.size = raw_encap_data->size;
3453                 memcpy(res.buf, raw_encap_data->data, res.size);
3454         } else {
3455                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3456                         encap_data =
3457                                 ((const struct rte_flow_action_vxlan_encap *)
3458                                                 action->conf)->definition;
3459                 else
3460                         encap_data =
3461                                 ((const struct rte_flow_action_nvgre_encap *)
3462                                                 action->conf)->definition;
3463                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3464                                                &res.size, error))
3465                         return -rte_errno;
3466         }
3467         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3468                 return -rte_errno;
3469         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3470                 return rte_flow_error_set(error, EINVAL,
3471                                           RTE_FLOW_ERROR_TYPE_ACTION,
3472                                           NULL, "can't create L2 encap action");
3473         return 0;
3474 }
3475
3476 /**
3477  * Convert L2 decap action to DV specification.
3478  *
3479  * @param[in] dev
3480  *   Pointer to rte_eth_dev structure.
3481  * @param[in, out] dev_flow
3482  *   Pointer to the mlx5_flow.
3483  * @param[in] transfer
3484  *   Mark if the flow is E-Switch flow.
3485  * @param[out] error
3486  *   Pointer to the error structure.
3487  *
3488  * @return
3489  *   0 on success, a negative errno value otherwise and rte_errno is set.
3490  */
3491 static int
3492 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3493                                struct mlx5_flow *dev_flow,
3494                                uint8_t transfer,
3495                                struct rte_flow_error *error)
3496 {
3497         struct mlx5_flow_dv_encap_decap_resource res = {
3498                 .size = 0,
3499                 .reformat_type =
3500                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3501                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3502                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3503         };
3504
3505         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3506                 return rte_flow_error_set(error, EINVAL,
3507                                           RTE_FLOW_ERROR_TYPE_ACTION,
3508                                           NULL, "can't create L2 decap action");
3509         return 0;
3510 }
3511
3512 /**
3513  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3514  *
3515  * @param[in] dev
3516  *   Pointer to rte_eth_dev structure.
3517  * @param[in] action
3518  *   Pointer to action structure.
3519  * @param[in, out] dev_flow
3520  *   Pointer to the mlx5_flow.
3521  * @param[in] attr
3522  *   Pointer to the flow attributes.
3523  * @param[out] error
3524  *   Pointer to the error structure.
3525  *
3526  * @return
3527  *   0 on success, a negative errno value otherwise and rte_errno is set.
3528  */
3529 static int
3530 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3531                                 const struct rte_flow_action *action,
3532                                 struct mlx5_flow *dev_flow,
3533                                 const struct rte_flow_attr *attr,
3534                                 struct rte_flow_error *error)
3535 {
3536         const struct rte_flow_action_raw_encap *encap_data;
3537         struct mlx5_flow_dv_encap_decap_resource res;
3538
3539         memset(&res, 0, sizeof(res));
3540         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3541         res.size = encap_data->size;
3542         memcpy(res.buf, encap_data->data, res.size);
3543         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3544                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3545                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3546         if (attr->transfer)
3547                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3548         else
3549                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3550                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3551         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3552                 return rte_flow_error_set(error, EINVAL,
3553                                           RTE_FLOW_ERROR_TYPE_ACTION,
3554                                           NULL, "can't create encap action");
3555         return 0;
3556 }
3557
3558 /**
3559  * Create action push VLAN.
3560  *
3561  * @param[in] dev
3562  *   Pointer to rte_eth_dev structure.
3563  * @param[in] attr
3564  *   Pointer to the flow attributes.
3565  * @param[in] vlan
3566  *   Pointer to the vlan to push to the Ethernet header.
3567  * @param[in, out] dev_flow
3568  *   Pointer to the mlx5_flow.
3569  * @param[out] error
3570  *   Pointer to the error structure.
3571  *
3572  * @return
3573  *   0 on success, a negative errno value otherwise and rte_errno is set.
3574  */
3575 static int
3576 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3577                                 const struct rte_flow_attr *attr,
3578                                 const struct rte_vlan_hdr *vlan,
3579                                 struct mlx5_flow *dev_flow,
3580                                 struct rte_flow_error *error)
3581 {
3582         struct mlx5_flow_dv_push_vlan_action_resource res;
3583
3584         memset(&res, 0, sizeof(res));
3585         res.vlan_tag =
3586                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3587                                  vlan->vlan_tci);
3588         if (attr->transfer)
3589                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3590         else
3591                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3592                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3593         return flow_dv_push_vlan_action_resource_register
3594                                             (dev, &res, dev_flow, error);
3595 }
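
/*
 * Tag layout sketch (values hypothetical): the 32-bit vlan_tag composed
 * above carries the TPID in its upper 16 bits and the TCI (PCP/DEI/VID)
 * in its lower 16 bits, converted to network byte order. For 802.1Q,
 * PCP 3, VID 100:
 *
 *   uint32_t tag = ((uint32_t)RTE_ETHER_TYPE_VLAN << 16) |
 *                  (3 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100;
 *   res.vlan_tag = rte_cpu_to_be_32(tag);
 */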
3596
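/*
 * Non-zero while the flow being processed contains an E-Switch (FDB)
 * mirror, i.e. a sample action; the modify-header validator below uses
 * it to reject unsupported action ordering.
 */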
3597 static int fdb_mirror;
3598
3599 /**
3600  * Validate the modify-header actions.
3601  *
3602  * @param[in] action_flags
3603  *   Holds the actions detected until now.
3604  * @param[in] action
3605  *   Pointer to the modify action.
3606  * @param[out] error
3607  *   Pointer to error structure.
3608  *
3609  * @return
3610  *   0 on success, a negative errno value otherwise and rte_errno is set.
3611  */
3612 static int
3613 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3614                                    const struct rte_flow_action *action,
3615                                    struct rte_flow_error *error)
3616 {
3617         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3618                 return rte_flow_error_set(error, EINVAL,
3619                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3620                                           NULL, "action configuration not set");
3621         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3622                 return rte_flow_error_set(error, EINVAL,
3623                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3624                                           "can't have encap action before"
3625                                           " modify action");
3626         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3627                 return rte_flow_error_set(error, EINVAL,
3628                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3629                                           "can't support sample action before"
3630                                           " modify action for E-Switch"
3631                                           " mirroring");
3632         return 0;
3633 }
3634
3635 /**
3636  * Validate the modify-header MAC address actions.
3637  *
3638  * @param[in] action_flags
3639  *   Holds the actions detected until now.
3640  * @param[in] action
3641  *   Pointer to the modify action.
3642  * @param[in] item_flags
3643  *   Holds the items detected.
3644  * @param[out] error
3645  *   Pointer to error structure.
3646  *
3647  * @return
3648  *   0 on success, a negative errno value otherwise and rte_errno is set.
3649  */
3650 static int
3651 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3652                                    const struct rte_flow_action *action,
3653                                    const uint64_t item_flags,
3654                                    struct rte_flow_error *error)
3655 {
3656         int ret = 0;
3657
3658         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3659         if (!ret) {
3660                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3661                         return rte_flow_error_set(error, EINVAL,
3662                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3663                                                   NULL,
3664                                                   "no L2 item in pattern");
3665         }
3666         return ret;
3667 }
3668
3669 /**
3670  * Validate the modify-header IPv4 address actions.
3671  *
3672  * @param[in] action_flags
3673  *   Holds the actions detected until now.
3674  * @param[in] action
3675  *   Pointer to the modify action.
3676  * @param[in] item_flags
3677  *   Holds the items detected.
3678  * @param[out] error
3679  *   Pointer to error structure.
3680  *
3681  * @return
3682  *   0 on success, a negative errno value otherwise and rte_errno is set.
3683  */
3684 static int
3685 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3686                                     const struct rte_flow_action *action,
3687                                     const uint64_t item_flags,
3688                                     struct rte_flow_error *error)
3689 {
3690         int ret = 0;
3691         uint64_t layer;
3692
3693         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3694         if (!ret) {
3695                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3696                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3697                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3698                 if (!(item_flags & layer))
3699                         return rte_flow_error_set(error, EINVAL,
3700                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3701                                                   NULL,
3702                                                   "no ipv4 item in pattern");
3703         }
3704         return ret;
3705 }
3706
3707 /**
3708  * Validate the modify-header IPv6 address actions.
3709  *
3710  * @param[in] action_flags
3711  *   Holds the actions detected until now.
3712  * @param[in] action
3713  *   Pointer to the modify action.
3714  * @param[in] item_flags
3715  *   Holds the items detected.
3716  * @param[out] error
3717  *   Pointer to error structure.
3718  *
3719  * @return
3720  *   0 on success, a negative errno value otherwise and rte_errno is set.
3721  */
3722 static int
3723 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3724                                     const struct rte_flow_action *action,
3725                                     const uint64_t item_flags,
3726                                     struct rte_flow_error *error)
3727 {
3728         int ret = 0;
3729         uint64_t layer;
3730
3731         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3732         if (!ret) {
3733                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3734                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3735                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3736                 if (!(item_flags & layer))
3737                         return rte_flow_error_set(error, EINVAL,
3738                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3739                                                   NULL,
3740                                                   "no ipv6 item in pattern");
3741         }
3742         return ret;
3743 }
3744
3745 /**
3746  * Validate the modify-header TP actions.
3747  *
3748  * @param[in] action_flags
3749  *   Holds the actions detected until now.
3750  * @param[in] action
3751  *   Pointer to the modify action.
3752  * @param[in] item_flags
3753  *   Holds the items detected.
3754  * @param[out] error
3755  *   Pointer to error structure.
3756  *
3757  * @return
3758  *   0 on success, a negative errno value otherwise and rte_errno is set.
3759  */
3760 static int
3761 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3762                                   const struct rte_flow_action *action,
3763                                   const uint64_t item_flags,
3764                                   struct rte_flow_error *error)
3765 {
3766         int ret = 0;
3767         uint64_t layer;
3768
3769         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3770         if (!ret) {
3771                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3772                                  MLX5_FLOW_LAYER_INNER_L4 :
3773                                  MLX5_FLOW_LAYER_OUTER_L4;
3774                 if (!(item_flags & layer))
3775                         return rte_flow_error_set(error, EINVAL,
3776                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3777                                                   NULL, "no transport layer "
3778                                                   "in pattern");
3779         }
3780         return ret;
3781 }
3782
3783 /**
3784  * Validate the modify-header actions of increment/decrement
3785  * TCP Sequence-number.
3786  *
3787  * @param[in] action_flags
3788  *   Holds the actions detected until now.
3789  * @param[in] action
3790  *   Pointer to the modify action.
3791  * @param[in] item_flags
3792  *   Holds the items detected.
3793  * @param[out] error
3794  *   Pointer to error structure.
3795  *
3796  * @return
3797  *   0 on success, a negative errno value otherwise and rte_errno is set.
3798  */
3799 static int
3800 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3801                                        const struct rte_flow_action *action,
3802                                        const uint64_t item_flags,
3803                                        struct rte_flow_error *error)
3804 {
3805         int ret = 0;
3806         uint64_t layer;
3807
3808         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3809         if (!ret) {
3810                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3811                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3812                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3813                 if (!(item_flags & layer))
3814                         return rte_flow_error_set(error, EINVAL,
3815                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3816                                                   NULL, "no TCP item in"
3817                                                   " pattern");
3818                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3819                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3820                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3821                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3822                         return rte_flow_error_set(error, EINVAL,
3823                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3824                                                   NULL,
3825                                                   "cannot decrease and increase"
3826                                                   " TCP sequence number"
3827                                                   " at the same time");
3828         }
3829         return ret;
3830 }
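
/*
 * Illustrative standalone sketch, not part of the driver: a minimal
 * rte_flow rule that satisfies the validation above. The pattern
 * carries a TCP item and only one of the INC/DEC TCP sequence actions
 * is present. Port 0 and the queue index are assumptions.
 */
#include <rte_flow.h>
#include <rte_byteorder.h>

static int
example_inc_tcp_seq(struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	rte_be32_t delta = RTE_BE32(1); /* increase SEQ by one */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ, .conf = &delta },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
		  .conf = &(struct rte_flow_action_queue){ .index = 0 } },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	return rte_flow_validate(0, &attr, pattern, actions, err);
}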
3831
3832 /**
3833  * Validate the modify-header actions of increment/decrement
3834  * TCP Acknowledgment number.
3835  *
3836  * @param[in] action_flags
3837  *   Holds the actions detected until now.
3838  * @param[in] action
3839  *   Pointer to the modify action.
3840  * @param[in] item_flags
3841  *   Holds the items detected.
3842  * @param[out] error
3843  *   Pointer to error structure.
3844  *
3845  * @return
3846  *   0 on success, a negative errno value otherwise and rte_errno is set.
3847  */
3848 static int
3849 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3850                                        const struct rte_flow_action *action,
3851                                        const uint64_t item_flags,
3852                                        struct rte_flow_error *error)
3853 {
3854         int ret = 0;
3855         uint64_t layer;
3856
3857         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3858         if (!ret) {
3859                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3860                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3861                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3862                 if (!(item_flags & layer))
3863                         return rte_flow_error_set(error, EINVAL,
3864                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3865                                                   NULL, "no TCP item in"
3866                                                   " pattern");
3867                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3868                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3869                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3870                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3871                         return rte_flow_error_set(error, EINVAL,
3872                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3873                                                   NULL,
3874                                                   "cannot decrease and increase"
3875                                                   " TCP acknowledgment number"
3876                                                   " at the same time");
3877         }
3878         return ret;
3879 }
3880
3881 /**
3882  * Validate the modify-header TTL actions.
3883  *
3884  * @param[in] action_flags
3885  *   Holds the actions detected until now.
3886  * @param[in] action
3887  *   Pointer to the modify action.
3888  * @param[in] item_flags
3889  *   Holds the items detected.
3890  * @param[out] error
3891  *   Pointer to error structure.
3892  *
3893  * @return
3894  *   0 on success, a negative errno value otherwise and rte_errno is set.
3895  */
3896 static int
3897 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3898                                    const struct rte_flow_action *action,
3899                                    const uint64_t item_flags,
3900                                    struct rte_flow_error *error)
3901 {
3902         int ret = 0;
3903         uint64_t layer;
3904
3905         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3906         if (!ret) {
3907                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3908                                  MLX5_FLOW_LAYER_INNER_L3 :
3909                                  MLX5_FLOW_LAYER_OUTER_L3;
3910                 if (!(item_flags & layer))
3911                         return rte_flow_error_set(error, EINVAL,
3912                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3913                                                   NULL,
3914                                                   "no IP protocol in pattern");
3915         }
3916         return ret;
3917 }
3918
/**
 * Validate jump action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload information, if any.
 * @param[in] action
 *   Pointer to the jump action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] external
 *   Action belongs to flow rule created by request external to PMD.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
3936 static int
3937 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3938                              const struct mlx5_flow_tunnel *tunnel,
3939                              const struct rte_flow_action *action,
3940                              uint64_t action_flags,
3941                              const struct rte_flow_attr *attributes,
3942                              bool external, struct rte_flow_error *error)
3943 {
3944         uint32_t target_group, table;
3945         int ret = 0;
3946         struct flow_grp_info grp_info = {
3947                 .external = !!external,
3948                 .transfer = !!attributes->transfer,
3949                 .fdb_def_rule = 1,
3950                 .std_tbl_fix = 0
3951         };
3952         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3953                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3954                 return rte_flow_error_set(error, EINVAL,
3955                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3956                                           "can't have 2 fate actions in"
3957                                           " same flow");
3958         if (action_flags & MLX5_FLOW_ACTION_METER)
3959                 return rte_flow_error_set(error, ENOTSUP,
3960                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "jump with meter not supported");
3962         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "E-Switch mirroring can't support"
					  " sample and jump actions in the"
					  " same flow");
3968         if (!action->conf)
3969                 return rte_flow_error_set(error, EINVAL,
3970                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3971                                           NULL, "action configuration not set");
3972         target_group =
3973                 ((const struct rte_flow_action_jump *)action->conf)->group;
3974         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3975                                        grp_info, error);
3976         if (ret)
3977                 return ret;
3978         if (attributes->group == target_group &&
3979             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3980                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3981                 return rte_flow_error_set(error, EINVAL,
3982                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3983                                           "target group must be other than"
3984                                           " the current flow group");
3985         return 0;
3986 }
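
/*
 * Illustrative standalone sketch, not part of the driver: a jump
 * action that passes the checks above, i.e. a single fate action
 * whose target group differs from the flow's own group. Port 0 and
 * the group numbers are assumptions.
 */
#include <rte_flow.h>

static int
example_jump(struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
	struct rte_flow_action_jump jump = { .group = 1 }; /* != attr.group */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	return rte_flow_validate(0, &attr, pattern, actions, err);
}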
3987
/**
3989  * Validate the port_id action.
3990  *
3991  * @param[in] dev
3992  *   Pointer to rte_eth_dev structure.
3993  * @param[in] action_flags
3994  *   Bit-fields that holds the actions detected until now.
3995  * @param[in] action
3996  *   Port_id RTE action structure.
3997  * @param[in] attr
3998  *   Attributes of flow that includes this action.
3999  * @param[out] error
4000  *   Pointer to error structure.
4001  *
4002  * @return
4003  *   0 on success, a negative errno value otherwise and rte_errno is set.
4004  */
4005 static int
4006 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4007                                 uint64_t action_flags,
4008                                 const struct rte_flow_action *action,
4009                                 const struct rte_flow_attr *attr,
4010                                 struct rte_flow_error *error)
4011 {
4012         const struct rte_flow_action_port_id *port_id;
4013         struct mlx5_priv *act_priv;
4014         struct mlx5_priv *dev_priv;
4015         uint16_t port;
4016
4017         if (!attr->transfer)
4018                 return rte_flow_error_set(error, ENOTSUP,
4019                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4020                                           NULL,
4021                                           "port id action is valid in transfer"
4022                                           " mode only");
4023         if (!action || !action->conf)
4024                 return rte_flow_error_set(error, ENOTSUP,
4025                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4026                                           NULL,
4027                                           "port id action parameters must be"
4028                                           " specified");
4029         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4030                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4031                 return rte_flow_error_set(error, EINVAL,
4032                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can have only one fate action in"
					  " a flow");
4035         dev_priv = mlx5_dev_to_eswitch_info(dev);
4036         if (!dev_priv)
4037                 return rte_flow_error_set(error, rte_errno,
4038                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4039                                           NULL,
4040                                           "failed to obtain E-Switch info");
4041         port_id = action->conf;
4042         port = port_id->original ? dev->data->port_id : port_id->id;
4043         act_priv = mlx5_port_to_eswitch_info(port, false);
4044         if (!act_priv)
4045                 return rte_flow_error_set
4046                                 (error, rte_errno,
4047                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4048                                  "failed to obtain E-Switch port id for port");
4049         if (act_priv->domain_id != dev_priv->domain_id)
4050                 return rte_flow_error_set
4051                                 (error, EINVAL,
4052                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4053                                  "port does not belong to"
4054                                  " E-Switch being configured");
4055         return 0;
4056 }
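
/*
 * Illustrative standalone sketch, not part of the driver: the port_id
 * action is accepted only on transfer (E-Switch) rules and requires an
 * explicit configuration. The destination port id is an assumption.
 */
#include <rte_flow.h>

static int
example_port_id(struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .transfer = 1, .ingress = 1 };
	struct rte_flow_action_port_id pid = { .id = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	return rte_flow_validate(0, &attr, pattern, actions, err);
}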
4057
4058 /**
4059  * Get the maximum number of modify header actions.
4060  *
4061  * @param dev
4062  *   Pointer to rte_eth_dev structure.
4063  * @param flags
4064  *   Flags bits to check if root level.
4065  *
4066  * @return
4067  *   Max number of modify header actions device can support.
4068  */
4069 static inline unsigned int
4070 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4071                               uint64_t flags)
4072 {
4073         /*
4074          * There's no way to directly query the max capacity from FW.
4075          * The maximal value on root table should be assumed to be supported.
4076          */
4077         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4078                 return MLX5_MAX_MODIFY_NUM;
4079         else
4080                 return MLX5_ROOT_TBL_MODIFY_NUM;
4081 }
4082
4083 /**
4084  * Validate the meter action.
4085  *
4086  * @param[in] dev
4087  *   Pointer to rte_eth_dev structure.
4088  * @param[in] action_flags
4089  *   Bit-fields that holds the actions detected until now.
4090  * @param[in] action
4091  *   Pointer to the meter action.
4092  * @param[in] attr
4093  *   Attributes of flow that includes this action.
4094  * @param[out] error
4095  *   Pointer to error structure.
4096  *
4097  * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
4099  */
4100 static int
4101 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4102                                 uint64_t action_flags,
4103                                 const struct rte_flow_action *action,
4104                                 const struct rte_flow_attr *attr,
4105                                 struct rte_flow_error *error)
4106 {
4107         struct mlx5_priv *priv = dev->data->dev_private;
4108         const struct rte_flow_action_meter *am = action->conf;
4109         struct mlx5_flow_meter *fm;
4110
4111         if (!am)
4112                 return rte_flow_error_set(error, EINVAL,
4113                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4114                                           "meter action conf is NULL");
4115
4116         if (action_flags & MLX5_FLOW_ACTION_METER)
4117                 return rte_flow_error_set(error, ENOTSUP,
4118                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter chaining not supported");
4120         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4121                 return rte_flow_error_set(error, ENOTSUP,
4122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter with jump not supported");
4124         if (!priv->mtr_en)
4125                 return rte_flow_error_set(error, ENOTSUP,
4126                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4127                                           NULL,
4128                                           "meter action not supported");
4129         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4130         if (!fm)
4131                 return rte_flow_error_set(error, EINVAL,
4132                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4133                                           "Meter not found");
4134         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4135               (!fm->ingress && !attr->ingress && attr->egress) ||
4136               (!fm->egress && !attr->egress && attr->ingress))))
4137                 return rte_flow_error_set(error, EINVAL,
4138                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4139                                           "Flow attributes are either invalid "
4140                                           "or have a conflict with current "
4141                                           "meter attributes");
4142         return 0;
4143 }
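
/*
 * Illustrative standalone sketch, not part of the driver: a flow
 * referencing a previously created meter. The meter object (mtr_id 1,
 * an assumption) must already exist, e.g. created via the rte_mtr
 * API, and once it is in use the flow attributes must not conflict
 * with the meter attributes, as checked above.
 */
#include <rte_flow.h>

static int
example_meter(struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_meter mtr = { .mtr_id = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &mtr },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
		  .conf = &(struct rte_flow_action_queue){ .index = 0 } },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	return rte_flow_validate(0, &attr, pattern, actions, err);
}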
4144
4145 /**
4146  * Validate the age action.
4147  *
4148  * @param[in] action_flags
4149  *   Holds the actions detected until now.
4150  * @param[in] action
4151  *   Pointer to the age action.
4152  * @param[in] dev
4153  *   Pointer to the Ethernet device structure.
4154  * @param[out] error
4155  *   Pointer to error structure.
4156  *
4157  * @return
4158  *   0 on success, a negative errno value otherwise and rte_errno is set.
4159  */
4160 static int
4161 flow_dv_validate_action_age(uint64_t action_flags,
4162                             const struct rte_flow_action *action,
4163                             struct rte_eth_dev *dev,
4164                             struct rte_flow_error *error)
4165 {
4166         struct mlx5_priv *priv = dev->data->dev_private;
4167         const struct rte_flow_action_age *age = action->conf;
4168
4169         if (!priv->config.devx || priv->sh->cmng.counter_fallback)
4170                 return rte_flow_error_set(error, ENOTSUP,
4171                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4172                                           NULL,
4173                                           "age action not supported");
4174         if (!(action->conf))
4175                 return rte_flow_error_set(error, EINVAL,
4176                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4177                                           "configuration cannot be null");
4178         if (!(age->timeout))
4179                 return rte_flow_error_set(error, EINVAL,
4180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4181                                           "invalid timeout value 0");
4182         if (action_flags & MLX5_FLOW_ACTION_AGE)
4183                 return rte_flow_error_set(error, EINVAL,
4184                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4185                                           "duplicate age actions set");
4186         return 0;
4187 }
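
/*
 * Illustrative standalone sketch, not part of the driver: an AGE
 * action with a non-zero timeout, used at most once per flow as the
 * validation above requires. The 10 second timeout is an assumption.
 */
#include <rte_flow.h>

static int
example_age(struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_age age = { .timeout = 10 /* seconds */ };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
		  .conf = &(struct rte_flow_action_queue){ .index = 0 } },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	return rte_flow_validate(0, &attr, pattern, actions, err);
}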
4188
4189 /**
4190  * Validate the modify-header IPv4 DSCP actions.
4191  *
4192  * @param[in] action_flags
4193  *   Holds the actions detected until now.
4194  * @param[in] action
4195  *   Pointer to the modify action.
4196  * @param[in] item_flags
4197  *   Holds the items detected.
4198  * @param[out] error
4199  *   Pointer to error structure.
4200  *
4201  * @return
4202  *   0 on success, a negative errno value otherwise and rte_errno is set.
4203  */
4204 static int
4205 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4206                                          const struct rte_flow_action *action,
4207                                          const uint64_t item_flags,
4208                                          struct rte_flow_error *error)
4209 {
4210         int ret = 0;
4211
4212         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4213         if (!ret) {
4214                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4215                         return rte_flow_error_set(error, EINVAL,
4216                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4217                                                   NULL,
4218                                                   "no ipv4 item in pattern");
4219         }
4220         return ret;
4221 }
4222
4223 /**
4224  * Validate the modify-header IPv6 DSCP actions.
4225  *
4226  * @param[in] action_flags
4227  *   Holds the actions detected until now.
4228  * @param[in] action
4229  *   Pointer to the modify action.
4230  * @param[in] item_flags
4231  *   Holds the items detected.
4232  * @param[out] error
4233  *   Pointer to error structure.
4234  *
4235  * @return
4236  *   0 on success, a negative errno value otherwise and rte_errno is set.
4237  */
4238 static int
4239 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4240                                          const struct rte_flow_action *action,
4241                                          const uint64_t item_flags,
4242                                          struct rte_flow_error *error)
4243 {
4244         int ret = 0;
4245
4246         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4247         if (!ret) {
4248                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4249                         return rte_flow_error_set(error, EINVAL,
4250                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4251                                                   NULL,
4252                                                   "no ipv6 item in pattern");
4253         }
4254         return ret;
4255 }
4256
4257 /**
4258  * Match modify-header resource.
4259  *
4260  * @param list
4261  *   Pointer to the hash list.
4262  * @param entry
4263  *   Pointer to exist resource entry object.
4264  * @param key
4265  *   Key of the new entry.
4266  * @param ctx
4267  *   Pointer to new modify-header resource.
4268  *
4269  * @return
4270  *   0 on matching, non-zero otherwise.
4271  */
4272 int
4273 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4274                         struct mlx5_hlist_entry *entry,
4275                         uint64_t key __rte_unused, void *cb_ctx)
4276 {
4277         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4278         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4279         struct mlx5_flow_dv_modify_hdr_resource *resource =
4280                         container_of(entry, typeof(*resource), entry);
4281         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4282
4283         key_len += ref->actions_num * sizeof(ref->actions[0]);
4284         return ref->actions_num != resource->actions_num ||
4285                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4286 }
4287
4288 struct mlx5_hlist_entry *
4289 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4290                          void *cb_ctx)
4291 {
4292         struct mlx5_dev_ctx_shared *sh = list->ctx;
4293         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4294         struct mlx5dv_dr_domain *ns;
4295         struct mlx5_flow_dv_modify_hdr_resource *entry;
4296         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4297         int ret;
4298         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4299         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4300
4301         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4302                             SOCKET_ID_ANY);
4303         if (!entry) {
4304                 rte_flow_error_set(ctx->error, ENOMEM,
4305                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4306                                    "cannot allocate resource memory");
4307                 return NULL;
4308         }
4309         rte_memcpy(&entry->ft_type,
4310                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4311                    key_len + data_len);
4312         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4313                 ns = sh->fdb_domain;
4314         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4315                 ns = sh->tx_domain;
4316         else
4317                 ns = sh->rx_domain;
4318         ret = mlx5_flow_os_create_flow_action_modify_header
4319                                         (sh->ctx, ns, entry,
4320                                          data_len, &entry->action);
4321         if (ret) {
4322                 mlx5_free(entry);
4323                 rte_flow_error_set(ctx->error, ENOMEM,
4324                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4325                                    NULL, "cannot create modification action");
4326                 return NULL;
4327         }
4328         return &entry->entry;
4329 }
4330
4331 /**
4332  * Validate the sample action.
4333  *
4334  * @param[in] action_flags
4335  *   Holds the actions detected until now.
4336  * @param[in] action
4337  *   Pointer to the sample action.
4338  * @param[in] dev
4339  *   Pointer to the Ethernet device structure.
4340  * @param[in] attr
4341  *   Attributes of flow that includes this action.
4342  * @param[out] error
4343  *   Pointer to error structure.
4344  *
4345  * @return
4346  *   0 on success, a negative errno value otherwise and rte_errno is set.
4347  */
4348 static int
4349 flow_dv_validate_action_sample(uint64_t action_flags,
4350                                const struct rte_flow_action *action,
4351                                struct rte_eth_dev *dev,
4352                                const struct rte_flow_attr *attr,
4353                                struct rte_flow_error *error)
4354 {
4355         struct mlx5_priv *priv = dev->data->dev_private;
4356         struct mlx5_dev_config *dev_conf = &priv->config;
4357         const struct rte_flow_action_sample *sample = action->conf;
4358         const struct rte_flow_action *act;
4359         uint64_t sub_action_flags = 0;
4360         uint16_t queue_index = 0xFFFF;
4361         int actions_n = 0;
4362         int ret;
4363         fdb_mirror = 0;
4364
4365         if (!sample)
4366                 return rte_flow_error_set(error, EINVAL,
4367                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4368                                           "configuration cannot be NULL");
4369         if (sample->ratio == 0)
4370                 return rte_flow_error_set(error, EINVAL,
4371                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "ratio value must be at least 1");
4373         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4374                 return rte_flow_error_set(error, ENOTSUP,
4375                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4376                                           NULL,
4377                                           "sample action not supported");
4378         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4379                 return rte_flow_error_set(error, EINVAL,
4380                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4381                                           "Multiple sample actions not "
4382                                           "supported");
4383         if (action_flags & MLX5_FLOW_ACTION_METER)
4384                 return rte_flow_error_set(error, EINVAL,
4385                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4386                                           "wrong action order, meter should "
4387                                           "be after sample action");
4388         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4389                 return rte_flow_error_set(error, EINVAL,
4390                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4391                                           "wrong action order, jump should "
4392                                           "be after sample action");
4393         act = sample->actions;
4394         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4395                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4396                         return rte_flow_error_set(error, ENOTSUP,
4397                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4398                                                   act, "too many actions");
4399                 switch (act->type) {
4400                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4401                         ret = mlx5_flow_validate_action_queue(act,
4402                                                               sub_action_flags,
4403                                                               dev,
4404                                                               attr, error);
4405                         if (ret < 0)
4406                                 return ret;
4407                         queue_index = ((const struct rte_flow_action_queue *)
4408                                                         (act->conf))->index;
4409                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4410                         ++actions_n;
4411                         break;
4412                 case RTE_FLOW_ACTION_TYPE_MARK:
4413                         ret = flow_dv_validate_action_mark(dev, act,
4414                                                            sub_action_flags,
4415                                                            attr, error);
4416                         if (ret < 0)
4417                                 return ret;
4418                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4419                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4420                                                 MLX5_FLOW_ACTION_MARK_EXT;
4421                         else
4422                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4423                         ++actions_n;
4424                         break;
4425                 case RTE_FLOW_ACTION_TYPE_COUNT:
4426                         ret = flow_dv_validate_action_count(dev, error);
4427                         if (ret < 0)
4428                                 return ret;
4429                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4430                         ++actions_n;
4431                         break;
4432                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4433                         ret = flow_dv_validate_action_port_id(dev,
4434                                                               sub_action_flags,
4435                                                               act,
4436                                                               attr,
4437                                                               error);
4438                         if (ret)
4439                                 return ret;
4440                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4441                         ++actions_n;
4442                         break;
4443                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4444                         ret = flow_dv_validate_action_raw_encap_decap
4445                                 (dev, NULL, act->conf, attr, &sub_action_flags,
4446                                  &actions_n, error);
4447                         if (ret < 0)
4448                                 return ret;
4449                         ++actions_n;
4450                         break;
4451                 default:
4452                         return rte_flow_error_set(error, ENOTSUP,
4453                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4454                                                   NULL,
4455                                                   "Doesn't support optional "
4456                                                   "action");
4457                 }
4458         }
4459         if (attr->ingress && !attr->transfer) {
4460                 if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
4461                         return rte_flow_error_set(error, EINVAL,
4462                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4463                                                   NULL,
						  "Ingress must have a dest "
						  "QUEUE for Sample");
4466         } else if (attr->egress && !attr->transfer) {
4467                 return rte_flow_error_set(error, ENOTSUP,
4468                                           RTE_FLOW_ERROR_TYPE_ACTION,
4469                                           NULL,
					  "Sample only supports Ingress "
					  "or E-Switch");
4472         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4473                 MLX5_ASSERT(attr->transfer);
4474                 if (sample->ratio > 1)
4475                         return rte_flow_error_set(error, ENOTSUP,
4476                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4477                                                   NULL,
4478                                                   "E-Switch doesn't support "
4479                                                   "any optional action "
4480                                                   "for sampling");
4481                 fdb_mirror = 1;
4482                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4483                         return rte_flow_error_set(error, ENOTSUP,
4484                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4485                                                   NULL,
4486                                                   "unsupported action QUEUE");
4487                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4488                         return rte_flow_error_set(error, EINVAL,
4489                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4490                                                   NULL,
						  "E-Switch must have a dest "
						  "port for mirroring");
4493         }
	/* Continue validation for Xcap actions. */
4495         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4496             (queue_index == 0xFFFF ||
4497              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4498                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4499                      MLX5_FLOW_XCAP_ACTIONS)
4500                         return rte_flow_error_set(error, ENOTSUP,
4501                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4502                                                   NULL, "encap and decap "
						  "combination isn't "
4504                                                   "supported");
4505                 if (!attr->transfer && attr->ingress && (sub_action_flags &
4506                                                         MLX5_FLOW_ACTION_ENCAP))
4507                         return rte_flow_error_set(error, ENOTSUP,
4508                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4509                                                   NULL, "encap is not supported"
4510                                                   " for ingress traffic");
4511         }
4512         return 0;
4513 }
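
/*
 * Illustrative standalone sketch, not part of the driver: an ingress
 * sample action satisfying the constraints above, a non-zero ratio
 * and a QUEUE fate inside the sample sub-action list. Port 0 and the
 * queue indices are assumptions.
 */
#include <rte_flow.h>

static int
example_sample(struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue sample_q = { .index = 1 };
	struct rte_flow_action sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &sample_q },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_action_sample sample = {
		.ratio = 2,	/* sample every 2nd packet */
		.actions = sub_actions,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
		  .conf = &(struct rte_flow_action_queue){ .index = 0 } },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	return rte_flow_validate(0, &attr, pattern, actions, err);
}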
4514
4515 /**
4516  * Find existing modify-header resource or create and register a new one.
4517  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value and rte_errno is set.
4529  */
4530 static int
4531 flow_dv_modify_hdr_resource_register
4532                         (struct rte_eth_dev *dev,
4533                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4534                          struct mlx5_flow *dev_flow,
4535                          struct rte_flow_error *error)
4536 {
4537         struct mlx5_priv *priv = dev->data->dev_private;
4538         struct mlx5_dev_ctx_shared *sh = priv->sh;
4539         uint32_t key_len = sizeof(*resource) -
4540                            offsetof(typeof(*resource), ft_type) +
4541                            resource->actions_num * sizeof(resource->actions[0]);
4542         struct mlx5_hlist_entry *entry;
4543         struct mlx5_flow_cb_ctx ctx = {
4544                 .error = error,
4545                 .data = resource,
4546         };
4547
4548         resource->flags = dev_flow->dv.group ? 0 :
4549                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4550         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4551                                     resource->flags))
4552                 return rte_flow_error_set(error, EOVERFLOW,
4553                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4554                                           "too many modify header items");
4555         resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4556         entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
4557         if (!entry)
4558                 return -rte_errno;
4559         resource = container_of(entry, typeof(*resource), entry);
4560         dev_flow->handle->dvh.modify_hdr = resource;
4561         return 0;
4562 }
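
/*
 * Illustrative standalone sketch, not part of the driver: keying a
 * variable-length resource for a hash list the way the function above
 * does, a checksum over the structure tail starting at the first
 * field that participates in matching. The struct layout is a
 * simplified stand-in, not the real modify-header resource.
 */
#include <stdint.h>
#include <stddef.h>
#include <rte_ip.h> /* __rte_raw_cksum() */

struct toy_resource {
	void *action;		/* not part of the key */
	uint8_t ft_type;	/* key starts here */
	uint8_t actions_num;
	uint64_t actions[];	/* actions_num entries follow */
};

static uint64_t
toy_resource_key(const struct toy_resource *res)
{
	size_t key_len = sizeof(*res) - offsetof(struct toy_resource, ft_type)
			 + res->actions_num * sizeof(res->actions[0]);

	/*
	 * Equal keys are only candidates: flow_dv_modify_match_cb()
	 * confirms with memcmp(), since a checksum may collide.
	 */
	return __rte_raw_cksum(&res->ft_type, key_len, 0);
}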
4563
4564 /**
4565  * Get DV flow counter by index.
4566  *
4567  * @param[in] dev
4568  *   Pointer to the Ethernet device structure.
4569  * @param[in] idx
4570  *   mlx5 flow counter index in the container.
4571  * @param[out] ppool
 *   mlx5 flow counter pool in the container.
4573  *
4574  * @return
4575  *   Pointer to the counter, NULL otherwise.
4576  */
4577 static struct mlx5_flow_counter *
4578 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4579                            uint32_t idx,
4580                            struct mlx5_flow_counter_pool **ppool)
4581 {
4582         struct mlx5_priv *priv = dev->data->dev_private;
4583         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4584         struct mlx5_flow_counter_pool *pool;
4585
4586         /* Decrease to original index and clear shared bit. */
4587         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4588         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4589         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4590         MLX5_ASSERT(pool);
4591         if (ppool)
4592                 *ppool = pool;
4593         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4594 }
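
/*
 * Illustrative standalone sketch, not part of the driver: the counter
 * index arithmetic used above. Indices are 1-based so that 0 can mean
 * "no counter", and a high bit marks shared counters. The constants
 * are assumptions mirroring MLX5_COUNTERS_PER_POOL and
 * MLX5_CNT_SHARED_OFFSET.
 */
#include <assert.h>
#include <stdint.h>

#define TOY_CNT_PER_POOL	512u
#define TOY_SHARED_BIT		(1u << 31)

int
main(void)
{
	uint32_t pool = 3, offset = 7;
	/* Encode: 1-based index, optionally tagged as shared. */
	uint32_t idx = (pool * TOY_CNT_PER_POOL + offset + 1) | TOY_SHARED_BIT;
	/* Decode: back to 0-based with the shared bit cleared. */
	uint32_t raw = (idx - 1) & (TOY_SHARED_BIT - 1);

	assert(raw / TOY_CNT_PER_POOL == pool);
	assert(raw % TOY_CNT_PER_POOL == offset);
	return 0;
}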
4595
4596 /**
4597  * Check the devx counter belongs to the pool.
4598  *
4599  * @param[in] pool
4600  *   Pointer to the counter pool.
4601  * @param[in] id
4602  *   The counter devx ID.
4603  *
4604  * @return
4605  *   True if counter belongs to the pool, false otherwise.
4606  */
4607 static bool
4608 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4609 {
4610         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4611                    MLX5_COUNTERS_PER_POOL;
4612
4613         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4614                 return true;
4615         return false;
4616 }
4617
4618 /**
4619  * Get a pool by devx counter ID.
4620  *
4621  * @param[in] cmng
4622  *   Pointer to the counter management.
4623  * @param[in] id
4624  *   The counter devx ID.
4625  *
4626  * @return
 *   The counter pool pointer if it exists, NULL otherwise.
4628  */
4629 static struct mlx5_flow_counter_pool *
4630 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4631 {
4632         uint32_t i;
4633         struct mlx5_flow_counter_pool *pool = NULL;
4634
4635         rte_spinlock_lock(&cmng->pool_update_sl);
4636         /* Check last used pool. */
4637         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4638             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4639                 pool = cmng->pools[cmng->last_pool_idx];
4640                 goto out;
4641         }
4642         /* ID out of range means no suitable pool in the container. */
4643         if (id > cmng->max_id || id < cmng->min_id)
4644                 goto out;
	/*
	 * Search the pools from the end of the container, since counter
	 * IDs are mostly allocated in an increasing sequence and the last
	 * pool is therefore the most likely match.
	 */
4650         i = cmng->n_valid;
4651         while (i--) {
4652                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4653
4654                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4655                         pool = pool_tmp;
4656                         break;
4657                 }
4658         }
4659 out:
4660         rte_spinlock_unlock(&cmng->pool_update_sl);
4661         return pool;
4662 }
4663
4664 /**
4665  * Resize a counter container.
4666  *
4667  * @param[in] dev
4668  *   Pointer to the Ethernet device structure.
4669  *
4670  * @return
4671  *   0 on success, otherwise negative errno value and rte_errno is set.
4672  */
4673 static int
4674 flow_dv_container_resize(struct rte_eth_dev *dev)
4675 {
4676         struct mlx5_priv *priv = dev->data->dev_private;
4677         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4678         void *old_pools = cmng->pools;
4679         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4680         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4681         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4682
4683         if (!pools) {
4684                 rte_errno = ENOMEM;
4685                 return -ENOMEM;
4686         }
4687         if (old_pools)
4688                 memcpy(pools, old_pools, cmng->n *
4689                                        sizeof(struct mlx5_flow_counter_pool *));
4690         cmng->n = resize;
4691         cmng->pools = pools;
4692         if (old_pools)
4693                 mlx5_free(old_pools);
4694         return 0;
4695 }
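
/*
 * Illustrative standalone sketch, not part of the driver: the
 * grow-by-fixed-chunk pattern used above. Allocate a larger zeroed
 * array, copy the old pointers, publish, then free the old array;
 * the caller is assumed to hold the update lock, as the driver does
 * with pool_update_sl. The 64-entry chunk is an assumption.
 */
#include <stdlib.h>
#include <string.h>

#define TOY_RESIZE_CHUNK 64u

static int
toy_ptr_array_grow(void ***arr, unsigned int *cap)
{
	unsigned int new_cap = *cap + TOY_RESIZE_CHUNK;
	void **grown = calloc(new_cap, sizeof(*grown));

	if (grown == NULL)
		return -1;
	if (*arr != NULL)
		memcpy(grown, *arr, *cap * sizeof(*grown));
	free(*arr);
	*arr = grown;
	*cap = new_cap;
	return 0;
}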
4696
4697 /**
4698  * Query a devx flow counter.
4699  *
4700  * @param[in] dev
4701  *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the flow counter.
4704  * @param[out] pkts
4705  *   The statistics value of packets.
4706  * @param[out] bytes
4707  *   The statistics value of bytes.
4708  *
4709  * @return
4710  *   0 on success, otherwise a negative errno value and rte_errno is set.
4711  */
4712 static inline int
4713 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4714                      uint64_t *bytes)
4715 {
4716         struct mlx5_priv *priv = dev->data->dev_private;
4717         struct mlx5_flow_counter_pool *pool = NULL;
4718         struct mlx5_flow_counter *cnt;
4719         int offset;
4720
4721         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4722         MLX5_ASSERT(pool);
4723         if (priv->sh->cmng.counter_fallback)
4724                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4725                                         0, pkts, bytes, 0, NULL, NULL, 0);
4726         rte_spinlock_lock(&pool->sl);
4727         if (!pool->raw) {
4728                 *pkts = 0;
4729                 *bytes = 0;
4730         } else {
4731                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4732                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4733                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4734         }
4735         rte_spinlock_unlock(&pool->sl);
4736         return 0;
4737 }
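
/*
 * Illustrative standalone sketch, not part of the driver: how an
 * application ultimately reaches this query path, through
 * rte_flow_query() with a COUNT action on an existing flow handle.
 */
#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

static void
example_query_count(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_count stats = { .reset = 0 };
	struct rte_flow_action count = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error err;

	if (rte_flow_query(port_id, flow, &count, &stats, &err) == 0 &&
	    stats.hits_set && stats.bytes_set)
		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
		       stats.hits, stats.bytes);
}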
4738
4739 /**
4740  * Create and initialize a new counter pool.
4741  *
4742  * @param[in] dev
4743  *   Pointer to the Ethernet device structure.
 * @param[in] dcs
 *   The devX counter handle.
 * @param[in] age
 *   Whether the pool is for counters that were allocated for aging.
 *
 * @return
 *   The pool pointer on success, NULL otherwise and rte_errno is set.
4753  */
4754 static struct mlx5_flow_counter_pool *
4755 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4756                     uint32_t age)
4757 {
4758         struct mlx5_priv *priv = dev->data->dev_private;
4759         struct mlx5_flow_counter_pool *pool;
4760         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4761         bool fallback = priv->sh->cmng.counter_fallback;
4762         uint32_t size = sizeof(*pool);
4763
4764         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4765         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4766         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4767         if (!pool) {
4768                 rte_errno = ENOMEM;
4769                 return NULL;
4770         }
4771         pool->raw = NULL;
4772         pool->is_aged = !!age;
4773         pool->query_gen = 0;
4774         pool->min_dcs = dcs;
4775         rte_spinlock_init(&pool->sl);
4776         rte_spinlock_init(&pool->csl);
4777         TAILQ_INIT(&pool->counters[0]);
4778         TAILQ_INIT(&pool->counters[1]);
4779         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4780         rte_spinlock_lock(&cmng->pool_update_sl);
4781         pool->index = cmng->n_valid;
4782         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4783                 mlx5_free(pool);
4784                 rte_spinlock_unlock(&cmng->pool_update_sl);
4785                 return NULL;
4786         }
4787         cmng->pools[pool->index] = pool;
4788         cmng->n_valid++;
4789         if (unlikely(fallback)) {
4790                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4791
4792                 if (base < cmng->min_id)
4793                         cmng->min_id = base;
4794                 if (base > cmng->max_id)
4795                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4796                 cmng->last_pool_idx = pool->index;
4797         }
4798         rte_spinlock_unlock(&cmng->pool_update_sl);
4799         return pool;
4800 }
4801
4802 /**
4803  * Prepare a new counter and/or a new counter pool.
4804  *
4805  * @param[in] dev
4806  *   Pointer to the Ethernet device structure.
4807  * @param[out] cnt_free
4808  *   Where to put the pointer of a new counter.
4809  * @param[in] age
 *   Whether the pool is for counters that were allocated for aging.
4811  *
4812  * @return
4813  *   The counter pool pointer and @p cnt_free is set on success,
4814  *   NULL otherwise and rte_errno is set.
4815  */
4816 static struct mlx5_flow_counter_pool *
4817 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4818                              struct mlx5_flow_counter **cnt_free,
4819                              uint32_t age)
4820 {
4821         struct mlx5_priv *priv = dev->data->dev_private;
4822         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4823         struct mlx5_flow_counter_pool *pool;
4824         struct mlx5_counters tmp_tq;
4825         struct mlx5_devx_obj *dcs = NULL;
4826         struct mlx5_flow_counter *cnt;
4827         enum mlx5_counter_type cnt_type =
4828                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4829         bool fallback = priv->sh->cmng.counter_fallback;
4830         uint32_t i;
4831
4832         if (fallback) {
4833                 /* bulk_bitmap must be 0 for single counter allocation. */
4834                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4835                 if (!dcs)
4836                         return NULL;
4837                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4838                 if (!pool) {
4839                         pool = flow_dv_pool_create(dev, dcs, age);
4840                         if (!pool) {
4841                                 mlx5_devx_cmd_destroy(dcs);
4842                                 return NULL;
4843                         }
4844                 }
4845                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4846                 cnt = MLX5_POOL_GET_CNT(pool, i);
4847                 cnt->pool = pool;
4848                 cnt->dcs_when_free = dcs;
4849                 *cnt_free = cnt;
4850                 return pool;
4851         }
4852         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4853         if (!dcs) {
4854                 rte_errno = ENODATA;
4855                 return NULL;
4856         }
4857         pool = flow_dv_pool_create(dev, dcs, age);
4858         if (!pool) {
4859                 mlx5_devx_cmd_destroy(dcs);
4860                 return NULL;
4861         }
4862         TAILQ_INIT(&tmp_tq);
4863         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4864                 cnt = MLX5_POOL_GET_CNT(pool, i);
4865                 cnt->pool = pool;
4866                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4867         }
4868         rte_spinlock_lock(&cmng->csl[cnt_type]);
4869         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4870         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4871         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4872         (*cnt_free)->pool = pool;
4873         return pool;
4874 }
4875
4876 /**
4877  * Allocate a flow counter.
4878  *
4879  * @param[in] dev
4880  *   Pointer to the Ethernet device structure.
4881  * @param[in] age
4882  *   Whether the counter was allocated for aging.
4883  *
4884  * @return
4885  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4886  */
4887 static uint32_t
4888 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4889 {
4890         struct mlx5_priv *priv = dev->data->dev_private;
4891         struct mlx5_flow_counter_pool *pool = NULL;
4892         struct mlx5_flow_counter *cnt_free = NULL;
4893         bool fallback = priv->sh->cmng.counter_fallback;
4894         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4895         enum mlx5_counter_type cnt_type =
4896                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4897         uint32_t cnt_idx;
4898
4899         if (!priv->config.devx) {
4900                 rte_errno = ENOTSUP;
4901                 return 0;
4902         }
4903         /* Get free counters from container. */
4904         rte_spinlock_lock(&cmng->csl[cnt_type]);
4905         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
4906         if (cnt_free)
4907                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
4908         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4909         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
4910                 goto err;
4911         pool = cnt_free->pool;
4912         if (fallback)
4913                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only on the first-time usage. */
4915         if (!cnt_free->action) {
4916                 uint16_t offset;
4917                 struct mlx5_devx_obj *dcs;
4918                 int ret;
4919
4920                 if (!fallback) {
4921                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4922                         dcs = pool->min_dcs;
4923                 } else {
4924                         offset = 0;
4925                         dcs = cnt_free->dcs_when_free;
4926                 }
4927                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4928                                                             &cnt_free->action);
4929                 if (ret) {
4930                         rte_errno = errno;
4931                         goto err;
4932                 }
4933         }
4934         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4935                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4936         /* Update the counter reset values. */
4937         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4938                                  &cnt_free->bytes))
4939                 goto err;
4940         if (!fallback && !priv->sh->cmng.query_thread_on)
4941                 /* Start the asynchronous batch query by the host thread. */
4942                 mlx5_set_query_alarm(priv->sh);
4943         return cnt_idx;
4944 err:
4945         if (cnt_free) {
4946                 cnt_free->pool = pool;
4947                 if (fallback)
4948                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
4949                 rte_spinlock_lock(&cmng->csl[cnt_type]);
4950                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
4951                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
4952         }
4953         return 0;
4954 }
4955
4956 /**
4957  * Allocate a shared flow counter.
4958  *
4959  * @param[in] ctx
4960  *   Pointer to the shared counter configuration.
 * @param[out] data
 *   Pointer to save the allocated counter index.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
4966  */
4968 static int32_t
4969 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4970 {
4971         struct mlx5_shared_counter_conf *conf = ctx;
4972         struct rte_eth_dev *dev = conf->dev;
4973         struct mlx5_flow_counter *cnt;
4974
4975         data->dword = flow_dv_counter_alloc(dev, 0);
4976         data->dword |= MLX5_CNT_SHARED_OFFSET;
4977         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4978         cnt->shared_info.id = conf->id;
4979         return 0;
4980 }
4981
4982 /**
4983  * Get a shared flow counter.
4984  *
4985  * @param[in] dev
4986  *   Pointer to the Ethernet device structure.
4987  * @param[in] id
4988  *   Counter identifier.
4989  *
4990  * @return
4991  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4992  */
4993 static uint32_t
4994 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4995 {
4996         struct mlx5_priv *priv = dev->data->dev_private;
4997         struct mlx5_shared_counter_conf conf = {
4998                 .dev = dev,
4999                 .id = id,
5000         };
5001         union mlx5_l3t_data data = {
5002                 .dword = 0,
5003         };
5004
5005         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5006                                flow_dv_counter_alloc_shared_cb, &conf);
5007         return data.dword;
5008 }
5009
5010 /**
5011  * Get age param from counter index.
5012  *
5013  * @param[in] dev
5014  *   Pointer to the Ethernet device structure.
5015  * @param[in] counter
5016  *   Index to the counter handler.
5017  *
5018  * @return
5019  *   The aging parameter specified for the counter index.
5020  */
5021 static struct mlx5_age_param*
5022 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5023                                 uint32_t counter)
5024 {
5025         struct mlx5_flow_counter *cnt;
5026         struct mlx5_flow_counter_pool *pool = NULL;
5027
5028         flow_dv_counter_get_by_idx(dev, counter, &pool);
5029         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5030         cnt = MLX5_POOL_GET_CNT(pool, counter);
5031         return MLX5_CNT_TO_AGE(cnt);
5032 }
5033
5034 /**
5035  * Remove a flow counter from aged counter list.
5036  *
5037  * @param[in] dev
5038  *   Pointer to the Ethernet device structure.
5039  * @param[in] counter
5040  *   Index to the counter handler.
5041  * @param[in] cnt
5042  *   Pointer to the counter handler.
5043  */
5044 static void
5045 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5046                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5047 {
5048         struct mlx5_age_info *age_info;
5049         struct mlx5_age_param *age_param;
5050         struct mlx5_priv *priv = dev->data->dev_private;
5051         uint16_t expected = AGE_CANDIDATE;
5052
5053         age_info = GET_PORT_AGE_INFO(priv);
5054         age_param = flow_dv_counter_idx_get_age(dev, counter);
5055         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5056                                          AGE_FREE, false, __ATOMIC_RELAXED,
5057                                          __ATOMIC_RELAXED)) {
		/*
		 * We need the lock even if the age timeout has expired,
		 * since the counter may still be under aging processing.
		 */
5062                 rte_spinlock_lock(&age_info->aged_sl);
5063                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5064                 rte_spinlock_unlock(&age_info->aged_sl);
5065                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5066         }
5067 }
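
For context, the aging side performs the mirror transition of the CAS above.
A hedged sketch of how the periodic query callback could move an expired
candidate onto the aged list; AGE_TMOUT and the helper shape are assumptions
inferred from the state names used here, not a verbatim copy of the driver:

static void
example_age_out(struct mlx5_age_info *age_info,
                struct mlx5_age_param *age_param,
                struct mlx5_flow_counter *cnt)
{
        uint16_t expected = AGE_CANDIDATE;

        /* AGE_TMOUT is assumed; only a live candidate may age out. */
        if (__atomic_compare_exchange_n(&age_param->state, &expected,
                                        AGE_TMOUT, false, __ATOMIC_RELAXED,
                                        __ATOMIC_RELAXED)) {
                rte_spinlock_lock(&age_info->aged_sl);
                TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
                rte_spinlock_unlock(&age_info->aged_sl);
        }
}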
5068
5069 /**
5070  * Release a flow counter.
5071  *
5072  * @param[in] dev
5073  *   Pointer to the Ethernet device structure.
5074  * @param[in] counter
5075  *   Index to the counter handler.
5076  */
5077 static void
5078 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
5079 {
5080         struct mlx5_priv *priv = dev->data->dev_private;
5081         struct mlx5_flow_counter_pool *pool = NULL;
5082         struct mlx5_flow_counter *cnt;
5083         enum mlx5_counter_type cnt_type;
5084
5085         if (!counter)
5086                 return;
5087         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5088         MLX5_ASSERT(pool);
5089         if (IS_SHARED_CNT(counter) &&
5090             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5091                 return;
5092         if (pool->is_aged)
5093                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5094         cnt->pool = pool;
        /*
         * In non-fallback mode, put the counter back to the list to be
         * updated. Two lists are used alternately: while one is being
         * queried, freed counters are added to the other one, selected
         * by the pool query_gen value. After a query finishes, that list
         * is appended to the global container counter list, and the lists
         * are switched as the next query starts. No lock is needed here
         * because the query callback and the release function always
         * operate on different lists.
         */
5105         if (!priv->sh->cmng.counter_fallback) {
5106                 rte_spinlock_lock(&pool->csl);
5107                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5108                 rte_spinlock_unlock(&pool->csl);
5109         } else {
5110                 cnt->dcs_when_free = cnt->dcs_when_active;
5111                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5112                                            MLX5_COUNTER_TYPE_ORIGIN;
5113                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5114                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5115                                   cnt, next);
5116                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5117         }
5118 }
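
The two-list scheme in the comment above can be summarized by its
query-completion side. A sketch under the assumption that query_gen simply
alternates between the two per-pool free lists; the helper name and the flip
are illustrative:

static void
example_pool_query_done(struct mlx5_flow_counter_pool *pool)
{
        /*
         * The list that collected releases during the query can now be
         * handed over to the global free list; flipping query_gen (assumed
         * to alternate 0/1) redirects subsequent releases to the other
         * list, so release and query never touch the same list.
         */
        pool->query_gen ^= 1;
}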
5119
/**
 * Verify that the @p attributes will be correctly understood by the NIC.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload descriptor, NULL for regular rules.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] grp_info
 *   Group translation info, including the external, transfer and standard
 *   table fixup flags.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non-root table.
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
5138 static int
5139 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5140                             const struct mlx5_flow_tunnel *tunnel,
5141                             const struct rte_flow_attr *attributes,
5142                             struct flow_grp_info grp_info,
5143                             struct rte_flow_error *error)
5144 {
5145         struct mlx5_priv *priv = dev->data->dev_private;
5146         uint32_t priority_max = priv->config.flow_prio - 1;
5147         int ret = 0;
5148
5149 #ifndef HAVE_MLX5DV_DR
5150         RTE_SET_USED(tunnel);
5151         RTE_SET_USED(grp_info);
5152         if (attributes->group)
5153                 return rte_flow_error_set(error, ENOTSUP,
5154                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5155                                           NULL,
5156                                           "groups are not supported");
5157 #else
5158         uint32_t table = 0;
5159
5160         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5161                                        grp_info, error);
5162         if (ret)
5163                 return ret;
5164         if (!table)
5165                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5166 #endif
5167         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5168             attributes->priority >= priority_max)
5169                 return rte_flow_error_set(error, ENOTSUP,
5170                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5171                                           NULL,
5172                                           "priority out of range");
5173         if (attributes->transfer) {
5174                 if (!priv->config.dv_esw_en)
5175                         return rte_flow_error_set
5176                                 (error, ENOTSUP,
5177                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5178                                  "E-Switch dr is not supported");
5179                 if (!(priv->representor || priv->master))
5180                         return rte_flow_error_set
5181                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5182                                  NULL, "E-Switch configuration can only be"
5183                                  " done by a master or a representor device");
5184                 if (attributes->egress)
5185                         return rte_flow_error_set
5186                                 (error, ENOTSUP,
5187                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5188                                  "egress is not supported");
5189         }
5190         if (!(attributes->egress ^ attributes->ingress))
5191                 return rte_flow_error_set(error, ENOTSUP,
5192                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5193                                           "must specify exactly one of "
5194                                           "ingress or egress");
5195         return ret;
5196 }
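
As an illustration of what passes the checks above, a hypothetical attribute
set for a plain NIC rule: non-root group, in-range priority, and exactly one
direction bit set:

static const struct rte_flow_attr example_attr = {
        .group = 1,     /* Non-root table, so the function returns 0. */
        .priority = 0,
        .ingress = 1,   /* Exactly one of ingress/egress must be set. */
        .egress = 0,
        .transfer = 0,
};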
5197
5198 /**
5199  * Internal validation function. For validating both actions and items.
5200  *
5201  * @param[in] dev
5202  *   Pointer to the rte_eth_dev structure.
5203  * @param[in] attr
5204  *   Pointer to the flow attributes.
5205  * @param[in] items
5206  *   Pointer to the list of items.
5207  * @param[in] actions
5208  *   Pointer to the list of actions.
5209  * @param[in] external
 *   This flow rule is created by a request external to the PMD.
5211  * @param[in] hairpin
5212  *   Number of hairpin TX actions, 0 means classic flow.
5213  * @param[out] error
5214  *   Pointer to the error structure.
5215  *
5216  * @return
5217  *   0 on success, a negative errno value otherwise and rte_errno is set.
5218  */
5219 static int
5220 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5221                  const struct rte_flow_item items[],
5222                  const struct rte_flow_action actions[],
5223                  bool external, int hairpin, struct rte_flow_error *error)
5224 {
5225         int ret;
5226         uint64_t action_flags = 0;
5227         uint64_t item_flags = 0;
5228         uint64_t last_item = 0;
5229         uint8_t next_protocol = 0xff;
5230         uint16_t ether_type = 0;
5231         int actions_n = 0;
5232         uint8_t item_ipv6_proto = 0;
5233         const struct rte_flow_item *gre_item = NULL;
5234         const struct rte_flow_action_raw_decap *decap;
5235         const struct rte_flow_action_raw_encap *encap;
5236         const struct rte_flow_action_rss *rss;
5237         const struct rte_flow_item_tcp nic_tcp_mask = {
5238                 .hdr = {
5239                         .tcp_flags = 0xFF,
5240                         .src_port = RTE_BE16(UINT16_MAX),
5241                         .dst_port = RTE_BE16(UINT16_MAX),
5242                 }
5243         };
5244         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5245                 .hdr = {
5246                         .src_addr =
5247                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5248                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5249                         .dst_addr =
5250                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5251                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5252                         .vtc_flow = RTE_BE32(0xffffffff),
5253                         .proto = 0xff,
5254                         .hop_limits = 0xff,
5255                 },
5256                 .has_frag_ext = 1,
5257         };
5258         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5259                 .hdr = {
5260                         .common = {
5261                                 .u32 =
5262                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5263                                         .type = 0xFF,
5264                                         }).u32),
5265                         },
5266                         .dummy[0] = 0xffffffff,
5267                 },
5268         };
5269         struct mlx5_priv *priv = dev->data->dev_private;
5270         struct mlx5_dev_config *dev_conf = &priv->config;
5271         uint16_t queue_index = 0xFFFF;
5272         const struct rte_flow_item_vlan *vlan_m = NULL;
5273         int16_t rw_act_num = 0;
5274         uint64_t is_root;
5275         const struct mlx5_flow_tunnel *tunnel;
5276         struct flow_grp_info grp_info = {
5277                 .external = !!external,
5278                 .transfer = !!attr->transfer,
5279                 .fdb_def_rule = !!priv->fdb_def_rule,
5280         };
5281         const struct rte_eth_hairpin_conf *conf;
5282
5283         if (items == NULL)
5284                 return -1;
5285         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5286                 tunnel = flow_items_to_tunnel(items);
5287                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5288                                 MLX5_FLOW_ACTION_DECAP;
5289         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5290                 tunnel = flow_actions_to_tunnel(actions);
5291                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5292         } else {
5293                 tunnel = NULL;
5294         }
5295         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5296                                 (dev, tunnel, attr, items, actions);
5297         ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
5298         if (ret < 0)
5299                 return ret;
5300         is_root = (uint64_t)ret;
5301         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5302                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5303                 int type = items->type;
5304
5305                 if (!mlx5_flow_os_item_supported(type))
5306                         return rte_flow_error_set(error, ENOTSUP,
5307                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5308                                                   NULL, "item not supported");
5309                 switch (type) {
5310                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5311                         if (items[0].type != (typeof(items[0].type))
5312                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5313                                 return rte_flow_error_set
5314                                                 (error, EINVAL,
5315                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5316                                                 NULL, "MLX5 private items "
5317                                                 "must be the first");
5318                         break;
5319                 case RTE_FLOW_ITEM_TYPE_VOID:
5320                         break;
5321                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5322                         ret = flow_dv_validate_item_port_id
5323                                         (dev, items, attr, item_flags, error);
5324                         if (ret < 0)
5325                                 return ret;
5326                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5327                         break;
5328                 case RTE_FLOW_ITEM_TYPE_ETH:
5329                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5330                                                           true, error);
5331                         if (ret < 0)
5332                                 return ret;
5333                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5334                                              MLX5_FLOW_LAYER_OUTER_L2;
5335                         if (items->mask != NULL && items->spec != NULL) {
5336                                 ether_type =
5337                                         ((const struct rte_flow_item_eth *)
5338                                          items->spec)->type;
5339                                 ether_type &=
5340                                         ((const struct rte_flow_item_eth *)
5341                                          items->mask)->type;
5342                                 ether_type = rte_be_to_cpu_16(ether_type);
5343                         } else {
5344                                 ether_type = 0;
5345                         }
5346                         break;
5347                 case RTE_FLOW_ITEM_TYPE_VLAN:
5348                         ret = flow_dv_validate_item_vlan(items, item_flags,
5349                                                          dev, error);
5350                         if (ret < 0)
5351                                 return ret;
5352                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5353                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5354                         if (items->mask != NULL && items->spec != NULL) {
5355                                 ether_type =
5356                                         ((const struct rte_flow_item_vlan *)
5357                                          items->spec)->inner_type;
5358                                 ether_type &=
5359                                         ((const struct rte_flow_item_vlan *)
5360                                          items->mask)->inner_type;
5361                                 ether_type = rte_be_to_cpu_16(ether_type);
5362                         } else {
5363                                 ether_type = 0;
5364                         }
5365                         /* Store outer VLAN mask for of_push_vlan action. */
5366                         if (!tunnel)
5367                                 vlan_m = items->mask;
5368                         break;
5369                 case RTE_FLOW_ITEM_TYPE_IPV4:
5370                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5371                                                   &item_flags, &tunnel);
5372                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5373                                                          last_item, ether_type,
5374                                                          error);
5375                         if (ret < 0)
5376                                 return ret;
5377                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5378                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5379                         if (items->mask != NULL &&
5380                             ((const struct rte_flow_item_ipv4 *)
5381                              items->mask)->hdr.next_proto_id) {
5382                                 next_protocol =
5383                                         ((const struct rte_flow_item_ipv4 *)
5384                                          (items->spec))->hdr.next_proto_id;
5385                                 next_protocol &=
5386                                         ((const struct rte_flow_item_ipv4 *)
5387                                          (items->mask))->hdr.next_proto_id;
5388                         } else {
5389                                 /* Reset for inner layer. */
5390                                 next_protocol = 0xff;
5391                         }
5392                         break;
5393                 case RTE_FLOW_ITEM_TYPE_IPV6:
5394                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5395                                                   &item_flags, &tunnel);
5396                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5397                                                            last_item,
5398                                                            ether_type,
5399                                                            &nic_ipv6_mask,
5400                                                            error);
5401                         if (ret < 0)
5402                                 return ret;
5403                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5404                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5405                         if (items->mask != NULL &&
5406                             ((const struct rte_flow_item_ipv6 *)
5407                              items->mask)->hdr.proto) {
5408                                 item_ipv6_proto =
5409                                         ((const struct rte_flow_item_ipv6 *)
5410                                          items->spec)->hdr.proto;
5411                                 next_protocol =
5412                                         ((const struct rte_flow_item_ipv6 *)
5413                                          items->spec)->hdr.proto;
5414                                 next_protocol &=
5415                                         ((const struct rte_flow_item_ipv6 *)
5416                                          items->mask)->hdr.proto;
5417                         } else {
5418                                 /* Reset for inner layer. */
5419                                 next_protocol = 0xff;
5420                         }
5421                         break;
5422                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5423                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5424                                                                   item_flags,
5425                                                                   error);
5426                         if (ret < 0)
5427                                 return ret;
5428                         last_item = tunnel ?
5429                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5430                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5431                         if (items->mask != NULL &&
5432                             ((const struct rte_flow_item_ipv6_frag_ext *)
5433                              items->mask)->hdr.next_header) {
5434                                 next_protocol =
5435                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5436                                  items->spec)->hdr.next_header;
5437                                 next_protocol &=
5438                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5439                                  items->mask)->hdr.next_header;
5440                         } else {
5441                                 /* Reset for inner layer. */
5442                                 next_protocol = 0xff;
5443                         }
5444                         break;
5445                 case RTE_FLOW_ITEM_TYPE_TCP:
5446                         ret = mlx5_flow_validate_item_tcp
5447                                                 (items, item_flags,
5448                                                  next_protocol,
5449                                                  &nic_tcp_mask,
5450                                                  error);
5451                         if (ret < 0)
5452                                 return ret;
5453                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5454                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5455                         break;
5456                 case RTE_FLOW_ITEM_TYPE_UDP:
5457                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5458                                                           next_protocol,
5459                                                           error);
5460                         if (ret < 0)
5461                                 return ret;
5462                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5463                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5464                         break;
5465                 case RTE_FLOW_ITEM_TYPE_GRE:
5466                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5467                                                           next_protocol, error);
5468                         if (ret < 0)
5469                                 return ret;
5470                         gre_item = items;
5471                         last_item = MLX5_FLOW_LAYER_GRE;
5472                         break;
5473                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5474                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5475                                                             next_protocol,
5476                                                             error);
5477                         if (ret < 0)
5478                                 return ret;
5479                         last_item = MLX5_FLOW_LAYER_NVGRE;
5480                         break;
5481                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5482                         ret = mlx5_flow_validate_item_gre_key
5483                                 (items, item_flags, gre_item, error);
5484                         if (ret < 0)
5485                                 return ret;
5486                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5487                         break;
5488                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5489                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5490                                                             error);
5491                         if (ret < 0)
5492                                 return ret;
5493                         last_item = MLX5_FLOW_LAYER_VXLAN;
5494                         break;
5495                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5496                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5497                                                                 item_flags, dev,
5498                                                                 error);
5499                         if (ret < 0)
5500                                 return ret;
5501                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5502                         break;
5503                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5504                         ret = mlx5_flow_validate_item_geneve(items,
5505                                                              item_flags, dev,
5506                                                              error);
5507                         if (ret < 0)
5508                                 return ret;
5509                         last_item = MLX5_FLOW_LAYER_GENEVE;
5510                         break;
5511                 case RTE_FLOW_ITEM_TYPE_MPLS:
5512                         ret = mlx5_flow_validate_item_mpls(dev, items,
5513                                                            item_flags,
5514                                                            last_item, error);
5515                         if (ret < 0)
5516                                 return ret;
5517                         last_item = MLX5_FLOW_LAYER_MPLS;
5518                         break;
5519
5520                 case RTE_FLOW_ITEM_TYPE_MARK:
5521                         ret = flow_dv_validate_item_mark(dev, items, attr,
5522                                                          error);
5523                         if (ret < 0)
5524                                 return ret;
5525                         last_item = MLX5_FLOW_ITEM_MARK;
5526                         break;
5527                 case RTE_FLOW_ITEM_TYPE_META:
5528                         ret = flow_dv_validate_item_meta(dev, items, attr,
5529                                                          error);
5530                         if (ret < 0)
5531                                 return ret;
5532                         last_item = MLX5_FLOW_ITEM_METADATA;
5533                         break;
5534                 case RTE_FLOW_ITEM_TYPE_ICMP:
5535                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5536                                                            next_protocol,
5537                                                            error);
5538                         if (ret < 0)
5539                                 return ret;
5540                         last_item = MLX5_FLOW_LAYER_ICMP;
5541                         break;
5542                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5543                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5544                                                             next_protocol,
5545                                                             error);
5546                         if (ret < 0)
5547                                 return ret;
5548                         item_ipv6_proto = IPPROTO_ICMPV6;
5549                         last_item = MLX5_FLOW_LAYER_ICMP6;
5550                         break;
5551                 case RTE_FLOW_ITEM_TYPE_TAG:
5552                         ret = flow_dv_validate_item_tag(dev, items,
5553                                                         attr, error);
5554                         if (ret < 0)
5555                                 return ret;
5556                         last_item = MLX5_FLOW_ITEM_TAG;
5557                         break;
5558                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5559                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5560                         break;
5561                 case RTE_FLOW_ITEM_TYPE_GTP:
5562                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5563                                                         error);
5564                         if (ret < 0)
5565                                 return ret;
5566                         last_item = MLX5_FLOW_LAYER_GTP;
5567                         break;
5568                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5569                         /* Capacity will be checked in the translate stage. */
5570                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5571                                                             last_item,
5572                                                             ether_type,
5573                                                             &nic_ecpri_mask,
5574                                                             error);
5575                         if (ret < 0)
5576                                 return ret;
5577                         last_item = MLX5_FLOW_LAYER_ECPRI;
5578                         break;
5579                 default:
5580                         return rte_flow_error_set(error, ENOTSUP,
5581                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5582                                                   NULL, "item not supported");
5583                 }
5584                 item_flags |= last_item;
5585         }
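        /*
         * Illustration (hedged, not part of the control flow): for the
         * pattern
         *
         *   eth / ipv4 / udp / vxlan / eth / ipv4 / tcp
         *
         * the loop above accumulates item_flags as
         *
         *   MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
         *   MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_VXLAN |
         *   MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3_IPV4 |
         *   MLX5_FLOW_LAYER_INNER_L4_TCP
         *
         * which the action checks below consult to reason about the
         * layers present in the rule.
         */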
5586         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5587                 int type = actions->type;
5588
5589                 if (!mlx5_flow_os_action_supported(type))
5590                         return rte_flow_error_set(error, ENOTSUP,
5591                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5592                                                   actions,
5593                                                   "action not supported");
5594                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5595                         return rte_flow_error_set(error, ENOTSUP,
5596                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5597                                                   actions, "too many actions");
5598                 switch (type) {
5599                 case RTE_FLOW_ACTION_TYPE_VOID:
5600                         break;
5601                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5602                         ret = flow_dv_validate_action_port_id(dev,
5603                                                               action_flags,
5604                                                               actions,
5605                                                               attr,
5606                                                               error);
5607                         if (ret)
5608                                 return ret;
5609                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5610                         ++actions_n;
5611                         break;
5612                 case RTE_FLOW_ACTION_TYPE_FLAG:
5613                         ret = flow_dv_validate_action_flag(dev, action_flags,
5614                                                            attr, error);
5615                         if (ret < 0)
5616                                 return ret;
5617                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5618                                 /* Count all modify-header actions as one. */
5619                                 if (!(action_flags &
5620                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5621                                         ++actions_n;
5622                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5623                                                 MLX5_FLOW_ACTION_MARK_EXT;
5624                         } else {
5625                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5626                                 ++actions_n;
5627                         }
5628                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5629                         break;
5630                 case RTE_FLOW_ACTION_TYPE_MARK:
5631                         ret = flow_dv_validate_action_mark(dev, actions,
5632                                                            action_flags,
5633                                                            attr, error);
5634                         if (ret < 0)
5635                                 return ret;
5636                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5637                                 /* Count all modify-header actions as one. */
5638                                 if (!(action_flags &
5639                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5640                                         ++actions_n;
5641                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5642                                                 MLX5_FLOW_ACTION_MARK_EXT;
5643                         } else {
5644                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5645                                 ++actions_n;
5646                         }
5647                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5648                         break;
5649                 case RTE_FLOW_ACTION_TYPE_SET_META:
5650                         ret = flow_dv_validate_action_set_meta(dev, actions,
5651                                                                action_flags,
5652                                                                attr, error);
5653                         if (ret < 0)
5654                                 return ret;
5655                         /* Count all modify-header actions as one action. */
5656                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5657                                 ++actions_n;
5658                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5659                         rw_act_num += MLX5_ACT_NUM_SET_META;
5660                         break;
5661                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5662                         ret = flow_dv_validate_action_set_tag(dev, actions,
5663                                                               action_flags,
5664                                                               attr, error);
5665                         if (ret < 0)
5666                                 return ret;
5667                         /* Count all modify-header actions as one action. */
5668                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5669                                 ++actions_n;
5670                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5671                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5672                         break;
5673                 case RTE_FLOW_ACTION_TYPE_DROP:
5674                         ret = mlx5_flow_validate_action_drop(action_flags,
5675                                                              attr, error);
5676                         if (ret < 0)
5677                                 return ret;
5678                         action_flags |= MLX5_FLOW_ACTION_DROP;
5679                         ++actions_n;
5680                         break;
5681                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5682                         ret = mlx5_flow_validate_action_queue(actions,
5683                                                               action_flags, dev,
5684                                                               attr, error);
5685                         if (ret < 0)
5686                                 return ret;
5687                         queue_index = ((const struct rte_flow_action_queue *)
5688                                                         (actions->conf))->index;
5689                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5690                         ++actions_n;
5691                         break;
5692                 case RTE_FLOW_ACTION_TYPE_RSS:
5693                         rss = actions->conf;
5694                         ret = mlx5_flow_validate_action_rss(actions,
5695                                                             action_flags, dev,
5696                                                             attr, item_flags,
5697                                                             error);
5698                         if (ret < 0)
5699                                 return ret;
5700                         if (rss != NULL && rss->queue_num)
5701                                 queue_index = rss->queue[0];
5702                         action_flags |= MLX5_FLOW_ACTION_RSS;
5703                         ++actions_n;
5704                         break;
5705                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5706                         ret =
5707                         mlx5_flow_validate_action_default_miss(action_flags,
5708                                         attr, error);
5709                         if (ret < 0)
5710                                 return ret;
5711                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5712                         ++actions_n;
5713                         break;
5714                 case RTE_FLOW_ACTION_TYPE_COUNT:
5715                         ret = flow_dv_validate_action_count(dev, error);
5716                         if (ret < 0)
5717                                 return ret;
5718                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5719                         ++actions_n;
5720                         break;
5721                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5722                         if (flow_dv_validate_action_pop_vlan(dev,
5723                                                              action_flags,
5724                                                              actions,
5725                                                              item_flags, attr,
5726                                                              error))
5727                                 return -rte_errno;
5728                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5729                         ++actions_n;
5730                         break;
5731                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5732                         ret = flow_dv_validate_action_push_vlan(dev,
5733                                                                 action_flags,
5734                                                                 vlan_m,
5735                                                                 actions, attr,
5736                                                                 error);
5737                         if (ret < 0)
5738                                 return ret;
5739                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5740                         ++actions_n;
5741                         break;
5742                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5743                         ret = flow_dv_validate_action_set_vlan_pcp
5744                                                 (action_flags, actions, error);
5745                         if (ret < 0)
5746                                 return ret;
5747                         /* Count PCP with push_vlan command. */
5748                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5749                         break;
5750                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5751                         ret = flow_dv_validate_action_set_vlan_vid
5752                                                 (item_flags, action_flags,
5753                                                  actions, error);
5754                         if (ret < 0)
5755                                 return ret;
5756                         /* Count VID with push_vlan command. */
5757                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5758                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5759                         break;
5760                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5761                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5762                         ret = flow_dv_validate_action_l2_encap(dev,
5763                                                                action_flags,
5764                                                                actions, attr,
5765                                                                error);
5766                         if (ret < 0)
5767                                 return ret;
5768                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5769                         ++actions_n;
5770                         break;
5771                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5772                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5773                         ret = flow_dv_validate_action_decap(dev, action_flags,
5774                                                             attr, error);
5775                         if (ret < 0)
5776                                 return ret;
5777                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5778                         ++actions_n;
5779                         break;
5780                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5781                         ret = flow_dv_validate_action_raw_encap_decap
5782                                 (dev, NULL, actions->conf, attr, &action_flags,
5783                                  &actions_n, error);
5784                         if (ret < 0)
5785                                 return ret;
5786                         break;
5787                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5788                         decap = actions->conf;
5789                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5790                                 ;
5791                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5792                                 encap = NULL;
5793                                 actions--;
5794                         } else {
5795                                 encap = actions->conf;
5796                         }
5797                         ret = flow_dv_validate_action_raw_encap_decap
5798                                            (dev,
5799                                             decap ? decap : &empty_decap, encap,
5800                                             attr, &action_flags, &actions_n,
5801                                             error);
5802                         if (ret < 0)
5803                                 return ret;
5804                         break;
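                /*
                 * Sketch of the pattern handled by the case above (a
                 * hypothetical application-side action list): a RAW_DECAP
                 * directly followed by a RAW_ENCAP, skipping VOIDs in
                 * between, is validated as one decap-then-encap pair:
                 *
                 *   struct rte_flow_action acts[] = {
                 *       { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP,
                 *         .conf = &decap_conf },
                 *       { .type = RTE_FLOW_ACTION_TYPE_VOID },
                 *       { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
                 *         .conf = &encap_conf },
                 *       { .type = RTE_FLOW_ACTION_TYPE_END },
                 *   };
                 *
                 * When no RAW_ENCAP follows, the decap is validated on
                 * its own with encap passed as NULL.
                 */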
5805                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5806                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5807                         ret = flow_dv_validate_action_modify_mac(action_flags,
5808                                                                  actions,
5809                                                                  item_flags,
5810                                                                  error);
5811                         if (ret < 0)
5812                                 return ret;
5813                         /* Count all modify-header actions as one action. */
5814                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5815                                 ++actions_n;
5816                         action_flags |= actions->type ==
5817                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5818                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5819                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
                        /*
                         * Even though the source and destination MAC
                         * addresses overlap in the header due to the
                         * 4-byte alignment, the convert function handles
                         * them separately, so 4 SW actions are created in
                         * total, and 2 actions are added each time
                         * regardless of how many address bytes are set.
                         */
5827                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5828                         break;
5829                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5830                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5831                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5832                                                                   actions,
5833                                                                   item_flags,
5834                                                                   error);
5835                         if (ret < 0)
5836                                 return ret;
5837                         /* Count all modify-header actions as one action. */
5838                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5839                                 ++actions_n;
5840                         action_flags |= actions->type ==
5841                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5842                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5843                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5844                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5845                         break;
5846                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5847                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5848                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5849                                                                   actions,
5850                                                                   item_flags,
5851                                                                   error);
5852                         if (ret < 0)
5853                                 return ret;
5854                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5855                                 return rte_flow_error_set(error, ENOTSUP,
5856                                         RTE_FLOW_ERROR_TYPE_ACTION,
5857                                         actions,
5858                                         "Can't change header "
5859                                         "with ICMPv6 proto");
5860                         /* Count all modify-header actions as one action. */
5861                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5862                                 ++actions_n;
5863                         action_flags |= actions->type ==
5864                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5865                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5866                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5867                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5868                         break;
5869                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5870                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5871                         ret = flow_dv_validate_action_modify_tp(action_flags,
5872                                                                 actions,
5873                                                                 item_flags,
5874                                                                 error);
5875                         if (ret < 0)
5876                                 return ret;
5877                         /* Count all modify-header actions as one action. */
5878                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5879                                 ++actions_n;
5880                         action_flags |= actions->type ==
5881                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5882                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5883                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5884                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5885                         break;
5886                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5887                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5888                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5889                                                                  actions,
5890                                                                  item_flags,
5891                                                                  error);
5892                         if (ret < 0)
5893                                 return ret;
5894                         /* Count all modify-header actions as one action. */
5895                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5896                                 ++actions_n;
5897                         action_flags |= actions->type ==
5898                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5899                                                 MLX5_FLOW_ACTION_SET_TTL :
5900                                                 MLX5_FLOW_ACTION_DEC_TTL;
5901                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5902                         break;
5903                 case RTE_FLOW_ACTION_TYPE_JUMP:
5904                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5905                                                            action_flags,
5906                                                            attr, external,
5907                                                            error);
5908                         if (ret)
5909                                 return ret;
5910                         ++actions_n;
5911                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5912                         break;
5913                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5914                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5915                         ret = flow_dv_validate_action_modify_tcp_seq
5916                                                                 (action_flags,
5917                                                                  actions,
5918                                                                  item_flags,
5919                                                                  error);
5920                         if (ret < 0)
5921                                 return ret;
5922                         /* Count all modify-header actions as one action. */
5923                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5924                                 ++actions_n;
5925                         action_flags |= actions->type ==
5926                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5927                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5928                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5929                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5930                         break;
5931                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5932                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5933                         ret = flow_dv_validate_action_modify_tcp_ack
5934                                                                 (action_flags,
5935                                                                  actions,
5936                                                                  item_flags,
5937                                                                  error);
5938                         if (ret < 0)
5939                                 return ret;
5940                         /* Count all modify-header actions as one action. */
5941                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5942                                 ++actions_n;
5943                         action_flags |= actions->type ==
5944                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5945                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5946                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5947                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5948                         break;
5949                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5950                         break;
5951                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5952                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5953                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5954                         break;
5955                 case RTE_FLOW_ACTION_TYPE_METER:
5956                         ret = mlx5_flow_validate_action_meter(dev,
5957                                                               action_flags,
5958                                                               actions, attr,
5959                                                               error);
5960                         if (ret < 0)
5961                                 return ret;
5962                         action_flags |= MLX5_FLOW_ACTION_METER;
5963                         ++actions_n;
5964                         /* Meter action will add one more TAG action. */
5965                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5966                         break;
5967                 case RTE_FLOW_ACTION_TYPE_AGE:
5968                         ret = flow_dv_validate_action_age(action_flags,
5969                                                           actions, dev,
5970                                                           error);
5971                         if (ret < 0)
5972                                 return ret;
5973                         action_flags |= MLX5_FLOW_ACTION_AGE;
5974                         ++actions_n;
5975                         break;
5976                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5977                         ret = flow_dv_validate_action_modify_ipv4_dscp
5978                                                          (action_flags,
5979                                                           actions,
5980                                                           item_flags,
5981                                                           error);
5982                         if (ret < 0)
5983                                 return ret;
5984                         /* Count all modify-header actions as one action. */
5985                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5986                                 ++actions_n;
5987                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5988                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5989                         break;
5990                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5991                         ret = flow_dv_validate_action_modify_ipv6_dscp
5992                                                                 (action_flags,
5993                                                                  actions,
5994                                                                  item_flags,
5995                                                                  error);
5996                         if (ret < 0)
5997                                 return ret;
5998                         /* Count all modify-header actions as one action. */
5999                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6000                                 ++actions_n;
6001                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6002                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6003                         break;
6004                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6005                         ret = flow_dv_validate_action_sample(action_flags,
6006                                                              actions, dev,
6007                                                              attr, error);
6008                         if (ret < 0)
6009                                 return ret;
6010                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6011                         ++actions_n;
6012                         break;
6013                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6014                         if (actions[0].type != (typeof(actions[0].type))
6015                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6016                                 return rte_flow_error_set
6017                                                 (error, EINVAL,
6018                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6019                                                 NULL, "MLX5 private action "
6020                                                 "must be the first");
6021
6022                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6023                         break;
6024                 default:
6025                         return rte_flow_error_set(error, ENOTSUP,
6026                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6027                                                   actions,
6028                                                   "action not supported");
6029                 }
6030         }
        /*
         * Validate actions in flow rules:
         * - Explicit decap action is prohibited by the tunnel offload API.
         * - Drop action in tunnel steer rule is prohibited by the API.
         * - Application cannot use MARK action because its value can mask
         *   the tunnel default miss notification.
         * - JUMP in tunnel match rule has no support in the current PMD
         *   implementation.
         * - TAG & META are reserved for future uses.
         */
6041         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6042                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6043                                             MLX5_FLOW_ACTION_MARK     |
6044                                             MLX5_FLOW_ACTION_SET_TAG  |
6045                                             MLX5_FLOW_ACTION_SET_META |
6046                                             MLX5_FLOW_ACTION_DROP;
6047
6048                 if (action_flags & bad_actions_mask)
6049                         return rte_flow_error_set
6050                                         (error, EINVAL,
6051                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6052                                         "Invalid RTE action in tunnel "
6053                                         "set decap rule");
6054                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6055                         return rte_flow_error_set
6056                                         (error, EINVAL,
6057                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6058                                         "tunnel set decap rule must terminate "
6059                                         "with JUMP");
6060                 if (!attr->ingress)
6061                         return rte_flow_error_set
6062                                         (error, EINVAL,
6063                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6064                                         "tunnel flows for ingress traffic only");
6065         }
6066         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6067                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6068                                             MLX5_FLOW_ACTION_MARK    |
6069                                             MLX5_FLOW_ACTION_SET_TAG |
6070                                             MLX5_FLOW_ACTION_SET_META;
6071
6072                 if (action_flags & bad_actions_mask)
6073                         return rte_flow_error_set
6074                                         (error, EINVAL,
6075                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6076                                         "Invalid RTE action in tunnel "
6077                                         "set match rule");
6078         }
6079         /*
6080          * Validate the drop action mutual exclusion with other actions.
6081          * Drop action is mutually-exclusive with any other action, except for
6082          * Count action.
6083          */
6084         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6085             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6086                 return rte_flow_error_set(error, EINVAL,
6087                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6088                                           "Drop action is mutually-exclusive "
6089                                           "with any other action, except for "
6090                                           "Count action");
6091         /* E-Switch has a few restrictions on using items and actions. */
6092         if (attr->transfer) {
6093                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6094                     action_flags & MLX5_FLOW_ACTION_FLAG)
6095                         return rte_flow_error_set(error, ENOTSUP,
6096                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6097                                                   NULL,
6098                                                   "unsupported action FLAG");
6099                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6100                     action_flags & MLX5_FLOW_ACTION_MARK)
6101                         return rte_flow_error_set(error, ENOTSUP,
6102                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6103                                                   NULL,
6104                                                   "unsupported action MARK");
6105                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6106                         return rte_flow_error_set(error, ENOTSUP,
6107                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6108                                                   NULL,
6109                                                   "unsupported action QUEUE");
6110                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6111                         return rte_flow_error_set(error, ENOTSUP,
6112                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6113                                                   NULL,
6114                                                   "unsupported action RSS");
6115                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6116                         return rte_flow_error_set(error, EINVAL,
6117                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6118                                                   actions,
6119                                                   "no fate action is found");
6120         } else {
6121                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6122                         return rte_flow_error_set(error, EINVAL,
6123                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6124                                                   actions,
6125                                                   "no fate action is found");
6126         }
6127         /*
6128          * Continue validation for Xcap and VLAN actions.
6129          * If hairpin works in explicit TX rule mode, there is no action
6130          * splitting and the validation of a hairpin ingress flow should be
6131          * the same as for other standard flows.
6132          */
6133         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6134                              MLX5_FLOW_VLAN_ACTIONS)) &&
6135             (queue_index == 0xFFFF ||
6136              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6137              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6138              conf->tx_explicit != 0))) {
6139                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6140                     MLX5_FLOW_XCAP_ACTIONS)
6141                         return rte_flow_error_set(error, ENOTSUP,
6142                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6143                                                   NULL, "encap and decap "
6144                                                   "combination is not supported");
6145                 if (!attr->transfer && attr->ingress) {
6146                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6147                                 return rte_flow_error_set
6148                                                 (error, ENOTSUP,
6149                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6150                                                  NULL, "encap is not supported"
6151                                                  " for ingress traffic");
6152                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6153                                 return rte_flow_error_set
6154                                                 (error, ENOTSUP,
6155                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6156                                                  NULL, "push VLAN action not "
6157                                                  "supported for ingress");
6158                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6159                                         MLX5_FLOW_VLAN_ACTIONS)
6160                                 return rte_flow_error_set
6161                                                 (error, ENOTSUP,
6162                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6163                                                  NULL, "no support for "
6164                                                  "multiple VLAN actions");
6165                 }
6166         }
6167         /*
6168          * Hairpin flow will add one more TAG action in TX implicit mode.
6169          * In TX explicit mode, there will be no hairpin flow ID.
6170          */
6171         if (hairpin > 0)
6172                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6173         /* Extra metadata enabled: one more TAG action will be added. */
6174         if (dev_conf->dv_flow_en &&
6175             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6176             mlx5_flow_ext_mreg_supported(dev))
6177                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6178         if ((uint32_t)rw_act_num >
6179                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6180                 return rte_flow_error_set(error, ENOTSUP,
6181                                           RTE_FLOW_ERROR_TYPE_ACTION,
6182                                           NULL, "too many header modify"
6183                                           " actions to support");
6184         }
6185         return 0;
6186 }
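
/*
 * Illustrative sketch, not part of the driver: the drop-action exclusivity
 * rule enforced above reduces to a single bitmask test. The EX_* flags are
 * hypothetical stand-ins for the MLX5_FLOW_ACTION_* bits.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_ACTION_DROP  (1ULL << 0) /* stand-in for MLX5_FLOW_ACTION_DROP */
#define EX_ACTION_COUNT (1ULL << 1) /* stand-in for MLX5_FLOW_ACTION_COUNT */

static bool
ex_drop_combination_valid(uint64_t action_flags)
{
        /* DROP alone, or DROP combined only with COUNT, is valid. */
        if (!(action_flags & EX_ACTION_DROP))
                return true;
        return !(action_flags & ~(EX_ACTION_DROP | EX_ACTION_COUNT));
}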
6187
6188 /**
6189  * Internal preparation function. Allocates the DV flow structure;
6190  * its size is constant.
6191  *
6192  * @param[in] dev
6193  *   Pointer to the rte_eth_dev structure.
6194  * @param[in] attr
6195  *   Pointer to the flow attributes.
6196  * @param[in] items
6197  *   Pointer to the list of items.
6198  * @param[in] actions
6199  *   Pointer to the list of actions.
6200  * @param[out] error
6201  *   Pointer to the error structure.
6202  *
6203  * @return
6204  *   Pointer to mlx5_flow object on success,
6205  *   otherwise NULL and rte_errno is set.
6206  */
6207 static struct mlx5_flow *
6208 flow_dv_prepare(struct rte_eth_dev *dev,
6209                 const struct rte_flow_attr *attr __rte_unused,
6210                 const struct rte_flow_item items[] __rte_unused,
6211                 const struct rte_flow_action actions[] __rte_unused,
6212                 struct rte_flow_error *error)
6213 {
6214         uint32_t handle_idx = 0;
6215         struct mlx5_flow *dev_flow;
6216         struct mlx5_flow_handle *dev_handle;
6217         struct mlx5_priv *priv = dev->data->dev_private;
6218         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6219
6220         MLX5_ASSERT(wks);
6221         /* Guard against overrunning the temporary device flow array. */
6222         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6223                 rte_flow_error_set(error, ENOSPC,
6224                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6225                                    "no free temporary device flow");
6226                 return NULL;
6227         }
6228         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6229                                    &handle_idx);
6230         if (!dev_handle) {
6231                 rte_flow_error_set(error, ENOMEM,
6232                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6233                                    "not enough memory to create flow handle");
6234                 return NULL;
6235         }
6236         MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6237         dev_flow = &wks->flows[wks->flow_idx++];
6238         dev_flow->handle = dev_handle;
6239         dev_flow->handle_idx = handle_idx;
6240         /*
6241          * Some old rdma-core releases check the length of the matching
6242          * parameter before continuing, so the length must initially exclude
6243          * the misc4 parameter. If the flow uses misc4, the length is
6244          * adjusted accordingly later. Each parameter member is naturally
6245          * aligned on a 64B boundary.
6246          */
6247         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6248                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6249         /*
6250          * The matching value must be cleared to 0 before use. It used to be
6251          * cleared implicitly by the rte_*alloc API; an explicit memset costs
6252          * almost the same.
6253          */
6254         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6255         dev_flow->ingress = attr->ingress;
6256         dev_flow->dv.transfer = attr->transfer;
6257         return dev_flow;
6258 }
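
/*
 * Illustrative sketch, not part of the driver: the matcher value size set
 * above excludes the trailing misc4 segment so that old rdma-core length
 * checks pass; it mirrors the MLX5_ST_SZ_BYTES() arithmetic. The segment
 * count here is a hypothetical stand-in, only the 64B alignment is real.
 */
#include <stddef.h>

#define EX_SEG_SZ   64  /* one 64B-aligned match-parameter segment */
#define EX_NUM_SEGS 10  /* hypothetical total number of segments */

static size_t
ex_match_value_size(int has_misc4)
{
        size_t full = (size_t)EX_NUM_SEGS * EX_SEG_SZ;

        /* Drop the last (misc4) segment unless the flow really uses it. */
        return has_misc4 ? full : full - EX_SEG_SZ;
}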
6259
6260 #ifdef RTE_LIBRTE_MLX5_DEBUG
6261 /**
6262  * Sanity check for match mask and value. Similar to check_valid_spec() in
6263  * the kernel driver. If an unmasked bit is set in the value, return failure.
6264  *
6265  * @param match_mask
6266  *   pointer to match mask buffer.
6267  * @param match_value
6268  *   pointer to match value buffer.
6269  *
6270  * @return
6271  *   0 if valid, -EINVAL otherwise.
6272  */
6273 static int
6274 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6275 {
6276         uint8_t *m = match_mask;
6277         uint8_t *v = match_value;
6278         unsigned int i;
6279
6280         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6281                 if (v[i] & ~m[i]) {
6282                         DRV_LOG(ERR,
6283                                 "match_value differs from match_criteria"
6284                                 " %p[%u] != %p[%u]",
6285                                 match_value, i, match_mask, i);
6286                         return -EINVAL;
6287                 }
6288         }
6289         return 0;
6290 }
6291 #endif
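
/*
 * Illustrative sketch, not part of the driver: the debug check above
 * enforces (value & ~mask) == 0 for every byte, i.e. no value bit may be
 * set outside its mask.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
ex_spec_valid(const uint8_t *mask, const uint8_t *value, size_t len)
{
        size_t i;

        for (i = 0; i < len; ++i)
                if (value[i] & ~mask[i])
                        return false;   /* unmasked bit present in value */
        return true;
}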
6292
6293 /**
6294  * Add match of ip_version.
6295  *
6296  * @param[in] group
6297  *   Flow group.
6298  * @param[in] headers_v
6299  *   Values header pointer.
6300  * @param[in] headers_m
6301  *   Masks header pointer.
6302  * @param[in] ip_version
6303  *   The IP version to set.
6304  */
6305 static inline void
6306 flow_dv_set_match_ip_version(uint32_t group,
6307                              void *headers_v,
6308                              void *headers_m,
6309                              uint8_t ip_version)
6310 {
6311         if (group == 0)
6312                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6313         else
6314                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6315                          ip_version);
6316         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6317         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6318         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6319 }
6320
6321 /**
6322  * Add Ethernet item to matcher and to the value.
6323  *
6324  * @param[in, out] matcher
6325  *   Flow matcher.
6326  * @param[in, out] key
6327  *   Flow matcher value.
6328  * @param[in] item
6329  *   Flow pattern to translate.
6330  * @param[in] inner
6331  * @param[in] inner
6332  *   Item is inner pattern.
      * @param[in] group
      *   The group to insert the rule.
6333 static void
6334 flow_dv_translate_item_eth(void *matcher, void *key,
6335                            const struct rte_flow_item *item, int inner,
6336                            uint32_t group)
6337 {
6338         const struct rte_flow_item_eth *eth_m = item->mask;
6339         const struct rte_flow_item_eth *eth_v = item->spec;
6340         const struct rte_flow_item_eth nic_mask = {
6341                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6342                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6343                 .type = RTE_BE16(0xffff),
6344                 .has_vlan = 0,
6345         };
6346         void *hdrs_m;
6347         void *hdrs_v;
6348         char *l24_v;
6349         unsigned int i;
6350
6351         if (!eth_v)
6352                 return;
6353         if (!eth_m)
6354                 eth_m = &nic_mask;
6355         if (inner) {
6356                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6357                                          inner_headers);
6358                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6359         } else {
6360                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6361                                          outer_headers);
6362                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6363         }
6364         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6365                &eth_m->dst, sizeof(eth_m->dst));
6366         /* The value must be in the range of the mask. */
6367         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6368         for (i = 0; i < sizeof(eth_m->dst); ++i)
6369                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6370         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6371                &eth_m->src, sizeof(eth_m->src));
6372         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6373         /* The value must be in the range of the mask. */
6374         for (i = 0; i < sizeof(eth_m->src); ++i)
6375                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6376         /*
6377          * HW supports match on one Ethertype, the Ethertype following the last
6378          * VLAN tag of the packet (see PRM).
6379          * Set match on ethertype only if ETH header is not followed by VLAN.
6380          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6381          * ethertype, and use ip_version field instead.
6382          * eCPRI over Ether layer will use type value 0xAEFE.
6383          */
6384         if (eth_m->type == 0xFFFF) {
6385                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
6386                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6387                 switch (eth_v->type) {
6388                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6389                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6390                         return;
6391                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6392                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6393                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6394                         return;
6395                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6396                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6397                         return;
6398                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6399                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6400                         return;
6401                 default:
6402                         break;
6403                 }
6404         }
6405         if (eth_m->has_vlan) {
6406                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6407                 if (eth_v->has_vlan) {
6408                         /*
6409                          * Here, if the has_more_vlan field in the VLAN item
6410                          * is also not set, only single-tagged packets match.
6411                          */
6412                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6413                         return;
6414                 }
6415         }
6416         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6417                  rte_be_to_cpu_16(eth_m->type));
6418         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6419         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6420 }
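
/*
 * Illustrative sketch, not part of the driver: matcher values are always
 * stored pre-masked, byte by byte, exactly like the dmac/smac copy loops
 * above ("the value must be in the range of the mask").
 */
#include <stddef.h>
#include <stdint.h>

static void
ex_masked_copy(uint8_t *dst, const uint8_t *val, const uint8_t *mask,
               size_t len)
{
        size_t i;

        for (i = 0; i < len; ++i)
                dst[i] = val[i] & mask[i];
}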
6421
6422 /**
6423  * Add VLAN item to matcher and to the value.
6424  *
6425  * @param[in, out] dev_flow
6426  *   Flow descriptor.
6427  * @param[in, out] matcher
6428  *   Flow matcher.
6429  * @param[in, out] key
6430  *   Flow matcher value.
6431  * @param[in] item
6432  *   Flow pattern to translate.
6433  * @param[in] inner
6434  *   Item is inner pattern.
      * @param[in] group
      *   The group to insert the rule.
6435  */
6436 static void
6437 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6438                             void *matcher, void *key,
6439                             const struct rte_flow_item *item,
6440                             int inner, uint32_t group)
6441 {
6442         const struct rte_flow_item_vlan *vlan_m = item->mask;
6443         const struct rte_flow_item_vlan *vlan_v = item->spec;
6444         void *hdrs_m;
6445         void *hdrs_v;
6446         uint16_t tci_m;
6447         uint16_t tci_v;
6448
6449         if (inner) {
6450                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6451                                          inner_headers);
6452                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6453         } else {
6454                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6455                                          outer_headers);
6456                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6457                 /*
6458                  * This is a workaround: masks are not supported
6459                  * and have been pre-validated.
6460                  */
6461                 if (vlan_v)
6462                         dev_flow->handle->vf_vlan.tag =
6463                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6464         }
6465         /*
6466          * When a VLAN item exists in the flow, mark the packet as
6467          * tagged, even if the TCI is not specified.
6468          */
6469         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6470                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6471                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6472         }
6473         if (!vlan_v)
6474                 return;
6475         if (!vlan_m)
6476                 vlan_m = &rte_flow_item_vlan_mask;
6477         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6478         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6479         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6480         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6481         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6482         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6483         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6484         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6485         /*
6486          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6487          * ethertype, and use ip_version field instead.
6488          */
6489         if (vlan_m->inner_type == 0xFFFF) {
6490                 switch (vlan_v->inner_type) {
6491                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6492                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6493                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6494                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6495                         return;
6496                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6497                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6498                         return;
6499                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6500                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6501                         return;
6502                 default:
6503                         break;
6504                 }
6505         }
6506         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6507                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6508                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6509                 /* Only one vlan_tag bit can be set. */
6510                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6511                 return;
6512         }
6513         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6514                  rte_be_to_cpu_16(vlan_m->inner_type));
6515         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6516                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6517 }
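
/*
 * Illustrative sketch, not part of the driver: the shifts above split the
 * 16-bit VLAN TCI into PCP (bits 15:13), CFI/DEI (bit 12) and VID (bits
 * 11:0); MLX5_SET() truncates to the field width, here masks are explicit.
 */
#include <stdint.h>

static uint8_t  ex_tci_prio(uint16_t tci) { return (uint8_t)(tci >> 13); }
static uint8_t  ex_tci_cfi(uint16_t tci)  { return (uint8_t)((tci >> 12) & 1); }
static uint16_t ex_tci_vid(uint16_t tci)  { return (uint16_t)(tci & 0x0fff); }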
6518
6519 /**
6520  * Add IPV4 item to matcher and to the value.
6521  *
6522  * @param[in, out] matcher
6523  *   Flow matcher.
6524  * @param[in, out] key
6525  *   Flow matcher value.
6526  * @param[in] item
6527  *   Flow pattern to translate.
6528  * @param[in] inner
6529  *   Item is inner pattern.
6530  * @param[in] group
6531  *   The group to insert the rule.
6532  */
6533 static void
6534 flow_dv_translate_item_ipv4(void *matcher, void *key,
6535                             const struct rte_flow_item *item,
6536                             int inner, uint32_t group)
6537 {
6538         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6539         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6540         const struct rte_flow_item_ipv4 nic_mask = {
6541                 .hdr = {
6542                         .src_addr = RTE_BE32(0xffffffff),
6543                         .dst_addr = RTE_BE32(0xffffffff),
6544                         .type_of_service = 0xff,
6545                         .next_proto_id = 0xff,
6546                         .time_to_live = 0xff,
6547                 },
6548         };
6549         void *headers_m;
6550         void *headers_v;
6551         char *l24_m;
6552         char *l24_v;
6553         uint8_t tos;
6554
6555         if (inner) {
6556                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6557                                          inner_headers);
6558                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6559         } else {
6560                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6561                                          outer_headers);
6562                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6563         }
6564         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6565         if (!ipv4_v)
6566                 return;
6567         if (!ipv4_m)
6568                 ipv4_m = &nic_mask;
6569         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6570                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6571         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6572                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6573         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6574         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6575         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6576                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6577         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6578                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6579         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6580         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6581         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6582         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6583                  ipv4_m->hdr.type_of_service);
6584         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6585         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6586                  ipv4_m->hdr.type_of_service >> 2);
6587         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6588         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6589                  ipv4_m->hdr.next_proto_id);
6590         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6591                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6592         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6593                  ipv4_m->hdr.time_to_live);
6594         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6595                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6596         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6597                  !!(ipv4_m->hdr.fragment_offset));
6598         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6599                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6600 }
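
/*
 * Illustrative sketch, not part of the driver: the TOS byte set above is
 * split into the 2-bit ECN field (low bits) and the 6-bit DSCP field
 * (high bits); MLX5_SET() truncates ip_ecn to its 2-bit width.
 */
#include <stdint.h>

static uint8_t ex_tos_ecn(uint8_t tos)  { return (uint8_t)(tos & 0x3); }
static uint8_t ex_tos_dscp(uint8_t tos) { return (uint8_t)(tos >> 2); }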
6601
6602 /**
6603  * Add IPV6 item to matcher and to the value.
6604  *
6605  * @param[in, out] matcher
6606  *   Flow matcher.
6607  * @param[in, out] key
6608  *   Flow matcher value.
6609  * @param[in] item
6610  *   Flow pattern to translate.
6611  * @param[in] inner
6612  *   Item is inner pattern.
6613  * @param[in] group
6614  *   The group to insert the rule.
6615  */
6616 static void
6617 flow_dv_translate_item_ipv6(void *matcher, void *key,
6618                             const struct rte_flow_item *item,
6619                             int inner, uint32_t group)
6620 {
6621         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6622         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6623         const struct rte_flow_item_ipv6 nic_mask = {
6624                 .hdr = {
6625                         .src_addr =
6626                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6627                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6628                         .dst_addr =
6629                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6630                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6631                         .vtc_flow = RTE_BE32(0xffffffff),
6632                         .proto = 0xff,
6633                         .hop_limits = 0xff,
6634                 },
6635         };
6636         void *headers_m;
6637         void *headers_v;
6638         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6639         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6640         char *l24_m;
6641         char *l24_v;
6642         uint32_t vtc_m;
6643         uint32_t vtc_v;
6644         int i;
6645         int size;
6646
6647         if (inner) {
6648                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6649                                          inner_headers);
6650                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6651         } else {
6652                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6653                                          outer_headers);
6654                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6655         }
6656         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6657         if (!ipv6_v)
6658                 return;
6659         if (!ipv6_m)
6660                 ipv6_m = &nic_mask;
6661         size = sizeof(ipv6_m->hdr.dst_addr);
6662         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6663                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6664         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6665                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6666         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6667         for (i = 0; i < size; ++i)
6668                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6669         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6670                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6671         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6672                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6673         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6674         for (i = 0; i < size; ++i)
6675                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6676         /* TOS. */
6677         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6678         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6679         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6680         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6681         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6682         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6683         /* Label. */
6684         if (inner) {
6685                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6686                          vtc_m);
6687                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6688                          vtc_v);
6689         } else {
6690                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6691                          vtc_m);
6692                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6693                          vtc_v);
6694         }
6695         /* Protocol. */
6696         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6697                  ipv6_m->hdr.proto);
6698         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6699                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6700         /* Hop limit. */
6701         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6702                  ipv6_m->hdr.hop_limits);
6703         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6704                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6705         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6706                  !!(ipv6_m->has_frag_ext));
6707         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6708                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6709 }
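
/*
 * Illustrative sketch, not part of the driver: vtc_flow packs
 * version (31:28), traffic class (27:20) and flow label (19:0), so the
 * shifts above yield ECN = TC bits 21:20 and DSCP = TC bits 27:22.
 */
#include <stdint.h>

static uint8_t  ex_vtc_ecn(uint32_t vtc)   { return (uint8_t)((vtc >> 20) & 0x3); }
static uint8_t  ex_vtc_dscp(uint32_t vtc)  { return (uint8_t)((vtc >> 22) & 0x3f); }
static uint32_t ex_vtc_label(uint32_t vtc) { return vtc & 0xfffff; }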
6710
6711 /**
6712  * Add IPV6 fragment extension item to matcher and to the value.
6713  *
6714  * @param[in, out] matcher
6715  *   Flow matcher.
6716  * @param[in, out] key
6717  *   Flow matcher value.
6718  * @param[in] item
6719  *   Flow pattern to translate.
6720  * @param[in] inner
6721  *   Item is inner pattern.
6722  */
6723 static void
6724 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6725                                      const struct rte_flow_item *item,
6726                                      int inner)
6727 {
6728         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6729         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6730         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6731                 .hdr = {
6732                         .next_header = 0xff,
6733                         .frag_data = RTE_BE16(0xffff),
6734                 },
6735         };
6736         void *headers_m;
6737         void *headers_v;
6738
6739         if (inner) {
6740                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6741                                          inner_headers);
6742                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6743         } else {
6744                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6745                                          outer_headers);
6746                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6747         }
6748         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6749         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6750         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6751         if (!ipv6_frag_ext_v)
6752                 return;
6753         if (!ipv6_frag_ext_m)
6754                 ipv6_frag_ext_m = &nic_mask;
6755         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6756                  ipv6_frag_ext_m->hdr.next_header);
6757         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6758                  ipv6_frag_ext_v->hdr.next_header &
6759                  ipv6_frag_ext_m->hdr.next_header);
6760 }
6761
6762 /**
6763  * Add TCP item to matcher and to the value.
6764  *
6765  * @param[in, out] matcher
6766  *   Flow matcher.
6767  * @param[in, out] key
6768  *   Flow matcher value.
6769  * @param[in] item
6770  *   Flow pattern to translate.
6771  * @param[in] inner
6772  *   Item is inner pattern.
6773  */
6774 static void
6775 flow_dv_translate_item_tcp(void *matcher, void *key,
6776                            const struct rte_flow_item *item,
6777                            int inner)
6778 {
6779         const struct rte_flow_item_tcp *tcp_m = item->mask;
6780         const struct rte_flow_item_tcp *tcp_v = item->spec;
6781         void *headers_m;
6782         void *headers_v;
6783
6784         if (inner) {
6785                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6786                                          inner_headers);
6787                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6788         } else {
6789                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6790                                          outer_headers);
6791                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6792         }
6793         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6794         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6795         if (!tcp_v)
6796                 return;
6797         if (!tcp_m)
6798                 tcp_m = &rte_flow_item_tcp_mask;
6799         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6800                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6801         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6802                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6803         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6804                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6805         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6806                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6807         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6808                  tcp_m->hdr.tcp_flags);
6809         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6810                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6811 }
6812
6813 /**
6814  * Add UDP item to matcher and to the value.
6815  *
6816  * @param[in, out] matcher
6817  *   Flow matcher.
6818  * @param[in, out] key
6819  *   Flow matcher value.
6820  * @param[in] item
6821  *   Flow pattern to translate.
6822  * @param[in] inner
6823  *   Item is inner pattern.
6824  */
6825 static void
6826 flow_dv_translate_item_udp(void *matcher, void *key,
6827                            const struct rte_flow_item *item,
6828                            int inner)
6829 {
6830         const struct rte_flow_item_udp *udp_m = item->mask;
6831         const struct rte_flow_item_udp *udp_v = item->spec;
6832         void *headers_m;
6833         void *headers_v;
6834
6835         if (inner) {
6836                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6837                                          inner_headers);
6838                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6839         } else {
6840                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6841                                          outer_headers);
6842                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6843         }
6844         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6845         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6846         if (!udp_v)
6847                 return;
6848         if (!udp_m)
6849                 udp_m = &rte_flow_item_udp_mask;
6850         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6851                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6852         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6853                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6854         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6855                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6856         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6857                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6858 }
6859
6860 /**
6861  * Add GRE optional Key item to matcher and to the value.
6862  *
6863  * @param[in, out] matcher
6864  *   Flow matcher.
6865  * @param[in, out] key
6866  *   Flow matcher value.
6867  * @param[in] item
6868  *   Flow pattern to translate.
6871  */
6872 static void
6873 flow_dv_translate_item_gre_key(void *matcher, void *key,
6874                                    const struct rte_flow_item *item)
6875 {
6876         const rte_be32_t *key_m = item->mask;
6877         const rte_be32_t *key_v = item->spec;
6878         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6879         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6880         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6881
6882         /* GRE K bit must be on and should already be validated */
6883         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6884         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6885         if (!key_v)
6886                 return;
6887         if (!key_m)
6888                 key_m = &gre_key_default_mask;
6889         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6890                  rte_be_to_cpu_32(*key_m) >> 8);
6891         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6892                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6893         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6894                  rte_be_to_cpu_32(*key_m) & 0xFF);
6895         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6896                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6897 }
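
/*
 * Illustrative sketch, not part of the driver: the 32-bit GRE key is
 * matched as a 24-bit high part plus an 8-bit low part, mirroring the
 * gre_key_h/gre_key_l split above.
 */
#include <stdint.h>

static uint32_t ex_gre_key_h(uint32_t key) { return key >> 8; }
static uint8_t  ex_gre_key_l(uint32_t key) { return (uint8_t)(key & 0xff); }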
6898
6899 /**
6900  * Add GRE item to matcher and to the value.
6901  *
6902  * @param[in, out] matcher
6903  *   Flow matcher.
6904  * @param[in, out] key
6905  *   Flow matcher value.
6906  * @param[in] item
6907  *   Flow pattern to translate.
6908  * @param[in] inner
6909  *   Item is inner pattern.
6910  */
6911 static void
6912 flow_dv_translate_item_gre(void *matcher, void *key,
6913                            const struct rte_flow_item *item,
6914                            int inner)
6915 {
6916         const struct rte_flow_item_gre *gre_m = item->mask;
6917         const struct rte_flow_item_gre *gre_v = item->spec;
6918         void *headers_m;
6919         void *headers_v;
6920         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6921         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6922         struct {
6923                 union {
6924                         __extension__
6925                         struct {
6926                                 uint16_t version:3;
6927                                 uint16_t rsvd0:9;
6928                                 uint16_t s_present:1;
6929                                 uint16_t k_present:1;
6930                                 uint16_t rsvd_bit1:1;
6931                                 uint16_t c_present:1;
6932                         };
6933                         uint16_t value;
6934                 };
6935         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6936
6937         if (inner) {
6938                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6939                                          inner_headers);
6940                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6941         } else {
6942                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6943                                          outer_headers);
6944                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6945         }
6946         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6947         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6948         if (!gre_v)
6949                 return;
6950         if (!gre_m)
6951                 gre_m = &rte_flow_item_gre_mask;
6952         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6953                  rte_be_to_cpu_16(gre_m->protocol));
6954         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6955                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6956         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6957         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6958         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6959                  gre_crks_rsvd0_ver_m.c_present);
6960         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6961                  gre_crks_rsvd0_ver_v.c_present &
6962                  gre_crks_rsvd0_ver_m.c_present);
6963         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6964                  gre_crks_rsvd0_ver_m.k_present);
6965         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6966                  gre_crks_rsvd0_ver_v.k_present &
6967                  gre_crks_rsvd0_ver_m.k_present);
6968         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6969                  gre_crks_rsvd0_ver_m.s_present);
6970         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6971                  gre_crks_rsvd0_ver_v.s_present &
6972                  gre_crks_rsvd0_ver_m.s_present);
6973 }
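
/*
 * Illustrative sketch, not part of the driver: after rte_be_to_cpu_16(),
 * c_rsvd0_ver carries C (bit 15), K (bit 13), S (bit 12) and version
 * (bits 2:0), which is exactly what the bitfield union above decodes.
 */
#include <stdint.h>

static int ex_gre_c(uint16_t w)   { return (w >> 15) & 1; }
static int ex_gre_k(uint16_t w)   { return (w >> 13) & 1; }
static int ex_gre_s(uint16_t w)   { return (w >> 12) & 1; }
static int ex_gre_ver(uint16_t w) { return w & 0x7; }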
6974
6975 /**
6976  * Add NVGRE item to matcher and to the value.
6977  *
6978  * @param[in, out] matcher
6979  *   Flow matcher.
6980  * @param[in, out] key
6981  *   Flow matcher value.
6982  * @param[in] item
6983  *   Flow pattern to translate.
6984  * @param[in] inner
6985  *   Item is inner pattern.
6986  */
6987 static void
6988 flow_dv_translate_item_nvgre(void *matcher, void *key,
6989                              const struct rte_flow_item *item,
6990                              int inner)
6991 {
6992         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6993         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6994         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6995         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6996         const char *tni_flow_id_m;
6997         const char *tni_flow_id_v;
6998         char *gre_key_m;
6999         char *gre_key_v;
7000         int size;
7001         int i;
7002
7003         /* For NVGRE, GRE header fields must be set with defined values. */
7004         const struct rte_flow_item_gre gre_spec = {
7005                 .c_rsvd0_ver = RTE_BE16(0x2000),
7006                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7007         };
7008         const struct rte_flow_item_gre gre_mask = {
7009                 .c_rsvd0_ver = RTE_BE16(0xB000),
7010                 .protocol = RTE_BE16(UINT16_MAX),
7011         };
7012         const struct rte_flow_item gre_item = {
7013                 .spec = &gre_spec,
7014                 .mask = &gre_mask,
7015                 .last = NULL,
7016         };
7017         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7018         if (!nvgre_v)
7019                 return;
7020         if (!nvgre_m)
7021                 nvgre_m = &rte_flow_item_nvgre_mask;
7022         tni_flow_id_m = (const char *)nvgre_m->tni;
7023         tni_flow_id_v = (const char *)nvgre_v->tni;
7024         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7025         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7026         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7027         memcpy(gre_key_m, tni_flow_id_m, size);
7028         for (i = 0; i < size; ++i)
7029                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7030 }
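
/*
 * Illustrative sketch, not part of the driver: NVGRE reuses the GRE key
 * field, laying the 24-bit TNI and the 8-bit flow_id end to end, which is
 * why the memcpy above covers sizeof(tni) + sizeof(flow_id) bytes. In
 * gre_spec/gre_mask, 0x2000 sets the K bit and 0xB000 masks C|K|S.
 */
#include <stdint.h>

static uint32_t
ex_nvgre_key(const uint8_t tni[3], uint8_t flow_id)
{
        return ((uint32_t)tni[0] << 24) | ((uint32_t)tni[1] << 16) |
               ((uint32_t)tni[2] << 8) | flow_id;
}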
7031
7032 /**
7033  * Add VXLAN item to matcher and to the value.
7034  *
7035  * @param[in, out] matcher
7036  *   Flow matcher.
7037  * @param[in, out] key
7038  *   Flow matcher value.
7039  * @param[in] item
7040  *   Flow pattern to translate.
7041  * @param[in] inner
7042  *   Item is inner pattern.
7043  */
7044 static void
7045 flow_dv_translate_item_vxlan(void *matcher, void *key,
7046                              const struct rte_flow_item *item,
7047                              int inner)
7048 {
7049         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7050         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7051         void *headers_m;
7052         void *headers_v;
7053         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7054         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7055         char *vni_m;
7056         char *vni_v;
7057         uint16_t dport;
7058         int size;
7059         int i;
7060
7061         if (inner) {
7062                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7063                                          inner_headers);
7064                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7065         } else {
7066                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7067                                          outer_headers);
7068                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7069         }
7070         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7071                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7072         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7073                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7074                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7075         }
7076         if (!vxlan_v)
7077                 return;
7078         if (!vxlan_m)
7079                 vxlan_m = &rte_flow_item_vxlan_mask;
7080         size = sizeof(vxlan_m->vni);
7081         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7082         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7083         memcpy(vni_m, vxlan_m->vni, size);
7084         for (i = 0; i < size; ++i)
7085                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7086 }
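
/*
 * Illustrative sketch, not part of the driver: the tunnel translators only
 * pin the UDP destination port when the flow has not matched one already,
 * following the MLX5_GET16()/MLX5_SET() pattern above. 4789 is the IANA
 * VXLAN port (MLX5_UDP_PORT_VXLAN).
 */
#include <stdint.h>

static void
ex_set_default_dport(uint16_t *dport_m, uint16_t *dport_v, uint16_t dflt)
{
        if (*dport_v == 0) {            /* no explicit UDP dport match yet */
                *dport_m = 0xffff;      /* match the port exactly */
                *dport_v = dflt;        /* e.g. 4789 for VXLAN */
        }
}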
7087
7088 /**
7089  * Add VXLAN-GPE item to matcher and to the value.
7090  *
7091  * @param[in, out] matcher
7092  *   Flow matcher.
7093  * @param[in, out] key
7094  *   Flow matcher value.
7095  * @param[in] item
7096  *   Flow pattern to translate.
7097  * @param[in] inner
7098  *   Item is inner pattern.
7099  */
7100
7101 static void
7102 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7103                                  const struct rte_flow_item *item, int inner)
7104 {
7105         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7106         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7107         void *headers_m;
7108         void *headers_v;
7109         void *misc_m =
7110                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7111         void *misc_v =
7112                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7113         char *vni_m;
7114         char *vni_v;
7115         uint16_t dport;
7116         int size;
7117         int i;
7118         uint8_t flags_m = 0xff;
7119         uint8_t flags_v = 0xc;
7120
7121         if (inner) {
7122                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7123                                          inner_headers);
7124                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7125         } else {
7126                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7127                                          outer_headers);
7128                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7129         }
7130         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7131                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7132         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7133                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7134                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7135         }
7136         if (!vxlan_v)
7137                 return;
7138         if (!vxlan_m)
7139                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7140         size = sizeof(vxlan_m->vni);
7141         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7142         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7143         memcpy(vni_m, vxlan_m->vni, size);
7144         for (i = 0; i < size; ++i)
7145                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7146         if (vxlan_m->flags) {
7147                 flags_m = vxlan_m->flags;
7148                 flags_v = vxlan_v->flags;
7149         }
7150         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7151         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7152         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7153                  vxlan_m->protocol);
7154         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7155                  vxlan_v->protocol);
7156 }
7157
7158 /**
7159  * Add Geneve item to matcher and to the value.
7160  *
7161  * @param[in, out] matcher
7162  *   Flow matcher.
7163  * @param[in, out] key
7164  *   Flow matcher value.
7165  * @param[in] item
7166  *   Flow pattern to translate.
7167  * @param[in] inner
7168  *   Item is inner pattern.
7169  */
7170
7171 static void
7172 flow_dv_translate_item_geneve(void *matcher, void *key,
7173                               const struct rte_flow_item *item, int inner)
7174 {
7175         const struct rte_flow_item_geneve *geneve_m = item->mask;
7176         const struct rte_flow_item_geneve *geneve_v = item->spec;
7177         void *headers_m;
7178         void *headers_v;
7179         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7180         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7181         uint16_t dport;
7182         uint16_t gbhdr_m;
7183         uint16_t gbhdr_v;
7184         char *vni_m;
7185         char *vni_v;
7186         size_t size, i;
7187
7188         if (inner) {
7189                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7190                                          inner_headers);
7191                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7192         } else {
7193                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7194                                          outer_headers);
7195                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7196         }
7197         dport = MLX5_UDP_PORT_GENEVE;
7198         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7199                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7200                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7201         }
7202         if (!geneve_v)
7203                 return;
7204         if (!geneve_m)
7205                 geneve_m = &rte_flow_item_geneve_mask;
7206         size = sizeof(geneve_m->vni);
7207         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7208         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7209         memcpy(vni_m, geneve_m->vni, size);
7210         for (i = 0; i < size; ++i)
7211                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7212         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7213                  rte_be_to_cpu_16(geneve_m->protocol));
7214         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7215                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7216         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7217         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7218         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7219                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7220         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7221                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7222         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7223                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7224         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7225                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7226                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7227 }
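
/*
 * Illustrative sketch, not part of the driver: after rte_be_to_cpu_16(),
 * ver_opt_len_o_c_rsvd0 carries version (15:14), opt_len (13:8), the OAM
 * bit (7) and the critical bit (6); MLX5_GENEVE_OAMF_VAL() and
 * MLX5_GENEVE_OPTLEN_VAL() above extract the OAM bit and option length.
 */
#include <stdint.h>

static int ex_geneve_oam(uint16_t w)     { return (w >> 7) & 1; }
static int ex_geneve_opt_len(uint16_t w) { return (w >> 8) & 0x3f; }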
7228
7229 /**
7230  * Add MPLS item to matcher and to the value.
7231  *
7232  * @param[in, out] matcher
7233  *   Flow matcher.
7234  * @param[in, out] key
7235  *   Flow matcher value.
7236  * @param[in] item
7237  *   Flow pattern to translate.
7238  * @param[in] prev_layer
7239  *   The protocol layer indicated in previous item.
7240  * @param[in] inner
7241  *   Item is inner pattern.
7242  */
7243 static void
7244 flow_dv_translate_item_mpls(void *matcher, void *key,
7245                             const struct rte_flow_item *item,
7246                             uint64_t prev_layer,
7247                             int inner)
7248 {
7249         const uint32_t *in_mpls_m = item->mask;
7250         const uint32_t *in_mpls_v = item->spec;
7251         uint32_t *out_mpls_m = NULL;
7252         uint32_t *out_mpls_v = NULL;
7253         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7254         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7255         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7256                                      misc_parameters_2);
7257         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7258         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7259         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7260
7261         switch (prev_layer) {
7262         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7263                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7264                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7265                          MLX5_UDP_PORT_MPLS);
7266                 break;
7267         case MLX5_FLOW_LAYER_GRE:
7268                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7269                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7270                          RTE_ETHER_TYPE_MPLS);
7271                 break;
7272         default:
7273                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7274                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7275                          IPPROTO_MPLS);
7276                 break;
7277         }
7278         if (!in_mpls_v)
7279                 return;
7280         if (!in_mpls_m)
7281                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7282         switch (prev_layer) {
7283         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7284                 out_mpls_m =
7285                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7286                                                  outer_first_mpls_over_udp);
7287                 out_mpls_v =
7288                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7289                                                  outer_first_mpls_over_udp);
7290                 break;
7291         case MLX5_FLOW_LAYER_GRE:
7292                 out_mpls_m =
7293                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7294                                                  outer_first_mpls_over_gre);
7295                 out_mpls_v =
7296                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7297                                                  outer_first_mpls_over_gre);
7298                 break;
7299         default:
7300                 /* Inner MPLS not over GRE is not supported. */
7301                 if (!inner) {
7302                         out_mpls_m =
7303                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7304                                                          misc2_m,
7305                                                          outer_first_mpls);
7306                         out_mpls_v =
7307                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7308                                                          misc2_v,
7309                                                          outer_first_mpls);
7310                 }
7311                 break;
7312         }
7313         if (out_mpls_m && out_mpls_v) {
7314                 *out_mpls_m = *in_mpls_m;
7315                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7316         }
7317 }
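
/*
 * Illustrative sketch (editor's example, not part of the driver): packing
 * a hypothetical MPLS label (100) into the 3-byte label_tc_s field that
 * the translation above copies verbatim (label:20, tc:3, s:1).
 */
static void __rte_unused
flow_dv_example_mpls_item(struct rte_flow_item_mpls *spec,
                          struct rte_flow_item_mpls *mask)
{
        uint32_t label = 100;

        memset(spec, 0, sizeof(*spec));
        spec->label_tc_s[0] = (label >> 12) & 0xff;
        spec->label_tc_s[1] = (label >> 4) & 0xff;
        spec->label_tc_s[2] = (label << 4) & 0xf0;
        /* The default mask covers the label bits only ("\xff\xff\xf0"). */
        *mask = rte_flow_item_mpls_mask;
}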
7318
7319 /**
7320  * Add metadata register item to matcher
7321  *
7322  * @param[in, out] matcher
7323  *   Flow matcher.
7324  * @param[in, out] key
7325  *   Flow matcher value.
7326  * @param[in] reg_type
7327  *   Type of device metadata register
7328  * @param[in] value
7329  *   Register value
7330  * @param[in] mask
7331  *   Register mask
7332  */
7333 static void
7334 flow_dv_match_meta_reg(void *matcher, void *key,
7335                        enum modify_reg reg_type,
7336                        uint32_t data, uint32_t mask)
7337 {
7338         void *misc2_m =
7339                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7340         void *misc2_v =
7341                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7342         uint32_t temp;
7343
7344         data &= mask;
7345         switch (reg_type) {
7346         case REG_A:
7347                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7348                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7349                 break;
7350         case REG_B:
7351                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7352                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7353                 break;
7354         case REG_C_0:
7355                 /*
7356                  * The metadata register C0 field might be divided into
7357                  * source vport index and META item value, we should set
7358                  * this field according to specified mask, not as whole one.
7359                  */
7360                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7361                 temp |= mask;
7362                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7363                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7364                 temp &= ~mask;
7365                 temp |= data;
7366                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7367                 break;
7368         case REG_C_1:
7369                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7370                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7371                 break;
7372         case REG_C_2:
7373                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7374                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7375                 break;
7376         case REG_C_3:
7377                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7378                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7379                 break;
7380         case REG_C_4:
7381                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7382                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7383                 break;
7384         case REG_C_5:
7385                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7386                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7387                 break;
7388         case REG_C_6:
7389                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7390                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7391                 break;
7392         case REG_C_7:
7393                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7394                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7395                 break;
7396         default:
7397                 MLX5_ASSERT(false);
7398                 break;
7399         }
7400 }
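
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * REG_C_0 read-modify-write above in plain C with hypothetical values,
 * e.g. a vport tag in the low 16 bits and a META value in the high 16.
 */
static uint32_t __rte_unused
flow_dv_example_regc0_merge(uint32_t reg_c0, uint32_t data, uint32_t mask)
{
        /* E.g. reg_c0 = 0x00001234, data = 0xabcd0000, mask = 0xffff0000. */
        reg_c0 &= ~mask;         /* Keep bits owned by the other user. */
        reg_c0 |= (data & mask); /* Install the new sub-field value. */
        return reg_c0;           /* 0xabcd1234 for the values above. */
}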
7401
7402 /**
7403  * Add MARK item to matcher
7404  *
7405  * @param[in] dev
7406  *   The device to configure through.
7407  * @param[in, out] matcher
7408  *   Flow matcher.
7409  * @param[in, out] key
7410  *   Flow matcher value.
7411  * @param[in] item
7412  *   Flow pattern to translate.
7413  */
7414 static void
7415 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7416                             void *matcher, void *key,
7417                             const struct rte_flow_item *item)
7418 {
7419         struct mlx5_priv *priv = dev->data->dev_private;
7420         const struct rte_flow_item_mark *mark;
7421         uint32_t value;
7422         uint32_t mask;
7423
7424         mark = item->mask ? (const void *)item->mask :
7425                             &rte_flow_item_mark_mask;
7426         mask = mark->id & priv->sh->dv_mark_mask;
7427         mark = (const void *)item->spec;
7428         MLX5_ASSERT(mark);
7429         value = mark->id & priv->sh->dv_mark_mask & mask;
7430         if (mask) {
7431                 enum modify_reg reg;
7432
7433                 /* Get the metadata register index for the mark. */
7434                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7435                 MLX5_ASSERT(reg > 0);
7436                 if (reg == REG_C_0) {
7437                         struct mlx5_priv *priv = dev->data->dev_private;
7438                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7439                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7440
7441                         mask &= msk_c0;
7442                         mask <<= shl_c0;
7443                         value <<= shl_c0;
7444                 }
7445                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7446         }
7447 }
7448
7449 /**
7450  * Add META item to matcher
7451  *
7452  * @param[in] dev
7453  *   The device to configure through.
7454  * @param[in, out] matcher
7455  *   Flow matcher.
7456  * @param[in, out] key
7457  *   Flow matcher value.
7458  * @param[in] attr
7459  *   Attributes of the flow that includes this item.
7460  * @param[in] item
7461  *   Flow pattern to translate.
7462  */
7463 static void
7464 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7465                             void *matcher, void *key,
7466                             const struct rte_flow_attr *attr,
7467                             const struct rte_flow_item *item)
7468 {
7469         const struct rte_flow_item_meta *meta_m;
7470         const struct rte_flow_item_meta *meta_v;
7471
7472         meta_m = (const void *)item->mask;
7473         if (!meta_m)
7474                 meta_m = &rte_flow_item_meta_mask;
7475         meta_v = (const void *)item->spec;
7476         if (meta_v) {
7477                 int reg;
7478                 uint32_t value = meta_v->data;
7479                 uint32_t mask = meta_m->data;
7480
7481                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7482                 if (reg < 0)
7483                         return;
7484                 /*
7485                  * In datapath code there are no endianness
7486                  * conversions for performance reasons; all
7487                  * pattern conversions are done in rte_flow.
7488                  */
7489                 value = rte_cpu_to_be_32(value);
7490                 mask = rte_cpu_to_be_32(mask);
7491                 if (reg == REG_C_0) {
7492                         struct mlx5_priv *priv = dev->data->dev_private;
7493                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7494                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7495 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7496                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7497
7498                         value >>= shr_c0;
7499                         mask >>= shr_c0;
7500 #endif
7501                         value <<= shl_c0;
7502                         mask <<= shl_c0;
7503                         MLX5_ASSERT(msk_c0);
7504                         MLX5_ASSERT(!(~msk_c0 & mask));
7505                 }
7506                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7507         }
7508 }
7509
7510 /**
7511  * Add vport metadata Reg C0 item to matcher
7512  *
7513  * @param[in, out] matcher
7514  *   Flow matcher.
7515  * @param[in, out] key
7516  *   Flow matcher value.
7517  * @param[in] value
7518  *   Vport metadata register value and mask (REG_C_0) to match on.
7519  */
7520 static void
7521 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7522                                   uint32_t value, uint32_t mask)
7523 {
7524         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7525 }
7526
7527 /**
7528  * Add tag item to matcher
7529  *
7530  * @param[in] dev
7531  *   The device to configure through.
7532  * @param[in, out] matcher
7533  *   Flow matcher.
7534  * @param[in, out] key
7535  *   Flow matcher value.
7536  * @param[in] item
7537  *   Flow pattern to translate.
7538  */
7539 static void
7540 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7541                                 void *matcher, void *key,
7542                                 const struct rte_flow_item *item)
7543 {
7544         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7545         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7546         uint32_t mask, value;
7547
7548         MLX5_ASSERT(tag_v);
7549         value = tag_v->data;
7550         mask = tag_m ? tag_m->data : UINT32_MAX;
7551         if (tag_v->id == REG_C_0) {
7552                 struct mlx5_priv *priv = dev->data->dev_private;
7553                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7554                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7555
7556                 mask &= msk_c0;
7557                 mask <<= shl_c0;
7558                 value <<= shl_c0;
7559         }
7560         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7561 }
7562
7563 /**
7564  * Add TAG item to matcher
7565  *
7566  * @param[in] dev
7567  *   The device to configure through.
7568  * @param[in, out] matcher
7569  *   Flow matcher.
7570  * @param[in, out] key
7571  *   Flow matcher value.
7572  * @param[in] item
7573  *   Flow pattern to translate.
7574  */
7575 static void
7576 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7577                            void *matcher, void *key,
7578                            const struct rte_flow_item *item)
7579 {
7580         const struct rte_flow_item_tag *tag_v = item->spec;
7581         const struct rte_flow_item_tag *tag_m = item->mask;
7582         enum modify_reg reg;
7583
7584         MLX5_ASSERT(tag_v);
7585         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7586         /* Get the metadata register index for the tag. */
7587         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7588         MLX5_ASSERT(reg > 0);
7589         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7590 }
7591
7592 /**
7593  * Add source vport match to the specified matcher.
7594  *
7595  * @param[in, out] matcher
7596  *   Flow matcher.
7597  * @param[in, out] key
7598  *   Flow matcher value.
7599  * @param[in] port
7600  *   Source vport value to match
7601  * @param[in] mask
7602  *   Mask
7603  */
7604 static void
7605 flow_dv_translate_item_source_vport(void *matcher, void *key,
7606                                     int16_t port, uint16_t mask)
7607 {
7608         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7609         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7610
7611         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7612         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7613 }
7614
7615 /**
7616  * Translate port-id item to eswitch match on port-id.
7617  *
7618  * @param[in] dev
7619  *   The device to configure through.
7620  * @param[in, out] matcher
7621  *   Flow matcher.
7622  * @param[in, out] key
7623  *   Flow matcher value.
7624  * @param[in] item
7625  *   Flow pattern to translate.
7626  *
7627  * @return
7628  *   0 on success, a negative errno value otherwise.
7629  */
7630 static int
7631 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7632                                void *key, const struct rte_flow_item *item)
7633 {
7634         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7635         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7636         struct mlx5_priv *priv;
7637         uint16_t mask, id;
7638
7639         mask = pid_m ? pid_m->id : 0xffff;
7640         id = pid_v ? pid_v->id : dev->data->port_id;
7641         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7642         if (!priv)
7643                 return -rte_errno;
7644         /* Translate to vport field or to metadata, depending on mode. */
7645         if (priv->vport_meta_mask)
7646                 flow_dv_translate_item_meta_vport(matcher, key,
7647                                                   priv->vport_meta_tag,
7648                                                   priv->vport_meta_mask);
7649         else
7650                 flow_dv_translate_item_source_vport(matcher, key,
7651                                                     priv->vport_id, mask);
7652         return 0;
7653 }
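
/*
 * Illustrative sketch (editor's example, not part of the driver): a
 * PORT_ID item as an application could provide it; port 1 is hypothetical.
 * The translation above turns it into a vport or metadata match.
 */
static void __rte_unused
flow_dv_example_port_id_item(struct rte_flow_item *item,
                             struct rte_flow_item_port_id *spec)
{
        spec->id = 1; /* DPDK ethdev port id, not the E-Switch vport. */
        item->type = RTE_FLOW_ITEM_TYPE_PORT_ID;
        item->spec = spec;
        item->last = NULL;
        item->mask = &rte_flow_item_port_id_mask;
}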
7654
7655 /**
7656  * Add ICMP6 item to matcher and to the value.
7657  *
7658  * @param[in, out] matcher
7659  *   Flow matcher.
7660  * @param[in, out] key
7661  *   Flow matcher value.
7662  * @param[in] item
7663  *   Flow pattern to translate.
7664  * @param[in] inner
7665  *   Item is inner pattern.
7666  */
7667 static void
7668 flow_dv_translate_item_icmp6(void *matcher, void *key,
7669                               const struct rte_flow_item *item,
7670                               int inner)
7671 {
7672         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7673         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7674         void *headers_m;
7675         void *headers_v;
7676         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7677                                      misc_parameters_3);
7678         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7679         if (inner) {
7680                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7681                                          inner_headers);
7682                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7683         } else {
7684                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7685                                          outer_headers);
7686                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7687         }
7688         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7689         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7690         if (!icmp6_v)
7691                 return;
7692         if (!icmp6_m)
7693                 icmp6_m = &rte_flow_item_icmp6_mask;
7694         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7695         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7696                  icmp6_v->type & icmp6_m->type);
7697         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7698         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7699                  icmp6_v->code & icmp6_m->code);
7700 }
7701
7702 /**
7703  * Add ICMP item to matcher and to the value.
7704  *
7705  * @param[in, out] matcher
7706  *   Flow matcher.
7707  * @param[in, out] key
7708  *   Flow matcher value.
7709  * @param[in] item
7710  *   Flow pattern to translate.
7711  * @param[in] inner
7712  *   Item is inner pattern.
7713  */
7714 static void
7715 flow_dv_translate_item_icmp(void *matcher, void *key,
7716                             const struct rte_flow_item *item,
7717                             int inner)
7718 {
7719         const struct rte_flow_item_icmp *icmp_m = item->mask;
7720         const struct rte_flow_item_icmp *icmp_v = item->spec;
7721         uint32_t icmp_header_data_m = 0;
7722         uint32_t icmp_header_data_v = 0;
7723         void *headers_m;
7724         void *headers_v;
7725         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7726                                      misc_parameters_3);
7727         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7728         if (inner) {
7729                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7730                                          inner_headers);
7731                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7732         } else {
7733                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7734                                          outer_headers);
7735                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7736         }
7737         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7738         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7739         if (!icmp_v)
7740                 return;
7741         if (!icmp_m)
7742                 icmp_m = &rte_flow_item_icmp_mask;
7743         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7744                  icmp_m->hdr.icmp_type);
7745         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7746                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7747         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7748                  icmp_m->hdr.icmp_code);
7749         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7750                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7751         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
7752         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
7753         if (icmp_header_data_m) {
7754                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
7755                 icmp_header_data_v |=
7756                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
7757                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
7758                          icmp_header_data_m);
7759                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
7760                          icmp_header_data_v & icmp_header_data_m);
7761         }
7762 }
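
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * icmp_header_data packing used above, identifier in the high 16 bits
 * and sequence number in the low 16 bits of the matched word.
 */
static uint32_t __rte_unused
flow_dv_example_icmp_header_data(const struct rte_icmp_hdr *hdr)
{
        /* E.g. ident 0x0001 and seq 0x0002 yield 0x00010002. */
        return (uint32_t)rte_be_to_cpu_16(hdr->icmp_seq_nb) |
               ((uint32_t)rte_be_to_cpu_16(hdr->icmp_ident) << 16);
}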
7763
7764 /**
7765  * Add GTP item to matcher and to the value.
7766  *
7767  * @param[in, out] matcher
7768  *   Flow matcher.
7769  * @param[in, out] key
7770  *   Flow matcher value.
7771  * @param[in] item
7772  *   Flow pattern to translate.
7773  * @param[in] inner
7774  *   Item is inner pattern.
7775  */
7776 static void
7777 flow_dv_translate_item_gtp(void *matcher, void *key,
7778                            const struct rte_flow_item *item, int inner)
7779 {
7780         const struct rte_flow_item_gtp *gtp_m = item->mask;
7781         const struct rte_flow_item_gtp *gtp_v = item->spec;
7782         void *headers_m;
7783         void *headers_v;
7784         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7785                                      misc_parameters_3);
7786         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7787         uint16_t dport = RTE_GTPU_UDP_PORT;
7788
7789         if (inner) {
7790                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7791                                          inner_headers);
7792                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7793         } else {
7794                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7795                                          outer_headers);
7796                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7797         }
7798         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7799                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7800                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7801         }
7802         if (!gtp_v)
7803                 return;
7804         if (!gtp_m)
7805                 gtp_m = &rte_flow_item_gtp_mask;
7806         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7807                  gtp_m->v_pt_rsv_flags);
7808         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7809                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7810         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7811         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7812                  gtp_v->msg_type & gtp_m->msg_type);
7813         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7814                  rte_be_to_cpu_32(gtp_m->teid));
7815         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7816                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7817 }
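
/*
 * Illustrative sketch (editor's example, not part of the driver): matching
 * a hypothetical GTP-U TEID (0x1234) with the translation above.
 */
static void __rte_unused
flow_dv_example_gtp_item(struct rte_flow_item *item,
                         struct rte_flow_item_gtp *spec,
                         struct rte_flow_item_gtp *mask)
{
        memset(spec, 0, sizeof(*spec));
        memset(mask, 0, sizeof(*mask));
        spec->teid = rte_cpu_to_be_32(0x1234);
        mask->teid = RTE_BE32(UINT32_MAX);
        item->type = RTE_FLOW_ITEM_TYPE_GTP;
        item->spec = spec;
        item->last = NULL;
        item->mask = mask;
}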
7818
7819 /**
7820  * Add eCPRI item to matcher and to the value.
7821  *
7822  * @param[in] dev
7823  *   The device to configure through.
7824  * @param[in, out] matcher
7825  *   Flow matcher.
7826  * @param[in, out] key
7827  *   Flow matcher value.
7828  * @param[in] item
7829  *   Flow pattern to translate.
7832  */
7833 static void
7834 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7835                              void *key, const struct rte_flow_item *item)
7836 {
7837         struct mlx5_priv *priv = dev->data->dev_private;
7838         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7839         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7840         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7841                                      misc_parameters_4);
7842         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7843         uint32_t *samples;
7844         void *dw_m;
7845         void *dw_v;
7846
7847         if (!ecpri_v)
7848                 return;
7849         if (!ecpri_m)
7850                 ecpri_m = &rte_flow_item_ecpri_mask;
7851         /*
7852          * At most four DW samples are supported in a single matching now.
7853          * Two are used now for an eCPRI matching:
7854          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
7855          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
7856          *    in network order, if any.
7857          */
7858         if (!ecpri_m->hdr.common.u32)
7859                 return;
7860         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7861         /* Need to take the whole DW as the mask to fill the entry. */
7862         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7863                             prog_sample_field_value_0);
7864         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7865                             prog_sample_field_value_0);
7866         /* Already big endian (network order) in the header. */
7867         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7868         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7869         /* Sample#0, used for matching type, offset 0. */
7870         MLX5_SET(fte_match_set_misc4, misc4_m,
7871                  prog_sample_field_id_0, samples[0]);
7872         /* It makes no sense to set the sample ID in the mask field. */
7873         MLX5_SET(fte_match_set_misc4, misc4_v,
7874                  prog_sample_field_id_0, samples[0]);
7875         /*
7876          * Checking if message body part needs to be matched.
7877          * Some wildcard rules only matching type field should be supported.
7878          */
7879         if (ecpri_m->hdr.dummy[0]) {
7880                 switch (ecpri_v->hdr.common.type) {
7881                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7882                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7883                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7884                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7885                                             prog_sample_field_value_1);
7886                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7887                                             prog_sample_field_value_1);
7888                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7889                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7890                         /* Sample#1, to match message body, offset 4. */
7891                         MLX5_SET(fte_match_set_misc4, misc4_m,
7892                                  prog_sample_field_id_1, samples[1]);
7893                         MLX5_SET(fte_match_set_misc4, misc4_v,
7894                                  prog_sample_field_id_1, samples[1]);
7895                         break;
7896                 default:
7897                         /* Others, do not match any sample ID. */
7898                         break;
7899                 }
7900         }
7901 }
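
/*
 * Illustrative sketch (editor's example, not part of the driver): a
 * wildcard eCPRI pattern matching the message type only, as described in
 * the comment above; the RTC control type is a hypothetical choice.
 */
static void __rte_unused
flow_dv_example_ecpri_item(struct rte_flow_item *item,
                           struct rte_flow_item_ecpri *spec,
                           struct rte_flow_item_ecpri *mask)
{
        memset(spec, 0, sizeof(*spec));
        memset(mask, 0, sizeof(*mask));
        spec->hdr.common.type = RTE_ECPRI_MSG_TYPE_RTC_CTRL;
        /* Type is one byte: 0x00ff0000 in network order, as noted above. */
        mask->hdr.common.u32 = rte_cpu_to_be_32(0x00ff0000);
        item->type = RTE_FLOW_ITEM_TYPE_ECPRI;
        item->spec = spec;
        item->last = NULL;
        item->mask = mask;
}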
7902
7903 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7904
7905 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7906         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7907                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
7908
7909 /**
7910  * Calculate flow matcher enable bitmap.
7911  *
7912  * @param match_criteria
7913  *   Pointer to flow matcher criteria.
7914  *
7915  * @return
7916  *   Bitmap of enabled fields.
7917  */
7918 static uint8_t
7919 flow_dv_matcher_enable(uint32_t *match_criteria)
7920 {
7921         uint8_t match_criteria_enable;
7922
7923         match_criteria_enable =
7924                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7925                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7926         match_criteria_enable |=
7927                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7928                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7929         match_criteria_enable |=
7930                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7931                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7932         match_criteria_enable |=
7933                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7934                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7935         match_criteria_enable |=
7936                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7937                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7938         match_criteria_enable |=
7939                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7940                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7941         return match_criteria_enable;
7942 }
7943
7944 struct mlx5_hlist_entry *
7945 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
7946 {
7947         struct mlx5_dev_ctx_shared *sh = list->ctx;
7948         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7949         struct rte_eth_dev *dev = ctx->dev;
7950         struct mlx5_flow_tbl_data_entry *tbl_data;
7951         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
7952         struct rte_flow_error *error = ctx->error;
7953         union mlx5_flow_tbl_key key = { .v64 = key64 };
7954         struct mlx5_flow_tbl_resource *tbl;
7955         void *domain;
7956         uint32_t idx = 0;
7957         int ret;
7958
7959         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7960         if (!tbl_data) {
7961                 rte_flow_error_set(error, ENOMEM,
7962                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7963                                    NULL,
7964                                    "cannot allocate flow table data entry");
7965                 return NULL;
7966         }
7967         tbl_data->idx = idx;
7968         tbl_data->tunnel = tt_prm->tunnel;
7969         tbl_data->group_id = tt_prm->group_id;
7970         tbl_data->external = tt_prm->external;
7971         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
7972         tbl_data->is_egress = !!key.direction;
7973         tbl = &tbl_data->tbl;
7974         if (key.dummy)
7975                 return &tbl_data->entry;
7976         if (key.domain)
7977                 domain = sh->fdb_domain;
7978         else if (key.direction)
7979                 domain = sh->tx_domain;
7980         else
7981                 domain = sh->rx_domain;
7982         ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
7983         if (ret) {
7984                 rte_flow_error_set(error, ENOMEM,
7985                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7986                                    NULL, "cannot create flow table object");
7987                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7988                 return NULL;
7989         }
7990         if (key.table_id) {
7991                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
7992                                         (tbl->obj, &tbl_data->jump.action);
7993                 if (ret) {
7994                         rte_flow_error_set(error, ENOMEM,
7995                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7996                                            NULL,
7997                                            "cannot create flow jump action");
7998                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7999                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8000                         return NULL;
8001                 }
8002         }
8003         MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
8004               key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
8005               key.table_id);
8006         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
8007                              flow_dv_matcher_create_cb,
8008                              flow_dv_matcher_match_cb,
8009                              flow_dv_matcher_remove_cb);
8010         return &tbl_data->entry;
8011 }
8012
8013 /**
8014  * Get a flow table.
8015  *
8016  * @param[in, out] dev
8017  *   Pointer to rte_eth_dev structure.
8018  * @param[in] table_id
8019  *   Table id to use.
8020  * @param[in] egress
8021  *   Direction of the table.
8022  * @param[in] transfer
8023  *   E-Switch or NIC flow.
8024  * @param[in] dummy
8025  *   Dummy entry for dv API.
8026  * @param[out] error
8027  *   pointer to error structure.
8028  *
8029  * @return
8030  *   Returns the table resource based on the index, NULL in case of failure.
8031  */
8032 struct mlx5_flow_tbl_resource *
8033 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8034                          uint32_t table_id, uint8_t egress,
8035                          uint8_t transfer,
8036                          bool external,
8037                          const struct mlx5_flow_tunnel *tunnel,
8038                          uint32_t group_id, uint8_t dummy,
8039                          struct rte_flow_error *error)
8040 {
8041         struct mlx5_priv *priv = dev->data->dev_private;
8042         union mlx5_flow_tbl_key table_key = {
8043                 {
8044                         .table_id = table_id,
8045                         .dummy = dummy,
8046                         .domain = !!transfer,
8047                         .direction = !!egress,
8048                 }
8049         };
8050         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8051                 .tunnel = tunnel,
8052                 .group_id = group_id,
8053                 .external = external,
8054         };
8055         struct mlx5_flow_cb_ctx ctx = {
8056                 .dev = dev,
8057                 .error = error,
8058                 .data = &tt_prm,
8059         };
8060         struct mlx5_hlist_entry *entry;
8061         struct mlx5_flow_tbl_data_entry *tbl_data;
8062
8063         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8064         if (!entry) {
8065                 rte_flow_error_set(error, ENOMEM,
8066                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8067                                    "cannot get table");
8068                 return NULL;
8069         }
8070         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8071         return &tbl_data->tbl;
8072 }
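
/*
 * Illustrative sketch (editor's example, not part of the driver): getting
 * NIC Rx table 1 through the cache above; all values are hypothetical.
 */
static struct mlx5_flow_tbl_resource * __rte_unused
flow_dv_example_tbl_get(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        /* table_id 1, ingress, NIC domain, internal, no tunnel, group 0. */
        return flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, 0,
                                        error);
}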
8073
8074 void
8075 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8076                       struct mlx5_hlist_entry *entry)
8077 {
8078         struct mlx5_dev_ctx_shared *sh = list->ctx;
8079         struct mlx5_flow_tbl_data_entry *tbl_data =
8080                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8081
8082         MLX5_ASSERT(entry && sh);
8083         if (tbl_data->jump.action)
8084                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8085         if (tbl_data->tbl.obj)
8086                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8087         if (tbl_data->tunnel_offload && tbl_data->external) {
8088                 struct mlx5_hlist_entry *he;
8089                 struct mlx5_hlist *tunnel_grp_hash;
8090                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8091                 union tunnel_tbl_key tunnel_key = {
8092                         .tunnel_id = tbl_data->tunnel ?
8093                                         tbl_data->tunnel->tunnel_id : 0,
8094                         .group = tbl_data->group_id
8095                 };
8096                 union mlx5_flow_tbl_key table_key = {
8097                         .v64 = entry->key
8098                 };
8099                 uint32_t table_id = table_key.table_id;
8100
8101                 tunnel_grp_hash = tbl_data->tunnel ?
8102                                         tbl_data->tunnel->groups :
8103                                         thub->groups;
8104                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8105                 if (he) {
8106                         struct tunnel_tbl_entry *tte;
8107                         tte = container_of(he, typeof(*tte), hash);
8108                         MLX5_ASSERT(tte->flow_table == table_id);
8109                         mlx5_hlist_remove(tunnel_grp_hash, he);
8110                         mlx5_free(tte);
8111                 }
8112                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
8113                                 tunnel_flow_tbl_to_id(table_id));
8114                 DRV_LOG(DEBUG,
8115                         "Table_id %#x tunnel %u group %u released.",
8116                         table_id,
8117                         tbl_data->tunnel ?
8118                         tbl_data->tunnel->tunnel_id : 0,
8119                         tbl_data->group_id);
8120         }
8121         mlx5_cache_list_destroy(&tbl_data->matchers);
8122         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8123 }
8124
8125 /**
8126  * Release a flow table.
8127  *
8128  * @param[in] sh
8129  *   Pointer to device shared structure.
8130  * @param[in] tbl
8131  *   Table resource to be released.
8132  *
8133  * @return
8134  *   Returns 0 if the table was released, 1 otherwise.
8135  */
8136 static int
8137 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8138                              struct mlx5_flow_tbl_resource *tbl)
8139 {
8140         struct mlx5_flow_tbl_data_entry *tbl_data =
8141                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8142
8143         if (!tbl)
8144                 return 0;
8145         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8146 }
8147
8148 int
8149 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8150                          struct mlx5_cache_entry *entry, void *cb_ctx)
8151 {
8152         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8153         struct mlx5_flow_dv_matcher *ref = ctx->data;
8154         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8155                                                         entry);
8156
8157         return cur->crc != ref->crc ||
8158                cur->priority != ref->priority ||
8159                memcmp((const void *)cur->mask.buf,
8160                       (const void *)ref->mask.buf, ref->mask.size);
8161 }
8162
8163 struct mlx5_cache_entry *
8164 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8165                           struct mlx5_cache_entry *entry __rte_unused,
8166                           void *cb_ctx)
8167 {
8168         struct mlx5_dev_ctx_shared *sh = list->ctx;
8169         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8170         struct mlx5_flow_dv_matcher *ref = ctx->data;
8171         struct mlx5_flow_dv_matcher *cache;
8172         struct mlx5dv_flow_matcher_attr dv_attr = {
8173                 .type = IBV_FLOW_ATTR_NORMAL,
8174                 .match_mask = (void *)&ref->mask,
8175         };
8176         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8177                                                             typeof(*tbl), tbl);
8178         int ret;
8179
8180         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8181         if (!cache) {
8182                 rte_flow_error_set(ctx->error, ENOMEM,
8183                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8184                                    "cannot create matcher");
8185                 return NULL;
8186         }
8187         *cache = *ref;
8188         dv_attr.match_criteria_enable =
8189                 flow_dv_matcher_enable(cache->mask.buf);
8190         dv_attr.priority = ref->priority;
8191         if (tbl->is_egress)
8192                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8193         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8194                                                &cache->matcher_object);
8195         if (ret) {
8196                 mlx5_free(cache);
8197                 rte_flow_error_set(ctx->error, ENOMEM,
8198                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8199                                    "cannot create matcher");
8200                 return NULL;
8201         }
8202         return &cache->entry;
8203 }
8204
8205 /**
8206  * Register the flow matcher.
8207  *
8208  * @param[in, out] dev
8209  *   Pointer to rte_eth_dev structure.
8210  * @param[in, out] matcher
8211  *   Pointer to flow matcher.
8212  * @param[in, out] key
8213  *   Pointer to flow table key.
8214  * @param[in, out] dev_flow
8215  *   Pointer to the dev_flow.
8216  * @param[out] error
8217  *   pointer to error structure.
8218  *
8219  * @return
8220  *   0 on success, otherwise -errno and errno is set.
8221  */
8222 static int
8223 flow_dv_matcher_register(struct rte_eth_dev *dev,
8224                          struct mlx5_flow_dv_matcher *ref,
8225                          union mlx5_flow_tbl_key *key,
8226                          struct mlx5_flow *dev_flow,
8227                          struct rte_flow_error *error)
8228 {
8229         struct mlx5_cache_entry *entry;
8230         struct mlx5_flow_dv_matcher *cache;
8231         struct mlx5_flow_tbl_resource *tbl;
8232         struct mlx5_flow_tbl_data_entry *tbl_data;
8233         struct mlx5_flow_cb_ctx ctx = {
8234                 .error = error,
8235                 .data = ref,
8236         };
8237
8238         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
8239                                        key->domain, false, NULL, 0, 0, error);
8240         if (!tbl)
8241                 return -rte_errno;      /* No need to refill the error info */
8242         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8243         ref->tbl = tbl;
8244         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8245         if (!entry) {
8246                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8247                 return rte_flow_error_set(error, ENOMEM,
8248                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8249                                           "cannot allocate ref memory");
8250         }
8251         cache = container_of(entry, typeof(*cache), entry);
8252         dev_flow->handle->dvh.matcher = cache;
8253         return 0;
8254 }
8255
8256 struct mlx5_hlist_entry *
8257 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8258 {
8259         struct mlx5_dev_ctx_shared *sh = list->ctx;
8260         struct rte_flow_error *error = ctx;
8261         struct mlx5_flow_dv_tag_resource *entry;
8262         uint32_t idx = 0;
8263         int ret;
8264
8265         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8266         if (!entry) {
8267                 rte_flow_error_set(error, ENOMEM,
8268                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8269                                    "cannot allocate resource memory");
8270                 return NULL;
8271         }
8272         entry->idx = idx;
8273         ret = mlx5_flow_os_create_flow_action_tag(key,
8274                                                   &entry->action);
8275         if (ret) {
8276                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8277                 rte_flow_error_set(error, ENOMEM,
8278                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8279                                    NULL, "cannot create action");
8280                 return NULL;
8281         }
8282         return &entry->entry;
8283 }
8284
8285 /**
8286  * Find existing tag resource or create and register a new one.
8287  *
8288  * @param[in, out] dev
8289  *   Pointer to rte_eth_dev structure.
8290  * @param[in] tag_be24
8291  *   Tag value in big endian, then right-shifted by 8 bits.
8292  * @param[in, out] dev_flow
8293  *   Pointer to the dev_flow.
8294  * @param[out] error
8295  *   pointer to error structure.
8296  *
8297  * @return
8298  *   0 on success, otherwise -errno and errno is set.
8299  */
8300 static int
8301 flow_dv_tag_resource_register
8302                         (struct rte_eth_dev *dev,
8303                          uint32_t tag_be24,
8304                          struct mlx5_flow *dev_flow,
8305                          struct rte_flow_error *error)
8306 {
8307         struct mlx5_priv *priv = dev->data->dev_private;
8308         struct mlx5_flow_dv_tag_resource *cache_resource;
8309         struct mlx5_hlist_entry *entry;
8310
8311         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8312         if (entry) {
8313                 cache_resource = container_of
8314                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8315                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8316                 dev_flow->dv.tag_resource = cache_resource;
8317                 return 0;
8318         }
8319         return -rte_errno;
8320 }
8321
8322 void
8323 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8324                       struct mlx5_hlist_entry *entry)
8325 {
8326         struct mlx5_dev_ctx_shared *sh = list->ctx;
8327         struct mlx5_flow_dv_tag_resource *tag =
8328                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8329
8330         MLX5_ASSERT(tag && sh && tag->action);
8331         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8332         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8333         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8334 }
8335
8336 /**
8337  * Release the tag.
8338  *
8339  * @param dev
8340  *   Pointer to Ethernet device.
8341  * @param tag_idx
8342  *   Tag index.
8343  *
8344  * @return
8345  *   1 while a reference on it exists, 0 when freed.
8346  */
8347 static int
8348 flow_dv_tag_release(struct rte_eth_dev *dev,
8349                     uint32_t tag_idx)
8350 {
8351         struct mlx5_priv *priv = dev->data->dev_private;
8352         struct mlx5_flow_dv_tag_resource *tag;
8353
8354         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8355         if (!tag)
8356                 return 0;
8357         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8358                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8359         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8360 }
8361
8362 /**
8363  * Translate port ID action to vport.
8364  *
8365  * @param[in] dev
8366  *   Pointer to rte_eth_dev structure.
8367  * @param[in] action
8368  *   Pointer to the port ID action.
8369  * @param[out] dst_port_id
8370  *   The target port ID.
8371  * @param[out] error
8372  *   Pointer to the error structure.
8373  *
8374  * @return
8375  *   0 on success, a negative errno value otherwise and rte_errno is set.
8376  */
8377 static int
8378 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8379                                  const struct rte_flow_action *action,
8380                                  uint32_t *dst_port_id,
8381                                  struct rte_flow_error *error)
8382 {
8383         uint32_t port;
8384         struct mlx5_priv *priv;
8385         const struct rte_flow_action_port_id *conf =
8386                         (const struct rte_flow_action_port_id *)action->conf;
8387
8388         port = conf->original ? dev->data->port_id : conf->id;
8389         priv = mlx5_port_to_eswitch_info(port, false);
8390         if (!priv)
8391                 return rte_flow_error_set(error, -rte_errno,
8392                                           RTE_FLOW_ERROR_TYPE_ACTION,
8393                                           NULL,
8394                                           "No eswitch info was found for port");
8395 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8396         /*
8397          * This parameter is transferred to
8398          * mlx5dv_dr_action_create_dest_ib_port().
8399          */
8400         *dst_port_id = priv->dev_port;
8401 #else
8402         /*
8403          * Legacy mode, no LAG configuration is supported.
8404          * This parameter is transferred to
8405          * mlx5dv_dr_action_create_dest_vport().
8406          */
8407         *dst_port_id = priv->vport_id;
8408 #endif
8409         return 0;
8410 }
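
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * PORT_ID action configuration consumed above; port 1 is hypothetical.
 */
static void __rte_unused
flow_dv_example_port_id_action(struct rte_flow_action *action,
                               struct rte_flow_action_port_id *conf)
{
        memset(conf, 0, sizeof(*conf));
        conf->original = 0; /* Use the explicit id below, not the sender's. */
        conf->id = 1;       /* Destination DPDK ethdev port. */
        action->type = RTE_FLOW_ACTION_TYPE_PORT_ID;
        action->conf = conf;
}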
8411
8412 /**
8413  * Create a counter with aging configuration.
8414  *
8415  * @param[in] dev
8416  *   Pointer to rte_eth_dev structure.
8417  * @param[in] count
8418  *   Pointer to the counter action configuration.
8419  * @param[in] age
8420  *   Pointer to the aging action configuration.
8421  *
8422  * @return
8423  *   Index to flow counter on success, 0 otherwise.
8424  */
8425 static uint32_t
8426 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8427                                 struct mlx5_flow *dev_flow,
8428                                 const struct rte_flow_action_count *count,
8429                                 const struct rte_flow_action_age *age)
8430 {
8431         uint32_t counter;
8432         struct mlx5_age_param *age_param;
8433
8434         if (count && count->shared)
8435                 counter = flow_dv_counter_get_shared(dev, count->id);
8436         else
8437                 counter = flow_dv_counter_alloc(dev, !!age);
8438         if (!counter || age == NULL)
8439                 return counter;
8440         age_param = flow_dv_counter_idx_get_age(dev, counter);
8441         age_param->context = age->context ? age->context :
8442                 (void *)(uintptr_t)(dev_flow->flow_idx);
8443         age_param->timeout = age->timeout;
8444         age_param->port_id = dev->data->port_id;
8445         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8446         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8447         return counter;
8448 }
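
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * COUNT and AGE action configurations consumed above; all values are
 * hypothetical.
 */
static void __rte_unused
flow_dv_example_count_age(struct rte_flow_action_count *count,
                          struct rte_flow_action_age *age)
{
        memset(count, 0, sizeof(*count));
        memset(age, 0, sizeof(*age));
        count->shared = 1;
        count->id = 5;       /* Shared counter id. */
        age->timeout = 10;   /* Seconds without a hit before aging out. */
        age->context = NULL; /* Let the driver fall back to the flow index. */
}
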
8449 /**
8450  * Add Tx queue matcher
8451  *
8452  * @param[in] dev
8453  *   Pointer to the dev struct.
8454  * @param[in, out] matcher
8455  *   Flow matcher.
8456  * @param[in, out] key
8457  *   Flow matcher value.
8458  * @param[in] item
8459  *   Flow pattern to translate.
8460  * @param[in] inner
8461  *   Item is inner pattern.
8462  */
8463 static void
8464 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8465                                 void *matcher, void *key,
8466                                 const struct rte_flow_item *item)
8467 {
8468         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8469         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8470         void *misc_m =
8471                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8472         void *misc_v =
8473                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8474         struct mlx5_txq_ctrl *txq;
8475         uint32_t queue;
8476
8477
8478         queue_m = (const void *)item->mask;
8479         if (!queue_m)
8480                 return;
8481         queue_v = (const void *)item->spec;
8482         if (!queue_v)
8483                 return;
8484         txq = mlx5_txq_get(dev, queue_v->queue);
8485         if (!txq)
8486                 return;
8487         queue = txq->obj->sq->id;
8488         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8489         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8490                  queue & queue_m->queue);
8491         mlx5_txq_release(dev, queue_v->queue);
8492 }
8493
8494 /**
8495  * Set the hash fields according to the @p flow information.
8496  *
8497  * @param[in] dev_flow
8498  *   Pointer to the mlx5_flow.
8499  * @param[in] rss_desc
8500  *   Pointer to the mlx5_flow_rss_desc.
8501  */
8502 static void
8503 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8504                        struct mlx5_flow_rss_desc *rss_desc)
8505 {
8506         uint64_t items = dev_flow->handle->layers;
8507         int rss_inner = 0;
8508         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8509
8510         dev_flow->hash_fields = 0;
8511 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8512         if (rss_desc->level >= 2) {
8513                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8514                 rss_inner = 1;
8515         }
8516 #endif
8517         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8518             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8519                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8520                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8521                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8522                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8523                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8524                         else
8525                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8526                 }
8527         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8528                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8529                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8530                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8531                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8532                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8533                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8534                         else
8535                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8536                 }
8537         }
8538         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8539             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8540                 if (rss_types & ETH_RSS_UDP) {
8541                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8542                                 dev_flow->hash_fields |=
8543                                                 IBV_RX_HASH_SRC_PORT_UDP;
8544                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8545                                 dev_flow->hash_fields |=
8546                                                 IBV_RX_HASH_DST_PORT_UDP;
8547                         else
8548                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8549                 }
8550         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8551                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8552                 if (rss_types & ETH_RSS_TCP) {
8553                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8554                                 dev_flow->hash_fields |=
8555                                                 IBV_RX_HASH_SRC_PORT_TCP;
8556                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8557                                 dev_flow->hash_fields |=
8558                                                 IBV_RX_HASH_DST_PORT_TCP;
8559                         else
8560                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8561                 }
8562         }
8563 }
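/*
 * A hedged sketch of the per-layer selection above: once the layer's
 * RSS type bit is present, *_SRC_ONLY takes precedence over
 * *_DST_ONLY, and with neither set both tuple halves are hashed.
 * Flag values below are stand-ins, not the real ETH_RSS_* or
 * IBV_RX_HASH_* definitions.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLE /* Illustrative only, never compiled. */
static uint64_t
example_pick_hash_fields(uint64_t rss_types, uint64_t src_only_flag,
                         uint64_t dst_only_flag, uint64_t src_field,
                         uint64_t dst_field)
{
        if (rss_types & src_only_flag)
                return src_field;
        if (rss_types & dst_only_flag)
                return dst_field;
        return src_field | dst_field;
}
#endif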
8564
8565 /**
8566  * Prepare an Rx Hash queue.
8567  *
8568  * @param dev
8569  *   Pointer to Ethernet device.
8570  * @param[in] dev_flow
8571  *   Pointer to the mlx5_flow.
8572  * @param[in] rss_desc
8573  *   Pointer to the mlx5_flow_rss_desc.
8574  * @param[out] hrxq_idx
8575  *   Hash Rx queue index.
8576  *
8577  * @return
8578  *   The hash Rx queue object initialized, NULL otherwise and rte_errno is set.
8579  */
8580 static struct mlx5_hrxq *
8581 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8582                      struct mlx5_flow *dev_flow,
8583                      struct mlx5_flow_rss_desc *rss_desc,
8584                      uint32_t *hrxq_idx)
8585 {
8586         struct mlx5_priv *priv = dev->data->dev_private;
8587         struct mlx5_flow_handle *dh = dev_flow->handle;
8588         struct mlx5_hrxq *hrxq;
8589
8590         MLX5_ASSERT(rss_desc->queue_num);
8591         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8592         rss_desc->hash_fields = dev_flow->hash_fields;
8593         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8594         rss_desc->standalone = false;
8595         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8596         if (!*hrxq_idx)
8597                 return NULL;
8598         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8599                               *hrxq_idx);
8600         return hrxq;
8601 }
8602
8603 /**
8604  * Release sample sub action resource.
8605  *
8606  * @param[in, out] dev
8607  *   Pointer to rte_eth_dev structure.
8608  * @param[in] act_res
8609  *   Pointer to sample sub action resource.
8610  */
8611 static void
8612 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
8613                                    struct mlx5_flow_sub_actions_idx *act_res)
8614 {
8615         if (act_res->rix_hrxq) {
8616                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
8617                 act_res->rix_hrxq = 0;
8618         }
8619         if (act_res->rix_encap_decap) {
8620                 flow_dv_encap_decap_resource_release(dev,
8621                                                      act_res->rix_encap_decap);
8622                 act_res->rix_encap_decap = 0;
8623         }
8624         if (act_res->rix_port_id_action) {
8625                 flow_dv_port_id_action_resource_release(dev,
8626                                                 act_res->rix_port_id_action);
8627                 act_res->rix_port_id_action = 0;
8628         }
8629         if (act_res->rix_tag) {
8630                 flow_dv_tag_release(dev, act_res->rix_tag);
8631                 act_res->rix_tag = 0;
8632         }
8633         if (act_res->cnt) {
8634                 flow_dv_counter_release(dev, act_res->cnt);
8635                 act_res->cnt = 0;
8636         }
8637 }
8638
8639 /**
8640  * Find existing sample resource or create and register a new one.
8641  *
8642  * @param[in, out] dev
8643  *   Pointer to rte_eth_dev structure.
8644  * @param[in] resource
8645  *   Pointer to sample resource.
8646  * @param[in, out] dev_flow
8647  *   Pointer to the dev_flow.
8648  * @param[in, out] sample_dv_actions
8649  *   Pointer to sample actions list.
8650  * @param[out] error
8651  *   Pointer to error structure.
8652  *
8653  * @return
8654  *   0 on success, a negative errno value otherwise and rte_errno is set.
8655  */
8656 static int
8657 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8658                          struct mlx5_flow_dv_sample_resource *resource,
8659                          struct mlx5_flow *dev_flow,
8660                          void **sample_dv_actions,
8661                          struct rte_flow_error *error)
8662 {
8663         struct mlx5_flow_dv_sample_resource *cache_resource;
8664         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
8665         struct mlx5_priv *priv = dev->data->dev_private;
8666         struct mlx5_dev_ctx_shared *sh = priv->sh;
8667         struct mlx5_flow_tbl_resource *tbl;
8668         uint32_t idx = 0;
8669         const uint32_t next_ft_step = 1;
8670         uint32_t next_ft_id = resource->ft_id + next_ft_step;
8671         uint8_t is_egress = 0;
8672         uint8_t is_transfer = 0;
8673
8674         /* Lookup a matching resource from cache. */
8675         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list,
8676                       idx, cache_resource, next) {
8677                 if (resource->ratio == cache_resource->ratio &&
8678                     resource->ft_type == cache_resource->ft_type &&
8679                     resource->ft_id == cache_resource->ft_id &&
8680                     resource->set_action == cache_resource->set_action &&
8681                     !memcmp((void *)&resource->sample_act,
8682                             (void *)&cache_resource->sample_act,
8683                             sizeof(struct mlx5_flow_sub_actions_list))) {
8684                         DRV_LOG(DEBUG, "sample resource %p: refcnt %d++",
8685                                 (void *)cache_resource,
8686                                 __atomic_load_n(&cache_resource->refcnt,
8687                                                 __ATOMIC_RELAXED));
8688                         __atomic_fetch_add(&cache_resource->refcnt, 1,
8689                                            __ATOMIC_RELAXED);
8690                         dev_flow->handle->dvh.rix_sample = idx;
8691                         dev_flow->dv.sample_res = cache_resource;
8692                         /*
8693                          * When reusing an existing sample action, release the
8694                          * references taken for the prepared sub-actions.
8695                          */
8696                         flow_dv_sample_sub_actions_release(dev,
8697                                                         &resource->sample_idx);
8698                         return 0;
8699                 }
8700         }
8701         /* Register new sample resource. */
8702         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE],
8703                                        &dev_flow->handle->dvh.rix_sample);
8704         if (!cache_resource)
8705                 return rte_flow_error_set(error, ENOMEM,
8706                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8707                                           NULL,
8708                                           "cannot allocate resource memory");
8709         *cache_resource = *resource;
8710         /* Create normal path table level */
8711         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8712                 is_transfer = 1;
8713         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
8714                 is_egress = 1;
8715         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
8716                                         is_egress, is_transfer,
8717                                         dev_flow->external, NULL, 0, 0, error);
8718         if (!tbl) {
8719                 rte_flow_error_set(error, ENOMEM,
8720                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8721                                           NULL,
8722                                           "failed to create normal path table "
8723                                           "for sample");
8724                 goto error;
8725         }
8726         cache_resource->normal_path_tbl = tbl;
8727         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8728                 cache_resource->default_miss =
8729                                 mlx5_glue->dr_create_flow_action_default_miss();
8730                 if (!cache_resource->default_miss) {
8731                         rte_flow_error_set(error, ENOMEM,
8732                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8733                                                 NULL,
8734                                                 "cannot create default miss "
8735                                                 "action");
8736                         goto error;
8737                 }
8738                 sample_dv_actions[resource->sample_act.actions_num++] =
8739                                                 cache_resource->default_miss;
8740         }
8741         /* Create a DR sample action */
8742         sampler_attr.sample_ratio = cache_resource->ratio;
8743         sampler_attr.default_next_table = tbl->obj;
8744         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
8745         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
8746                                                         &sample_dv_actions[0];
8747         sampler_attr.action = cache_resource->set_action;
8748         cache_resource->verbs_action =
8749                 mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
8750         if (!cache_resource->verbs_action) {
8751                 rte_flow_error_set(error, ENOMEM,
8752                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8753                                         NULL, "cannot create sample action");
8754                 goto error;
8755         }
8756         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
8757         ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list,
8758                      dev_flow->handle->dvh.rix_sample, cache_resource,
8759                      next);
8760         dev_flow->dv.sample_res = cache_resource;
8761         DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++",
8762                 (void *)cache_resource,
8763                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
8764         return 0;
8765 error:
8766         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
8767             cache_resource->default_miss)
8768                 claim_zero(mlx5_glue->destroy_flow_action
8769                                 (cache_resource->default_miss));
8770         else
8771                 flow_dv_sample_sub_actions_release(dev,
8772                                                    &cache_resource->sample_idx);
8773         if (cache_resource->normal_path_tbl)
8774                 flow_dv_tbl_resource_release(MLX5_SH(dev),
8775                                 cache_resource->normal_path_tbl);
8776         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE],
8777                                 dev_flow->handle->dvh.rix_sample);
8778         dev_flow->handle->dvh.rix_sample = 0;
8779         return -rte_errno;
8780 }
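/*
 * The register helpers in this file share one find-or-create pattern:
 * scan the shared cache list, take a reference on an exact match,
 * otherwise allocate, build the DR objects and insert with refcnt 1.
 * A hedged, self-contained sketch of that pattern; the list and key
 * types are stand-ins for the ipool/ILIST machinery (and <stdlib.h>
 * is assumed for calloc()):
 */
#ifdef MLX5_FLOW_DOC_EXAMPLE /* Illustrative only, never compiled. */
struct example_res {
        struct example_res *next;
        uint32_t key;
        uint32_t refcnt;
};

static struct example_res *
example_register(struct example_res **list, uint32_t key)
{
        struct example_res *r;

        for (r = *list; r; r = r->next) {
                if (r->key == key) {
                        /* Reuse the cached entry. */
                        __atomic_fetch_add(&r->refcnt, 1, __ATOMIC_RELAXED);
                        return r;
                }
        }
        r = calloc(1, sizeof(*r));
        if (!r)
                return NULL; /* Caller reports ENOMEM. */
        r->key = key;
        __atomic_store_n(&r->refcnt, 1, __ATOMIC_RELAXED);
        r->next = *list;
        *list = r;
        return r;
}
#endif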
8781
8782 /**
8783  * Find existing destination array resource or create and register a new one.
8784  *
8785  * @param[in, out] dev
8786  *   Pointer to rte_eth_dev structure.
8787  * @param[in] resource
8788  *   Pointer to destination array resource.
8789  * @param[in, out] dev_flow
8790  *   Pointer to the dev_flow.
8791  * @param[out] error
8792  *   Pointer to error structure.
8793  *
8794  * @return
8795  *   0 on success, a negative errno value otherwise and rte_errno is set.
8796  */
8797 static int
8798 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8799                          struct mlx5_flow_dv_dest_array_resource *resource,
8800                          struct mlx5_flow *dev_flow,
8801                          struct rte_flow_error *error)
8802 {
8803         struct mlx5_flow_dv_dest_array_resource *cache_resource;
8804         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
8805         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
8806         struct mlx5_priv *priv = dev->data->dev_private;
8807         struct mlx5_dev_ctx_shared *sh = priv->sh;
8808         struct mlx5_flow_sub_actions_list *sample_act;
8809         struct mlx5dv_dr_domain *domain;
8810         uint32_t idx = 0;
8811
8812         /* Lookup a matching resource from cache. */
8813         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8814                       sh->dest_array_list,
8815                       idx, cache_resource, next) {
8816                 if (resource->num_of_dest == cache_resource->num_of_dest &&
8817                     resource->ft_type == cache_resource->ft_type &&
8818                     !memcmp((void *)cache_resource->sample_act,
8819                             (void *)resource->sample_act,
8820                            (resource->num_of_dest *
8821                            sizeof(struct mlx5_flow_sub_actions_list)))) {
8822                         DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
8823                                 (void *)cache_resource,
8824                                 __atomic_load_n(&cache_resource->refcnt,
8825                                                 __ATOMIC_RELAXED));
8826                         __atomic_fetch_add(&cache_resource->refcnt, 1,
8827                                            __ATOMIC_RELAXED);
8828                         dev_flow->handle->dvh.rix_dest_array = idx;
8829                         dev_flow->dv.dest_array_res = cache_resource;
8830                         /*
8831                          * When reusing an existing destination array, release the
8832                          * references taken for the prepared sub-actions.
8833                          */
8834                         for (idx = 0; idx < resource->num_of_dest; idx++)
8835                                 flow_dv_sample_sub_actions_release(dev,
8836                                                 &resource->sample_idx[idx]);
8837                         return 0;
8838                 }
8839         }
8840         /* Register new destination array resource. */
8841         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8842                                        &dev_flow->handle->dvh.rix_dest_array);
8843         if (!cache_resource)
8844                 return rte_flow_error_set(error, ENOMEM,
8845                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8846                                           NULL,
8847                                           "cannot allocate resource memory");
8848         *cache_resource = *resource;
8849         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8850                 domain = sh->fdb_domain;
8851         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
8852                 domain = sh->rx_domain;
8853         else
8854                 domain = sh->tx_domain;
8855         for (idx = 0; idx < resource->num_of_dest; idx++) {
8856                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
8857                                  mlx5_malloc(MLX5_MEM_ZERO,
8858                                  sizeof(struct mlx5dv_dr_action_dest_attr),
8859                                  0, SOCKET_ID_ANY);
8860                 if (!dest_attr[idx]) {
8861                         rte_flow_error_set(error, ENOMEM,
8862                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8863                                            NULL,
8864                                            "cannot allocate resource memory");
8865                         goto error;
8866                 }
8867                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
8868                 sample_act = &resource->sample_act[idx];
8869                 if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
8870                         dest_attr[idx]->dest = sample_act->dr_queue_action;
8871                 } else if (sample_act->action_flags ==
8872                           (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
8873                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
8874                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
8875                         dest_attr[idx]->dest_reformat->reformat =
8876                                         sample_act->dr_encap_action;
8877                         dest_attr[idx]->dest_reformat->dest =
8878                                         sample_act->dr_port_id_action;
8879                 } else if (sample_act->action_flags ==
8880                            MLX5_FLOW_ACTION_PORT_ID) {
8881                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
8882                 }
8883         }
8884         /* Create a dest array action. */
8885         cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
8886                                                 (domain,
8887                                                  cache_resource->num_of_dest,
8888                                                  dest_attr);
8889         if (!cache_resource->action) {
8890                 rte_flow_error_set(error, ENOMEM,
8891                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8892                                    NULL,
8893                                    "cannot create destination array action");
8894                 goto error;
8895         }
8896         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
8897         ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8898                      &sh->dest_array_list,
8899                      dev_flow->handle->dvh.rix_dest_array, cache_resource,
8900                      next);
8901         dev_flow->dv.dest_array_res = cache_resource;
8902         DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
8903                 (void *)cache_resource,
8904                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
8905         for (idx = 0; idx < resource->num_of_dest; idx++)
8906                 mlx5_free(dest_attr[idx]);
8907         return 0;
8908 error:
8909         for (idx = 0; idx < resource->num_of_dest; idx++) {
8910                 struct mlx5_flow_sub_actions_idx *act_res =
8911                                         &cache_resource->sample_idx[idx];
8912                 if (act_res->rix_hrxq &&
8913                     !mlx5_hrxq_release(dev,
8914                                 act_res->rix_hrxq))
8915                         act_res->rix_hrxq = 0;
8916                 if (act_res->rix_encap_decap &&
8917                         !flow_dv_encap_decap_resource_release(dev,
8918                                 act_res->rix_encap_decap))
8919                         act_res->rix_encap_decap = 0;
8920                 if (act_res->rix_port_id_action &&
8921                         !flow_dv_port_id_action_resource_release(dev,
8922                                 act_res->rix_port_id_action))
8923                         act_res->rix_port_id_action = 0;
8924                 if (dest_attr[idx])
8925                         mlx5_free(dest_attr[idx]);
8926         }
8927
8928         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8929                                 dev_flow->handle->dvh.rix_dest_array);
8930         dev_flow->handle->dvh.rix_dest_array = 0;
8931         return -rte_errno;
8932 }
8933
8934 /**
8935  * Convert Sample action to DV specification.
8936  *
8937  * @param[in] dev
8938  *   Pointer to rte_eth_dev structure.
8939  * @param[in] action
8940  *   Pointer to action structure.
8941  * @param[in, out] dev_flow
8942  *   Pointer to the mlx5_flow.
8943  * @param[in] attr
8944  *   Pointer to the flow attributes.
8945  * @param[in, out] num_of_dest
8946  *   Pointer to the num of destination.
8947  * @param[in, out] sample_actions
8948  *   Pointer to sample actions list.
8949  * @param[in, out] res
8950  *   Pointer to sample resource.
8951  * @param[out] error
8952  *   Pointer to the error structure.
8953  *
8954  * @return
8955  *   0 on success, a negative errno value otherwise and rte_errno is set.
8956  */
8957 static int
8958 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
8959                                 const struct rte_flow_action *action,
8960                                 struct mlx5_flow *dev_flow,
8961                                 const struct rte_flow_attr *attr,
8962                                 uint32_t *num_of_dest,
8963                                 void **sample_actions,
8964                                 struct mlx5_flow_dv_sample_resource *res,
8965                                 struct rte_flow_error *error)
8966 {
8967         struct mlx5_priv *priv = dev->data->dev_private;
8968         const struct rte_flow_action_sample *sample_action;
8969         const struct rte_flow_action *sub_actions;
8970         const struct rte_flow_action_queue *queue;
8971         struct mlx5_flow_sub_actions_list *sample_act;
8972         struct mlx5_flow_sub_actions_idx *sample_idx;
8973         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8974         struct mlx5_flow_rss_desc *rss_desc;
8975         uint64_t action_flags = 0;
8976
8977         MLX5_ASSERT(wks);
8978         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
8979         sample_act = &res->sample_act;
8980         sample_idx = &res->sample_idx;
8981         sample_action = (const struct rte_flow_action_sample *)action->conf;
8982         res->ratio = sample_action->ratio;
8983         sub_actions = sample_action->actions;
8984         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
8985                 int type = sub_actions->type;
8986                 uint32_t pre_rix = 0;
8987                 void *pre_r;
8988                 switch (type) {
8989                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8990                 {
8991                         struct mlx5_hrxq *hrxq;
8992                         uint32_t hrxq_idx;
8993
8994                         queue = sub_actions->conf;
8995                         rss_desc->queue_num = 1;
8996                         rss_desc->queue[0] = queue->index;
8997                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
8998                                                     rss_desc, &hrxq_idx);
8999                         if (!hrxq)
9000                                 return rte_flow_error_set
9001                                         (error, rte_errno,
9002                                          RTE_FLOW_ERROR_TYPE_ACTION,
9003                                          NULL,
9004                                          "cannot create fate queue");
9005                         sample_act->dr_queue_action = hrxq->action;
9006                         sample_idx->rix_hrxq = hrxq_idx;
9007                         sample_actions[sample_act->actions_num++] =
9008                                                 hrxq->action;
9009                         (*num_of_dest)++;
9010                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9011                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9012                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9013                         dev_flow->handle->fate_action =
9014                                         MLX5_FLOW_FATE_QUEUE;
9015                         break;
9016                 }
9017                 case RTE_FLOW_ACTION_TYPE_MARK:
9018                 {
9019                         uint32_t tag_be = mlx5_flow_mark_set
9020                                 (((const struct rte_flow_action_mark *)
9021                                 (sub_actions->conf))->id);
9022
9023                         dev_flow->handle->mark = 1;
9024                         pre_rix = dev_flow->handle->dvh.rix_tag;
9025                         /* Save the mark resource before sample */
9026                         pre_r = dev_flow->dv.tag_resource;
9027                         if (flow_dv_tag_resource_register(dev, tag_be,
9028                                                   dev_flow, error))
9029                                 return -rte_errno;
9030                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9031                         sample_act->dr_tag_action =
9032                                 dev_flow->dv.tag_resource->action;
9033                         sample_idx->rix_tag =
9034                                 dev_flow->handle->dvh.rix_tag;
9035                         sample_actions[sample_act->actions_num++] =
9036                                                 sample_act->dr_tag_action;
9037                         /* Recover the mark resource after sample */
9038                         dev_flow->dv.tag_resource = pre_r;
9039                         dev_flow->handle->dvh.rix_tag = pre_rix;
9040                         action_flags |= MLX5_FLOW_ACTION_MARK;
9041                         break;
9042                 }
9043                 case RTE_FLOW_ACTION_TYPE_COUNT:
9044                 {
9045                         uint32_t counter;
9046
9047                         counter = flow_dv_translate_create_counter(dev,
9048                                         dev_flow, sub_actions->conf, 0);
9049                         if (!counter)
9050                                 return rte_flow_error_set
9051                                                 (error, rte_errno,
9052                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9053                                                  NULL,
9054                                                  "cannot create counter"
9055                                                  " object.");
9056                         sample_idx->cnt = counter;
9057                         sample_act->dr_cnt_action =
9058                                   (flow_dv_counter_get_by_idx(dev,
9059                                   counter, NULL))->action;
9060                         sample_actions[sample_act->actions_num++] =
9061                                                 sample_act->dr_cnt_action;
9062                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9063                         break;
9064                 }
9065                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9066                 {
9067                         struct mlx5_flow_dv_port_id_action_resource
9068                                         port_id_resource;
9069                         uint32_t port_id = 0;
9070
9071                         memset(&port_id_resource, 0, sizeof(port_id_resource));
9072                         /* Save the port id resource before sample */
9073                         pre_rix = dev_flow->handle->rix_port_id_action;
9074                         pre_r = dev_flow->dv.port_id_action;
9075                         if (flow_dv_translate_action_port_id(dev, sub_actions,
9076                                                              &port_id, error))
9077                                 return -rte_errno;
9078                         port_id_resource.port_id = port_id;
9079                         if (flow_dv_port_id_action_resource_register
9080                             (dev, &port_id_resource, dev_flow, error))
9081                                 return -rte_errno;
9082                         sample_act->dr_port_id_action =
9083                                 dev_flow->dv.port_id_action->action;
9084                         sample_idx->rix_port_id_action =
9085                                 dev_flow->handle->rix_port_id_action;
9086                         sample_actions[sample_act->actions_num++] =
9087                                                 sample_act->dr_port_id_action;
9088                         /* Recover the port id resource after sample */
9089                         dev_flow->dv.port_id_action = pre_r;
9090                         dev_flow->handle->rix_port_id_action = pre_rix;
9091                         (*num_of_dest)++;
9092                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9093                         break;
9094                 }
9095                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9096                         /* Save the encap resource before sample */
9097                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9098                         pre_r = dev_flow->dv.encap_decap;
9099                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
9100                                                            dev_flow,
9101                                                            attr->transfer,
9102                                                            error))
9103                                 return -rte_errno;
9104                         sample_act->dr_encap_action =
9105                                 dev_flow->dv.encap_decap->action;
9106                         sample_idx->rix_encap_decap =
9107                                 dev_flow->handle->dvh.rix_encap_decap;
9108                         sample_actions[sample_act->actions_num++] =
9109                                                 sample_act->dr_encap_action;
9110                         /* Recover the encap resource after sample */
9111                         dev_flow->dv.encap_decap = pre_r;
9112                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9113                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9114                         break;
9115                 default:
9116                         return rte_flow_error_set(error, EINVAL,
9117                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9118                                 NULL,
9119                                 "unsupported action in sampler sub-action list");
9120                 }
9121         }
9122         sample_act->action_flags = action_flags;
9123         res->ft_id = dev_flow->dv.group;
9124         if (attr->transfer) {
9125                 union {
9126                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9127                         uint64_t set_action;
9128                 } action_ctx = { .set_action = 0 };
9129
9130                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9131                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9132                          MLX5_MODIFICATION_TYPE_SET);
9133                 MLX5_SET(set_action_in, action_ctx.action_in, field,
9134                          MLX5_MODI_META_REG_C_0);
9135                 MLX5_SET(set_action_in, action_ctx.action_in, data,
9136                          priv->vport_meta_tag);
9137                 res->set_action = action_ctx.set_action;
9138         } else if (attr->ingress) {
9139                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9140         } else {
9141                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
9142         }
9143         return 0;
9144 }
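/*
 * For reference, the translation above consumes a sample action shaped
 * roughly as in this hedged sketch using the public rte_flow API:
 * 1/ratio of matched packets is duplicated to the sub-action list, the
 * rest continue on the normal path. The queue index is a hypothetical
 * value for illustration.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLE /* Illustrative only, never compiled. */
static const struct rte_flow_action_queue example_sample_queue = {
        .index = 0, /* Hypothetical Rx queue. */
};
static const struct rte_flow_action example_sample_sub_actions[] = {
        {
                .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                .conf = &example_sample_queue,
        },
        {
                .type = RTE_FLOW_ACTION_TYPE_END,
        },
};
static const struct rte_flow_action_sample example_sample_conf = {
        .ratio = 2, /* Sample every second matched packet. */
        .actions = example_sample_sub_actions,
};
#endif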
9145
9146 /**
9147  * Create the sample action (or the destination array action for mirroring).
9148  *
9149  * @param[in] dev
9150  *   Pointer to rte_eth_dev structure.
9151  * @param[in, out] dev_flow
9152  *   Pointer to the mlx5_flow.
9153  * @param[in] num_of_dest
9154  *   The num of destination.
9155  * @param[in, out] res
9156  *   Pointer to sample resource.
9157  * @param[in, out] mdest_res
9158  *   Pointer to destination array resource.
9159  * @param[in] sample_actions
9160  *   Pointer to sample path actions list.
9161  * @param[in] action_flags
9162  *   Holds the actions detected until now.
9163  * @param[out] error
9164  *   Pointer to the error structure.
9165  *
9166  * @return
9167  *   0 on success, a negative errno value otherwise and rte_errno is set.
9168  */
9169 static int
9170 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9171                              struct mlx5_flow *dev_flow,
9172                              uint32_t num_of_dest,
9173                              struct mlx5_flow_dv_sample_resource *res,
9174                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
9175                              void **sample_actions,
9176                              uint64_t action_flags,
9177                              struct rte_flow_error *error)
9178 {
9179         /* update normal path action resource into last index of array */
9180         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9181         struct mlx5_flow_sub_actions_list *sample_act =
9182                                         &mdest_res->sample_act[dest_index];
9183         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9184         struct mlx5_flow_rss_desc *rss_desc;
9185         uint32_t normal_idx = 0;
9186         struct mlx5_hrxq *hrxq;
9187         uint32_t hrxq_idx;
9188
9189         MLX5_ASSERT(wks);
9190         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9191         if (num_of_dest > 1) {
9192                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9193                         /* Handle QP action for mirroring */
9194                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9195                                                     rss_desc, &hrxq_idx);
9196                         if (!hrxq)
9197                                 return rte_flow_error_set
9198                                      (error, rte_errno,
9199                                       RTE_FLOW_ERROR_TYPE_ACTION,
9200                                       NULL,
9201                                       "cannot create rx queue");
9202                         normal_idx++;
9203                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9204                         sample_act->dr_queue_action = hrxq->action;
9205                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9206                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9207                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9208                 }
9209                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9210                         normal_idx++;
9211                         mdest_res->sample_idx[dest_index].rix_encap_decap =
9212                                 dev_flow->handle->dvh.rix_encap_decap;
9213                         sample_act->dr_encap_action =
9214                                 dev_flow->dv.encap_decap->action;
9215                 }
9216                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9217                         normal_idx++;
9218                         mdest_res->sample_idx[dest_index].rix_port_id_action =
9219                                 dev_flow->handle->rix_port_id_action;
9220                         sample_act->dr_port_id_action =
9221                                 dev_flow->dv.port_id_action->action;
9222                 }
9223                 sample_act->actions_num = normal_idx;
9224                 /* update sample action resource into first index of array */
9225                 mdest_res->ft_type = res->ft_type;
9226                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9227                                 sizeof(struct mlx5_flow_sub_actions_idx));
9228                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9229                                 sizeof(struct mlx5_flow_sub_actions_list));
9230                 mdest_res->num_of_dest = num_of_dest;
9231                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
9232                                                          dev_flow, error))
9233                         return rte_flow_error_set(error, EINVAL,
9234                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9235                                                   NULL, "can't create mirror "
9236                                                   "action");
9237         } else {
9238                 if (flow_dv_sample_resource_register(dev, res, dev_flow,
9239                                                      sample_actions, error))
9240                         return rte_flow_error_set(error, EINVAL,
9241                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9242                                                   NULL,
9243                                                   "can't create sample action");
9244         }
9245         return 0;
9246 }
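/*
 * The branch above reduces to one predicate: a single fate destination
 * yields a plain DR sampler, while two or more (e.g. QUEUE plus
 * PORT_ID for mirroring) require the destination array action with the
 * normal-path fate kept in the last slot. A hedged restatement:
 */
#ifdef MLX5_FLOW_DOC_EXAMPLE /* Illustrative only, never compiled. */
/* Nonzero -> destination array (mirroring), zero -> plain sampler. */
static int
example_needs_dest_array(uint32_t num_of_dest)
{
        return num_of_dest > 1;
}
#endif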
9247
9248 /**
9249  * Fill the flow with DV spec, lock free
9250  * (mutex should be acquired by caller).
9251  *
9252  * @param[in] dev
9253  *   Pointer to rte_eth_dev structure.
9254  * @param[in, out] dev_flow
9255  *   Pointer to the sub flow.
9256  * @param[in] attr
9257  *   Pointer to the flow attributes.
9258  * @param[in] items
9259  *   Pointer to the list of items.
9260  * @param[in] actions
9261  *   Pointer to the list of actions.
9262  * @param[out] error
9263  *   Pointer to the error structure.
9264  *
9265  * @return
9266  *   0 on success, a negative errno value otherwise and rte_errno is set.
9267  */
9268 static int
9269 __flow_dv_translate(struct rte_eth_dev *dev,
9270                     struct mlx5_flow *dev_flow,
9271                     const struct rte_flow_attr *attr,
9272                     const struct rte_flow_item items[],
9273                     const struct rte_flow_action actions[],
9274                     struct rte_flow_error *error)
9275 {
9276         struct mlx5_priv *priv = dev->data->dev_private;
9277         struct mlx5_dev_config *dev_conf = &priv->config;
9278         struct rte_flow *flow = dev_flow->flow;
9279         struct mlx5_flow_handle *handle = dev_flow->handle;
9280         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9281         struct mlx5_flow_rss_desc *rss_desc;
9282         uint64_t item_flags = 0;
9283         uint64_t last_item = 0;
9284         uint64_t action_flags = 0;
9285         uint64_t priority = attr->priority;
9286         struct mlx5_flow_dv_matcher matcher = {
9287                 .mask = {
9288                         .size = sizeof(matcher.mask.buf) -
9289                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9290                 },
9291         };
9292         int actions_n = 0;
9293         bool actions_end = false;
9294         union {
9295                 struct mlx5_flow_dv_modify_hdr_resource res;
9296                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9297                             sizeof(struct mlx5_modification_cmd) *
9298                             (MLX5_MAX_MODIFY_NUM + 1)];
9299         } mhdr_dummy;
9300         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9301         const struct rte_flow_action_count *count = NULL;
9302         const struct rte_flow_action_age *age = NULL;
9303         union flow_dv_attr flow_attr = { .attr = 0 };
9304         uint32_t tag_be;
9305         union mlx5_flow_tbl_key tbl_key;
9306         uint32_t modify_action_position = UINT32_MAX;
9307         void *match_mask = matcher.mask.buf;
9308         void *match_value = dev_flow->dv.value.buf;
9309         uint8_t next_protocol = 0xff;
9310         struct rte_vlan_hdr vlan = { 0 };
9311         struct mlx5_flow_dv_dest_array_resource mdest_res;
9312         struct mlx5_flow_dv_sample_resource sample_res;
9313         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9314         struct mlx5_flow_sub_actions_list *sample_act;
9315         uint32_t sample_act_pos = UINT32_MAX;
9316         uint32_t num_of_dest = 0;
9317         int tmp_actions_n = 0;
9318         uint32_t table;
9319         int ret = 0;
9320         const struct mlx5_flow_tunnel *tunnel;
9321         struct flow_grp_info grp_info = {
9322                 .external = !!dev_flow->external,
9323                 .transfer = !!attr->transfer,
9324                 .fdb_def_rule = !!priv->fdb_def_rule,
9325         };
9326
9327         MLX5_ASSERT(wks);
9328         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9329         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9330         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9331         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9332                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9333         /* update normal path action resource into last index of array */
9334         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9335         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9336                  flow_items_to_tunnel(items) :
9337                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9338                  flow_actions_to_tunnel(actions) :
9339                  dev_flow->tunnel ? dev_flow->tunnel : NULL;
9342         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9343                                 (dev, tunnel, attr, items, actions);
9344         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9345                                        grp_info, error);
9346         if (ret)
9347                 return ret;
9348         dev_flow->dv.group = table;
9349         if (attr->transfer)
9350                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9351         if (priority == MLX5_FLOW_PRIO_RSVD)
9352                 priority = dev_conf->flow_prio - 1;
9353         /* number of actions must be set to 0 in case of dirty stack. */
9354         mhdr_res->actions_num = 0;
9355         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9356                 /*
9357                  * Do not add a decap action if the match rule drops the
9358                  * packet; HW rejects rules with both decap and drop.
9359                  */
9360                 bool add_decap = true;
9361                 const struct rte_flow_action *ptr = actions;
9362                 struct mlx5_flow_tbl_resource *tbl;
9363
9364                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9365                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9366                                 add_decap = false;
9367                                 break;
9368                         }
9369                 }
9370                 if (add_decap) {
9371                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9372                                                            attr->transfer,
9373                                                            error))
9374                                 return -rte_errno;
9375                         dev_flow->dv.actions[actions_n++] =
9376                                         dev_flow->dv.encap_decap->action;
9377                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9378                 }
9379                 /*
9380                  * Bind table_id with <group, table> for the tunnel match rule.
9381                  * The tunnel set rule establishes that binding in the JUMP
9382                  * action handler. This is required when the application
9383                  * creates the tunnel match rule before the tunnel set rule.
9384                  */
9385                 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9386                                                attr->transfer,
9387                                                !!dev_flow->external, tunnel,
9388                                                attr->group, 0, error);
9389                 if (!tbl)
9390                         return rte_flow_error_set
9391                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
9392                                actions, "cannot register tunnel group");
9393         }
9394         for (; !actions_end ; actions++) {
9395                 const struct rte_flow_action_queue *queue;
9396                 const struct rte_flow_action_rss *rss;
9397                 const struct rte_flow_action *action = actions;
9398                 const uint8_t *rss_key;
9399                 const struct rte_flow_action_meter *mtr;
9400                 struct mlx5_flow_tbl_resource *tbl;
9401                 uint32_t port_id = 0;
9402                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9403                 int action_type = actions->type;
9404                 const struct rte_flow_action *found_action = NULL;
9405                 struct mlx5_flow_meter *fm = NULL;
9406                 uint32_t jump_group = 0;
9407
9408                 if (!mlx5_flow_os_action_supported(action_type))
9409                         return rte_flow_error_set(error, ENOTSUP,
9410                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9411                                                   actions,
9412                                                   "action not supported");
9413                 switch (action_type) {
9414                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9415                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9416                         break;
9417                 case RTE_FLOW_ACTION_TYPE_VOID:
9418                         break;
9419                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9420                         if (flow_dv_translate_action_port_id(dev, action,
9421                                                              &port_id, error))
9422                                 return -rte_errno;
9423                         port_id_resource.port_id = port_id;
9424                         MLX5_ASSERT(!handle->rix_port_id_action);
9425                         if (flow_dv_port_id_action_resource_register
9426                             (dev, &port_id_resource, dev_flow, error))
9427                                 return -rte_errno;
9428                         dev_flow->dv.actions[actions_n++] =
9429                                         dev_flow->dv.port_id_action->action;
9430                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9431                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9432                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9433                         num_of_dest++;
9434                         break;
9435                 case RTE_FLOW_ACTION_TYPE_FLAG:
9436                         action_flags |= MLX5_FLOW_ACTION_FLAG;
9437                         dev_flow->handle->mark = 1;
9438                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9439                                 struct rte_flow_action_mark mark = {
9440                                         .id = MLX5_FLOW_MARK_DEFAULT,
9441                                 };
9442
9443                                 if (flow_dv_convert_action_mark(dev, &mark,
9444                                                                 mhdr_res,
9445                                                                 error))
9446                                         return -rte_errno;
9447                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9448                                 break;
9449                         }
9450                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9451                         /*
9452                          * Only one FLAG or MARK is supported per device flow
9453                          * right now. So the pointer to the tag resource must be
9454                          * zero before the register process.
9455                          */
9456                         MLX5_ASSERT(!handle->dvh.rix_tag);
9457                         if (flow_dv_tag_resource_register(dev, tag_be,
9458                                                           dev_flow, error))
9459                                 return -rte_errno;
9460                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9461                         dev_flow->dv.actions[actions_n++] =
9462                                         dev_flow->dv.tag_resource->action;
9463                         break;
9464                 case RTE_FLOW_ACTION_TYPE_MARK:
9465                         action_flags |= MLX5_FLOW_ACTION_MARK;
9466                         dev_flow->handle->mark = 1;
9467                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9468                                 const struct rte_flow_action_mark *mark =
9469                                         (const struct rte_flow_action_mark *)
9470                                                 actions->conf;
9471
9472                                 if (flow_dv_convert_action_mark(dev, mark,
9473                                                                 mhdr_res,
9474                                                                 error))
9475                                         return -rte_errno;
9476                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9477                                 break;
9478                         }
9479                         /* Fall-through */
9480                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9481                         /* Legacy (non-extensive) MARK action. */
9482                         tag_be = mlx5_flow_mark_set
9483                               (((const struct rte_flow_action_mark *)
9484                                (actions->conf))->id);
9485                         MLX5_ASSERT(!handle->dvh.rix_tag);
9486                         if (flow_dv_tag_resource_register(dev, tag_be,
9487                                                           dev_flow, error))
9488                                 return -rte_errno;
9489                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9490                         dev_flow->dv.actions[actions_n++] =
9491                                         dev_flow->dv.tag_resource->action;
9492                         break;
9493                 case RTE_FLOW_ACTION_TYPE_SET_META:
9494                         if (flow_dv_convert_action_set_meta
9495                                 (dev, mhdr_res, attr,
9496                                  (const struct rte_flow_action_set_meta *)
9497                                   actions->conf, error))
9498                                 return -rte_errno;
9499                         action_flags |= MLX5_FLOW_ACTION_SET_META;
9500                         break;
9501                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9502                         if (flow_dv_convert_action_set_tag
9503                                 (dev, mhdr_res,
9504                                  (const struct rte_flow_action_set_tag *)
9505                                   actions->conf, error))
9506                                 return -rte_errno;
9507                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9508                         break;
9509                 case RTE_FLOW_ACTION_TYPE_DROP:
9510                         action_flags |= MLX5_FLOW_ACTION_DROP;
9511                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9512                         break;
9513                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9514                         queue = actions->conf;
9515                         rss_desc->queue_num = 1;
9516                         rss_desc->queue[0] = queue->index;
9517                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9518                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9519                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9520                         num_of_dest++;
9521                         break;
9522                 case RTE_FLOW_ACTION_TYPE_RSS:
9523                         rss = actions->conf;
9524                         memcpy(rss_desc->queue, rss->queue,
9525                                rss->queue_num * sizeof(uint16_t));
9526                         rss_desc->queue_num = rss->queue_num;
9527                         /* NULL RSS key indicates default RSS key. */
9528                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9529                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9530                         /*
9531                          * rss->level and rss->types should be set in advance
9532                          * when expanding items for RSS.
9533                          */
9534                         action_flags |= MLX5_FLOW_ACTION_RSS;
9535                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9536                         break;
9537                 case RTE_FLOW_ACTION_TYPE_AGE:
9538                 case RTE_FLOW_ACTION_TYPE_COUNT:
9539                         if (!dev_conf->devx) {
9540                                 return rte_flow_error_set
9541                                               (error, ENOTSUP,
9542                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9543                                                NULL,
9544                                                "count action not supported");
9545                         }
9546                         /* Save information first, will apply later. */
9547                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9548                                 count = action->conf;
9549                         else
9550                                 age = action->conf;
9551                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9552                         break;
9553                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9554                         dev_flow->dv.actions[actions_n++] =
9555                                                 priv->sh->pop_vlan_action;
9556                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9557                         break;
9558                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9559                         if (!(action_flags &
9560                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9561                                 flow_dev_get_vlan_info_from_items(items, &vlan);
9562                         vlan.eth_proto = rte_be_to_cpu_16
9563                              ((((const struct rte_flow_action_of_push_vlan *)
9564                                                    actions->conf)->ethertype));
9565                         found_action = mlx5_flow_find_action
9566                                         (actions + 1,
9567                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9568                         if (found_action)
9569                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9570                         found_action = mlx5_flow_find_action
9571                                         (actions + 1,
9572                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9573                         if (found_action)
9574                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9575                         if (flow_dv_create_action_push_vlan
9576                                             (dev, attr, &vlan, dev_flow, error))
9577                                 return -rte_errno;
9578                         dev_flow->dv.actions[actions_n++] =
9579                                         dev_flow->dv.push_vlan_res->action;
9580                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9581                         break;
9582                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9583                         /* of_vlan_push action handled this action */
9584                         MLX5_ASSERT(action_flags &
9585                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9586                         break;
9587                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9588                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9589                                 break;
9590                         flow_dev_get_vlan_info_from_items(items, &vlan);
9591                         mlx5_update_vlan_vid_pcp(actions, &vlan);
9592                         /* If no VLAN push - this is a modify header action */
9593                         if (flow_dv_convert_action_modify_vlan_vid
9594                                                 (mhdr_res, actions, error))
9595                                 return -rte_errno;
9596                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9597                         break;
9598                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9599                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9600                         if (flow_dv_create_action_l2_encap(dev, actions,
9601                                                            dev_flow,
9602                                                            attr->transfer,
9603                                                            error))
9604                                 return -rte_errno;
9605                         dev_flow->dv.actions[actions_n++] =
9606                                         dev_flow->dv.encap_decap->action;
9607                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9608                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9609                                 sample_act->action_flags |=
9610                                                         MLX5_FLOW_ACTION_ENCAP;
9611                         break;
9612                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9613                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9614                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9615                                                            attr->transfer,
9616                                                            error))
9617                                 return -rte_errno;
9618                         dev_flow->dv.actions[actions_n++] =
9619                                         dev_flow->dv.encap_decap->action;
9620                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9621                         break;
9622                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9623                         /* Handle encap with preceding decap. */
9624                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9625                                 if (flow_dv_create_action_raw_encap
9626                                         (dev, actions, dev_flow, attr, error))
9627                                         return -rte_errno;
9628                                 dev_flow->dv.actions[actions_n++] =
9629                                         dev_flow->dv.encap_decap->action;
9630                         } else {
9631                                 /* Handle encap without preceding decap. */
9632                                 if (flow_dv_create_action_l2_encap
9633                                     (dev, actions, dev_flow, attr->transfer,
9634                                      error))
9635                                         return -rte_errno;
9636                                 dev_flow->dv.actions[actions_n++] =
9637                                         dev_flow->dv.encap_decap->action;
9638                         }
9639                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9640                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9641                                 sample_act->action_flags |=
9642                                                         MLX5_FLOW_ACTION_ENCAP;
9643                         break;
9644                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
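                             /*
                              * Skip any VOID actions to check whether this decap
                              * is immediately followed by a RAW_ENCAP; if so, the
                              * pair is handled together at the encap stage.
                              */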
9645                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9646                                 ;
9647                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9648                                 if (flow_dv_create_action_l2_decap
9649                                     (dev, dev_flow, attr->transfer, error))
9650                                         return -rte_errno;
9651                                 dev_flow->dv.actions[actions_n++] =
9652                                         dev_flow->dv.encap_decap->action;
9653                         }
9654                         /* If decap is followed by encap, handle it at encap. */
9655                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9656                         break;
9657                 case RTE_FLOW_ACTION_TYPE_JUMP:
9658                         jump_group = ((const struct rte_flow_action_jump *)
9659                                                         action->conf)->group;
9660                         grp_info.std_tbl_fix = 0;
9661                         ret = mlx5_flow_group_to_table(dev, tunnel,
9662                                                        jump_group,
9663                                                        &table,
9664                                                        grp_info, error);
9665                         if (ret)
9666                                 return ret;
9667                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9668                                                        attr->transfer,
9669                                                        !!dev_flow->external,
9670                                                        tunnel, jump_group, 0,
9671                                                        error);
9672                         if (!tbl)
9673                                 return rte_flow_error_set
9674                                                 (error, errno,
9675                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9676                                                  NULL,
9677                                                  "cannot create jump action.");
9678                         if (flow_dv_jump_tbl_resource_register
9679                             (dev, tbl, dev_flow, error)) {
9680                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9681                                 return rte_flow_error_set
9682                                                 (error, errno,
9683                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9684                                                  NULL,
9685                                                  "cannot create jump action.");
9686                         }
9687                         dev_flow->dv.actions[actions_n++] =
9688                                         dev_flow->dv.jump->action;
9689                         action_flags |= MLX5_FLOW_ACTION_JUMP;
9690                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9691                         break;
9692                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9693                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9694                         if (flow_dv_convert_action_modify_mac
9695                                         (mhdr_res, actions, error))
9696                                 return -rte_errno;
9697                         action_flags |= actions->type ==
9698                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
9699                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
9700                                         MLX5_FLOW_ACTION_SET_MAC_DST;
9701                         break;
9702                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
9703                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
9704                         if (flow_dv_convert_action_modify_ipv4
9705                                         (mhdr_res, actions, error))
9706                                 return -rte_errno;
9707                         action_flags |= actions->type ==
9708                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
9709                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
9710                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
9711                         break;
9712                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
9713                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
9714                         if (flow_dv_convert_action_modify_ipv6
9715                                         (mhdr_res, actions, error))
9716                                 return -rte_errno;
9717                         action_flags |= actions->type ==
9718                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
9719                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
9720                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
9721                         break;
9722                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
9723                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
9724                         if (flow_dv_convert_action_modify_tp
9725                                         (mhdr_res, actions, items,
9726                                          &flow_attr, dev_flow, !!(action_flags &
9727                                          MLX5_FLOW_ACTION_DECAP), error))
9728                                 return -rte_errno;
9729                         action_flags |= actions->type ==
9730                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
9731                                         MLX5_FLOW_ACTION_SET_TP_SRC :
9732                                         MLX5_FLOW_ACTION_SET_TP_DST;
9733                         break;
9734                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
9735                         if (flow_dv_convert_action_modify_dec_ttl
9736                                         (mhdr_res, items, &flow_attr, dev_flow,
9737                                          !!(action_flags &
9738                                          MLX5_FLOW_ACTION_DECAP), error))
9739                                 return -rte_errno;
9740                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
9741                         break;
9742                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
9743                         if (flow_dv_convert_action_modify_ttl
9744                                         (mhdr_res, actions, items, &flow_attr,
9745                                          dev_flow, !!(action_flags &
9746                                          MLX5_FLOW_ACTION_DECAP), error))
9747                                 return -rte_errno;
9748                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
9749                         break;
9750                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
9751                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
9752                         if (flow_dv_convert_action_modify_tcp_seq
9753                                         (mhdr_res, actions, error))
9754                                 return -rte_errno;
9755                         action_flags |= actions->type ==
9756                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
9757                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
9758                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
9759                         break;
9760
9761                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
9762                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
9763                         if (flow_dv_convert_action_modify_tcp_ack
9764                                         (mhdr_res, actions, error))
9765                                 return -rte_errno;
9766                         action_flags |= actions->type ==
9767                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
9768                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
9769                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
9770                         break;
9771                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
9772                         if (flow_dv_convert_action_set_reg
9773                                         (mhdr_res, actions, error))
9774                                 return -rte_errno;
9775                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9776                         break;
9777                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
9778                         if (flow_dv_convert_action_copy_mreg
9779                                         (dev, mhdr_res, actions, error))
9780                                 return -rte_errno;
9781                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9782                         break;
9783                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
9784                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
9785                         dev_flow->handle->fate_action =
9786                                         MLX5_FLOW_FATE_DEFAULT_MISS;
9787                         break;
9788                 case RTE_FLOW_ACTION_TYPE_METER:
9789                         mtr = actions->conf;
9790                         if (!flow->meter) {
9791                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
9792                                                             attr, error);
9793                                 if (!fm)
9794                                         return rte_flow_error_set(error,
9795                                                 rte_errno,
9796                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9797                                                 NULL,
9798                                                 "meter not found "
9799                                                 "or invalid parameters");
9800                                 flow->meter = fm->idx;
9801                         }
9802                         /* Set the meter action. */
9803                         if (!fm) {
9804                                 fm = mlx5_ipool_get(priv->sh->ipool
9805                                                 [MLX5_IPOOL_MTR], flow->meter);
9806                                 if (!fm)
9807                                         return rte_flow_error_set(error,
9808                                                 rte_errno,
9809                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9810                                                 NULL,
9811                                                 "meter not found "
9812                                                 "or invalid parameters");
9813                         }
9814                         dev_flow->dv.actions[actions_n++] =
9815                                 fm->mfts->meter_action;
9816                         action_flags |= MLX5_FLOW_ACTION_METER;
9817                         break;
9818                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
9819                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
9820                                                               actions, error))
9821                                 return -rte_errno;
9822                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
9823                         break;
9824                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
9825                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
9826                                                               actions, error))
9827                                 return -rte_errno;
9828                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
9829                         break;
9830                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
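                             /*
                              * Record the slot for the sample action now; the
                              * action object itself is created at
                              * ACTION_TYPE_END, after all its sub-actions have
                              * been translated.
                              */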
9831                         sample_act_pos = actions_n;
9832                         ret = flow_dv_translate_action_sample(dev,
9833                                                               actions,
9834                                                               dev_flow, attr,
9835                                                               &num_of_dest,
9836                                                               sample_actions,
9837                                                               &sample_res,
9838                                                               error);
9839                         if (ret < 0)
9840                                 return ret;
9841                         actions_n++;
9842                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
9843                         /* Put the encap action into the group if used with a port id. */
9844                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
9845                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
9846                                 sample_act->action_flags |=
9847                                                         MLX5_FLOW_ACTION_ENCAP;
9848                         break;
9849                 case RTE_FLOW_ACTION_TYPE_END:
9850                         actions_end = true;
9851                         if (mhdr_res->actions_num) {
9852                                 /* Create the modify header action if needed. */
9853                                 if (flow_dv_modify_hdr_resource_register
9854                                         (dev, mhdr_res, dev_flow, error))
9855                                         return -rte_errno;
9856                                 dev_flow->dv.actions[modify_action_position] =
9857                                         handle->dvh.modify_hdr->action;
9858                         }
9859                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
9860                                 flow->counter =
9861                                         flow_dv_translate_create_counter(dev,
9862                                                 dev_flow, count, age);
9863
9864                                 if (!flow->counter)
9865                                         return rte_flow_error_set
9866                                                 (error, rte_errno,
9867                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9868                                                 NULL,
9869                                                 "cannot create counter"
9870                                                 " object.");
9871                                 dev_flow->dv.actions[actions_n] =
9872                                           (flow_dv_counter_get_by_idx(dev,
9873                                           flow->counter, NULL))->action;
9874                                 actions_n++;
9875                         }
9876                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
9877                                 ret = flow_dv_create_action_sample(dev,
9878                                                           dev_flow,
9879                                                           num_of_dest,
9880                                                           &sample_res,
9881                                                           &mdest_res,
9882                                                           sample_actions,
9883                                                           action_flags,
9884                                                           error);
9885                                 if (ret < 0)
9886                                         return rte_flow_error_set
9887                                                 (error, rte_errno,
9888                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9889                                                 NULL,
9890                                                 "cannot create sample action");
9891                                 if (num_of_dest > 1) {
9892                                         dev_flow->dv.actions[sample_act_pos] =
9893                                         dev_flow->dv.dest_array_res->action;
9894                                 } else {
9895                                         dev_flow->dv.actions[sample_act_pos] =
9896                                         dev_flow->dv.sample_res->verbs_action;
9897                                 }
9898                         }
9899                         break;
9900                 default:
9901                         break;
9902                 }
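                     /*
                      * Reserve a single slot in dv.actions[] when the first
                      * modify header sub-action appears; the combined modify
                      * header action object is created at ACTION_TYPE_END and
                      * written back into this reserved position.
                      */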
9903                 if (mhdr_res->actions_num &&
9904                     modify_action_position == UINT32_MAX)
9905                         modify_action_position = actions_n++;
9906         }
9907         /*
9908          * For multiple destinations (sample action with ratio=1), the encap
9909          * action and the port id action are combined into one group action.
9910          * So the original actions must be removed from the flow and only
9911          * the sample action is used instead.
9912          */
9913         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
9914                 int i;
9915                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9916
9917                 for (i = 0; i < actions_n; i++) {
9918                         if ((sample_act->dr_encap_action &&
9919                                 sample_act->dr_encap_action ==
9920                                 dev_flow->dv.actions[i]) ||
9921                                 (sample_act->dr_port_id_action &&
9922                                 sample_act->dr_port_id_action ==
9923                                 dev_flow->dv.actions[i]))
9924                                 continue;
9925                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
9926                 }
9927                 memcpy((void *)dev_flow->dv.actions,
9928                                 (void *)temp_actions,
9929                                 tmp_actions_n * sizeof(void *));
9930                 actions_n = tmp_actions_n;
9931         }
9932         dev_flow->dv.actions_n = actions_n;
9933         dev_flow->act_flags = action_flags;
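             /*
              * Second pass: translate the pattern items into the matcher mask
              * and the device flow match value buffers.
              */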
9934         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
9935                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
9936                 int item_type = items->type;
9937
9938                 if (!mlx5_flow_os_item_supported(item_type))
9939                         return rte_flow_error_set(error, ENOTSUP,
9940                                                   RTE_FLOW_ERROR_TYPE_ITEM,
9941                                                   NULL, "item not supported");
9942                 switch (item_type) {
9943                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
9944                         flow_dv_translate_item_port_id(dev, match_mask,
9945                                                        match_value, items);
9946                         last_item = MLX5_FLOW_ITEM_PORT_ID;
9947                         break;
9948                 case RTE_FLOW_ITEM_TYPE_ETH:
9949                         flow_dv_translate_item_eth(match_mask, match_value,
9950                                                    items, tunnel,
9951                                                    dev_flow->dv.group);
9952                         matcher.priority = action_flags &
9953                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
9954                                         !dev_flow->external ?
9955                                         MLX5_PRIORITY_MAP_L3 :
9956                                         MLX5_PRIORITY_MAP_L2;
9957                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
9958                                              MLX5_FLOW_LAYER_OUTER_L2;
9959                         break;
9960                 case RTE_FLOW_ITEM_TYPE_VLAN:
9961                         flow_dv_translate_item_vlan(dev_flow,
9962                                                     match_mask, match_value,
9963                                                     items, tunnel,
9964                                                     dev_flow->dv.group);
9965                         matcher.priority = MLX5_PRIORITY_MAP_L2;
9966                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
9967                                               MLX5_FLOW_LAYER_INNER_VLAN) :
9968                                              (MLX5_FLOW_LAYER_OUTER_L2 |
9969                                               MLX5_FLOW_LAYER_OUTER_VLAN);
9970                         break;
9971                 case RTE_FLOW_ITEM_TYPE_IPV4:
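                             /*
                              * next_protocol carries the previous IP "next
                              * header" value so this item can be classified as
                              * an inner (IP-in-IP tunnel) header when needed.
                              */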
9972                         mlx5_flow_tunnel_ip_check(items, next_protocol,
9973                                                   &item_flags, &tunnel);
9974                         flow_dv_translate_item_ipv4(match_mask, match_value,
9975                                                     items, tunnel,
9976                                                     dev_flow->dv.group);
9977                         matcher.priority = MLX5_PRIORITY_MAP_L3;
9978                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
9979                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
9980                         if (items->mask != NULL &&
9981                             ((const struct rte_flow_item_ipv4 *)
9982                              items->mask)->hdr.next_proto_id) {
9983                                 next_protocol =
9984                                         ((const struct rte_flow_item_ipv4 *)
9985                                          (items->spec))->hdr.next_proto_id;
9986                                 next_protocol &=
9987                                         ((const struct rte_flow_item_ipv4 *)
9988                                          (items->mask))->hdr.next_proto_id;
9989                         } else {
9990                                 /* Reset for inner layer. */
9991                                 next_protocol = 0xff;
9992                         }
9993                         break;
9994                 case RTE_FLOW_ITEM_TYPE_IPV6:
9995                         mlx5_flow_tunnel_ip_check(items, next_protocol,
9996                                                   &item_flags, &tunnel);
9997                         flow_dv_translate_item_ipv6(match_mask, match_value,
9998                                                     items, tunnel,
9999                                                     dev_flow->dv.group);
10000                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10001                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10002                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10003                         if (items->mask != NULL &&
10004                             ((const struct rte_flow_item_ipv6 *)
10005                              items->mask)->hdr.proto) {
10006                                 next_protocol =
10007                                         ((const struct rte_flow_item_ipv6 *)
10008                                          items->spec)->hdr.proto;
10009                                 next_protocol &=
10010                                         ((const struct rte_flow_item_ipv6 *)
10011                                          items->mask)->hdr.proto;
10012                         } else {
10013                                 /* Reset for inner layer. */
10014                                 next_protocol = 0xff;
10015                         }
10016                         break;
10017                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10018                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
10019                                                              match_value,
10020                                                              items, tunnel);
10021                         last_item = tunnel ?
10022                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10023                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10024                         if (items->mask != NULL &&
10025                             ((const struct rte_flow_item_ipv6_frag_ext *)
10026                              items->mask)->hdr.next_header) {
10027                                 next_protocol =
10028                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10029                                  items->spec)->hdr.next_header;
10030                                 next_protocol &=
10031                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10032                                  items->mask)->hdr.next_header;
10033                         } else {
10034                                 /* Reset for inner layer. */
10035                                 next_protocol = 0xff;
10036                         }
10037                         break;
10038                 case RTE_FLOW_ITEM_TYPE_TCP:
10039                         flow_dv_translate_item_tcp(match_mask, match_value,
10040                                                    items, tunnel);
10041                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10042                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10043                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10044                         break;
10045                 case RTE_FLOW_ITEM_TYPE_UDP:
10046                         flow_dv_translate_item_udp(match_mask, match_value,
10047                                                    items, tunnel);
10048                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10049                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10050                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10051                         break;
10052                 case RTE_FLOW_ITEM_TYPE_GRE:
10053                         flow_dv_translate_item_gre(match_mask, match_value,
10054                                                    items, tunnel);
10055                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10056                         last_item = MLX5_FLOW_LAYER_GRE;
10057                         break;
10058                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10059                         flow_dv_translate_item_gre_key(match_mask,
10060                                                        match_value, items);
10061                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10062                         break;
10063                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10064                         flow_dv_translate_item_nvgre(match_mask, match_value,
10065                                                      items, tunnel);
10066                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10067                         last_item = MLX5_FLOW_LAYER_GRE;
10068                         break;
10069                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10070                         flow_dv_translate_item_vxlan(match_mask, match_value,
10071                                                      items, tunnel);
10072                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10073                         last_item = MLX5_FLOW_LAYER_VXLAN;
10074                         break;
10075                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10076                         flow_dv_translate_item_vxlan_gpe(match_mask,
10077                                                          match_value, items,
10078                                                          tunnel);
10079                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10080                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10081                         break;
10082                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10083                         flow_dv_translate_item_geneve(match_mask, match_value,
10084                                                       items, tunnel);
10085                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10086                         last_item = MLX5_FLOW_LAYER_GENEVE;
10087                         break;
10088                 case RTE_FLOW_ITEM_TYPE_MPLS:
10089                         flow_dv_translate_item_mpls(match_mask, match_value,
10090                                                     items, last_item, tunnel);
10091                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10092                         last_item = MLX5_FLOW_LAYER_MPLS;
10093                         break;
10094                 case RTE_FLOW_ITEM_TYPE_MARK:
10095                         flow_dv_translate_item_mark(dev, match_mask,
10096                                                     match_value, items);
10097                         last_item = MLX5_FLOW_ITEM_MARK;
10098                         break;
10099                 case RTE_FLOW_ITEM_TYPE_META:
10100                         flow_dv_translate_item_meta(dev, match_mask,
10101                                                     match_value, attr, items);
10102                         last_item = MLX5_FLOW_ITEM_METADATA;
10103                         break;
10104                 case RTE_FLOW_ITEM_TYPE_ICMP:
10105                         flow_dv_translate_item_icmp(match_mask, match_value,
10106                                                     items, tunnel);
10107                         last_item = MLX5_FLOW_LAYER_ICMP;
10108                         break;
10109                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10110                         flow_dv_translate_item_icmp6(match_mask, match_value,
10111                                                       items, tunnel);
10112                         last_item = MLX5_FLOW_LAYER_ICMP6;
10113                         break;
10114                 case RTE_FLOW_ITEM_TYPE_TAG:
10115                         flow_dv_translate_item_tag(dev, match_mask,
10116                                                    match_value, items);
10117                         last_item = MLX5_FLOW_ITEM_TAG;
10118                         break;
10119                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10120                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10121                                                         match_value, items);
10122                         last_item = MLX5_FLOW_ITEM_TAG;
10123                         break;
10124                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10125                         flow_dv_translate_item_tx_queue(dev, match_mask,
10126                                                         match_value,
10127                                                         items);
10128                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10129                         break;
10130                 case RTE_FLOW_ITEM_TYPE_GTP:
10131                         flow_dv_translate_item_gtp(match_mask, match_value,
10132                                                    items, tunnel);
10133                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10134                         last_item = MLX5_FLOW_LAYER_GTP;
10135                         break;
10136                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10137                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10138                                 /* Create it only the first time it is used. */
10139                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10140                                 if (ret)
10141                                         return rte_flow_error_set
10142                                                 (error, -ret,
10143                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10144                                                 NULL,
10145                                                 "cannot create eCPRI parser");
10146                         }
10147                         /* Adjust the matcher mask and device flow value sizes. */
10148                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10149                         dev_flow->dv.value.size =
10150                                         MLX5_ST_SZ_BYTES(fte_match_param);
10151                         flow_dv_translate_item_ecpri(dev, match_mask,
10152                                                      match_value, items);
10153                         /* No other protocol should follow eCPRI layer. */
10154                         last_item = MLX5_FLOW_LAYER_ECPRI;
10155                         break;
10156                 default:
10157                         break;
10158                 }
10159                 item_flags |= last_item;
10160         }
10161         /*
10162          * When E-Switch mode is enabled, there are two cases where the
10163          * source port must be set manually: a NIC steering rule, and an
10164          * E-Switch rule in which no port_id item was found. In both
10165          * cases the source port is set according to the current port
10166          * in use.
10167          */
10168         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10169             (priv->representor || priv->master)) {
10170                 if (flow_dv_translate_item_port_id(dev, match_mask,
10171                                                    match_value, NULL))
10172                         return -rte_errno;
10173         }
10174 #ifdef RTE_LIBRTE_MLX5_DEBUG
10175         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10176                                               dev_flow->dv.value.buf));
10177 #endif
10178         /*
10179          * Layers may be already initialized from prefix flow if this dev_flow
10180          * is the suffix flow.
10181          */
10182         handle->layers |= item_flags;
10183         if (action_flags & MLX5_FLOW_ACTION_RSS)
10184                 flow_dv_hashfields_set(dev_flow, rss_desc);
10185         /* Register matcher. */
10186         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10187                                     matcher.mask.size);
10188         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10189                                                      matcher.priority);
10190         /* The reserved field does not need to be set to 0 here. */
10191         tbl_key.domain = attr->transfer;
10192         tbl_key.direction = attr->egress;
10193         tbl_key.table_id = dev_flow->dv.group;
10194         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
10195                 return -rte_errno;
10196         return 0;
10197 }
10198
10199 /**
10200  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10201  * and tunnel.
10202  *
10203  * @param[in, out] action
10204  *   Shared RSS action holding the hash RX queue objects.
10205  * @param[in] hash_fields
10206  *   Defines combination of packet fields to participate in RX hash.
10207  * @param[in] tunnel
10208  *   Tunnel type
10209  * @param[in] hrxq_idx
10210  *   Hash RX queue index to set.
10211  *
10212  * @return
10213  *   0 on success, otherwise negative errno value.
10214  */
10215 static int
10216 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10217                               const uint64_t hash_fields,
10218                               const int tunnel,
10219                               uint32_t hrxq_idx)
10220 {
10221         uint32_t *hrxqs = tunnel ? action->hrxq_tunnel : action->hrxq;
10222
10223         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10224         case MLX5_RSS_HASH_IPV4:
10225                 hrxqs[0] = hrxq_idx;
10226                 return 0;
10227         case MLX5_RSS_HASH_IPV4_TCP:
10228                 hrxqs[1] = hrxq_idx;
10229                 return 0;
10230         case MLX5_RSS_HASH_IPV4_UDP:
10231                 hrxqs[2] = hrxq_idx;
10232                 return 0;
10233         case MLX5_RSS_HASH_IPV6:
10234                 hrxqs[3] = hrxq_idx;
10235                 return 0;
10236         case MLX5_RSS_HASH_IPV6_TCP:
10237                 hrxqs[4] = hrxq_idx;
10238                 return 0;
10239         case MLX5_RSS_HASH_IPV6_UDP:
10240                 hrxqs[5] = hrxq_idx;
10241                 return 0;
10242         case MLX5_RSS_HASH_NONE:
10243                 hrxqs[6] = hrxq_idx;
10244                 return 0;
10245         default:
10246                 return -1;
10247         }
10248 }
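      /*
       * This helper and __flow_dv_action_rss_hrxq_lookup() below share one
       * slot layout inside the hrxq arrays: [0] IPv4, [1] IPv4/TCP,
       * [2] IPv4/UDP, [3] IPv6, [4] IPv6/TCP, [5] IPv6/UDP and
       * [6] no hash (MLX5_RSS_HASH_NONE).
       */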
10249
10250 /**
10251  * Look up the hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10252  * and tunnel.
10253  *
10254  * @param[in] action
10255  *   Shared RSS action holding the hash RX queue objects.
10256  * @param[in] hash_fields
10257  *   Defines combination of packet fields to participate in RX hash.
10258  * @param[in] tunnel
10259  *   Tunnel type
10260  *
10261  * @return
10262  *   Valid hash RX queue index, otherwise 0.
10263  */
10264 static uint32_t
10265 __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
10266                                  const uint64_t hash_fields,
10267                                  const int tunnel)
10268 {
10269         const uint32_t *hrxqs = tunnel ? action->hrxq_tunnel : action->hrxq;
10270
10271         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10272         case MLX5_RSS_HASH_IPV4:
10273                 return hrxqs[0];
10274         case MLX5_RSS_HASH_IPV4_TCP:
10275                 return hrxqs[1];
10276         case MLX5_RSS_HASH_IPV4_UDP:
10277                 return hrxqs[2];
10278         case MLX5_RSS_HASH_IPV6:
10279                 return hrxqs[3];
10280         case MLX5_RSS_HASH_IPV6_TCP:
10281                 return hrxqs[4];
10282         case MLX5_RSS_HASH_IPV6_UDP:
10283                 return hrxqs[5];
10284         case MLX5_RSS_HASH_NONE:
10285                 return hrxqs[6];
10286         default:
10287                 return 0;
10288         }
10289 }
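      /*
       * Illustrative pairing of the two helpers above (not part of the
       * driver sources; "act" and "new_idx" are hypothetical): look up a
       * queue for the given hash fields first, and register a new one only
       * on a miss, e.g.:
       *
       *     uint32_t idx = __flow_dv_action_rss_hrxq_lookup(act,
       *                             MLX5_RSS_HASH_IPV4_TCP, 0);
       *     if (!idx)
       *             __flow_dv_action_rss_hrxq_set(act,
       *                             MLX5_RSS_HASH_IPV4_TCP, 0, new_idx);
       */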
10290
10291 /**
10292  * Retrieve the hash RX queue suitable for the *flow*.
10293  * If a shared action is configured for the *flow*, the suitable hash RX
10294  * queue is retrieved from the attached shared action.
10295  *
10296  * @param[in] flow
10297  *   Pointer to the flow structure.
10298  * @param[in] dev_flow
10299  *   Pointer to the sub flow.
10300  * @param[out] hrxq
10301  *   Pointer to retrieved hash RX queue object.
10302  *
10303  * @return
10304  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10305  */
10306 static uint32_t
10307 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
10308                            struct mlx5_flow *dev_flow,
10309                            struct mlx5_hrxq **hrxq)
10310 {
10311         struct mlx5_priv *priv = dev->data->dev_private;
10312         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10313         uint32_t hrxq_idx;
10314
10315         if (flow->shared_rss) {
10316                 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10317                                 (flow->shared_rss, dev_flow->hash_fields,
10318                                  !!(dev_flow->handle->layers &
10319                                     MLX5_FLOW_LAYER_TUNNEL));
10320                 if (hrxq_idx) {
10321                         *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10322                                                hrxq_idx);
10323                         __atomic_fetch_add(&(*hrxq)->refcnt, 1,
10324                                            __ATOMIC_RELAXED);
10325                 }
10326         } else {
10327                 struct mlx5_flow_rss_desc *rss_desc =
10328                                 &wks->rss_desc[!!wks->flow_nested_idx];
10329
10330                 *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10331                                              &hrxq_idx);
10332         }
10333         return hrxq_idx;
10334 }
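      /*
       * Note: on the shared-action path above the hrxq reference counter is
       * incremented explicitly; on the non-shared path flow_dv_hrxq_prepare()
       * is assumed to return an already referenced queue, so either way the
       * caller ends up owning one reference.
       */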
10335
10336 /**
10337  * Apply the flow to the NIC, lock free
10338  * (the mutex should be acquired by the caller).
10339  *
10340  * @param[in] dev
10341  *   Pointer to the Ethernet device structure.
10342  * @param[in, out] flow
10343  *   Pointer to flow structure.
10344  * @param[out] error
10345  *   Pointer to error structure.
10346  *
10347  * @return
10348  *   0 on success, a negative errno value otherwise and rte_errno is set.
10349  */
10350 static int
10351 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
10352                 struct rte_flow_error *error)
10353 {
10354         struct mlx5_flow_dv_workspace *dv;
10355         struct mlx5_flow_handle *dh;
10356         struct mlx5_flow_handle_dv *dv_h;
10357         struct mlx5_flow *dev_flow;
10358         struct mlx5_priv *priv = dev->data->dev_private;
10359         uint32_t handle_idx;
10360         int n;
10361         int err;
10362         int idx;
10363         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10364
10365         MLX5_ASSERT(wks);
10366         for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
10367                 dev_flow = &wks->flows[idx];
10368                 dv = &dev_flow->dv;
10369                 dh = dev_flow->handle;
10370                 dv_h = &dh->dvh;
10371                 n = dv->actions_n;
10372                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
10373                         if (dv->transfer) {
10374                                 dv->actions[n++] = priv->sh->esw_drop_action;
10375                         } else {
10376                                 MLX5_ASSERT(priv->drop_queue.hrxq);
10377                                 dv->actions[n++] =
10378                                                 priv->drop_queue.hrxq->action;
10379                         }
10380                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
10381                            !dv_h->rix_sample && !dv_h->rix_dest_array) {
10382                         struct mlx5_hrxq *hrxq = NULL;
10383                         uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
10384                                                 (dev, flow, dev_flow, &hrxq);
10385                         if (!hrxq) {
10386                                 rte_flow_error_set
10387                                         (error, rte_errno,
10388                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10389                                          "cannot get hash queue");
10390                                 goto error;
10391                         }
10392                         dh->rix_hrxq = hrxq_idx;
10393                         dv->actions[n++] = hrxq->action;
10394                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
10395                         if (!priv->sh->default_miss_action) {
10396                                 rte_flow_error_set
10397                                         (error, rte_errno,
10398                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10399                                          "default miss action was not created.");
10400                                 goto error;
10401                         }
10402                         dv->actions[n++] = priv->sh->default_miss_action;
10403                 }
10404                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
10405                                                (void *)&dv->value, n,
10406                                                dv->actions, &dh->drv_flow);
10407                 if (err) {
10408                         rte_flow_error_set(error, errno,
10409                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10410                                            NULL,
10411                                            "hardware refuses to create flow");
10412                         goto error;
10413                 }
10414                 if (priv->vmwa_context &&
10415                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
10416                         /*
10417                          * The rule contains the VLAN pattern.
10418                          * For VF we are going to create VLAN
10419                          * interface to make hypervisor set correct
10420                          * e-Switch vport context.
10421                          */
10422                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
10423                 }
10424         }
10425         return 0;
10426 error:
10427         err = rte_errno; /* Save rte_errno before cleanup. */
10428         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
10429                        handle_idx, dh, next) {
10430                 /* hrxq is a union member; don't clear it if the flag is not set. */
10431                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
10432                         mlx5_hrxq_release(dev, dh->rix_hrxq);
10433                         dh->rix_hrxq = 0;
10434                 }
10435                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10436                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10437         }
10438         rte_errno = err; /* Restore rte_errno. */
10439         return -rte_errno;
10440 }
10441
10442 void
10443 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10444                           struct mlx5_cache_entry *entry)
10445 {
10446         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10447                                                           entry);
10448
10449         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10450         mlx5_free(cache);
10451 }
10452
10453 /**
10454  * Release the flow matcher.
10455  *
10456  * @param dev
10457  *   Pointer to Ethernet device.
10458  * @param handle
10459  *   Pointer to mlx5_flow_handle.
10460  *
10461  * @return
10462  *   1 while a reference on it exists, 0 when freed.
10463  */
10464 static int
10465 flow_dv_matcher_release(struct rte_eth_dev *dev,
10466                         struct mlx5_flow_handle *handle)
10467 {
10468         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10469         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10470                                                             typeof(*tbl), tbl);
10471         int ret;
10472
10473         MLX5_ASSERT(matcher->matcher_object);
10474         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10475         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10476         return ret;
10477 }
10478
10479 /**
10480  * Release encap_decap resource.
10481  *
10482  * @param list
10483  *   Pointer to the hash list.
10484  * @param entry
10485  *   Pointer to exist resource entry object.
10486  */
10487 void
10488 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10489                               struct mlx5_hlist_entry *entry)
10490 {
10491         struct mlx5_dev_ctx_shared *sh = list->ctx;
10492         struct mlx5_flow_dv_encap_decap_resource *res =
10493                 container_of(entry, typeof(*res), entry);
10494
10495         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10496         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10497 }
10498
10499 /**
10500  * Release an encap/decap resource.
10501  *
10502  * @param dev
10503  *   Pointer to Ethernet device.
10504  * @param encap_decap_idx
10505  *   Index of encap decap resource.
10506  *
10507  * @return
10508  *   1 while a reference on it exists, 0 when freed.
10509  */
10510 static int
10511 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10512                                      uint32_t encap_decap_idx)
10513 {
10514         struct mlx5_priv *priv = dev->data->dev_private;
10515         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10516
10517         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10518                                         encap_decap_idx);
10519         if (!cache_resource)
10520                 return 0;
10521         MLX5_ASSERT(cache_resource->action);
10522         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10523                                      &cache_resource->entry);
10524 }
10525
10526 /**
10527  * Release a jump-to-table action resource.
10528  *
10529  * @param dev
10530  *   Pointer to Ethernet device.
10531  * @param handle
10532  *   Pointer to mlx5_flow_handle.
10533  *
10534  * @return
10535  *   1 while a reference on it exists, 0 when freed.
10536  */
10537 static int
10538 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10539                                   struct mlx5_flow_handle *handle)
10540 {
10541         struct mlx5_priv *priv = dev->data->dev_private;
10542         struct mlx5_flow_tbl_data_entry *tbl_data;
10543
10544         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10545                              handle->rix_jump);
10546         if (!tbl_data)
10547                 return 0;
10548         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10549 }
10550
10551 void
10552 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10553                          struct mlx5_hlist_entry *entry)
10554 {
10555         struct mlx5_flow_dv_modify_hdr_resource *res =
10556                 container_of(entry, typeof(*res), entry);
10557
10558         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10559         mlx5_free(entry);
10560 }
10561
10562 /**
10563  * Release a modify-header resource.
10564  *
10565  * @param dev
10566  *   Pointer to Ethernet device.
10567  * @param handle
10568  *   Pointer to mlx5_flow_handle.
10569  *
10570  * @return
10571  *   1 while a reference on it exists, 0 when freed.
10572  */
10573 static int
10574 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10575                                     struct mlx5_flow_handle *handle)
10576 {
10577         struct mlx5_priv *priv = dev->data->dev_private;
10578         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10579
10580         MLX5_ASSERT(entry->action);
10581         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10582 }
10583
10584 void
10585 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10586                           struct mlx5_cache_entry *entry)
10587 {
10588         struct mlx5_dev_ctx_shared *sh = list->ctx;
10589         struct mlx5_flow_dv_port_id_action_resource *cache =
10590                         container_of(entry, typeof(*cache), entry);
10591
10592         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10593         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10594 }
10595
10596 /**
10597  * Release port ID action resource.
10598  *
10599  * @param dev
10600  *   Pointer to Ethernet device.
10601  * @param port_id
10602  *   Index of the port id action resource.
10603  *
10604  * @return
10605  *   1 while a reference on it exists, 0 when freed.
10606  */
10607 static int
10608 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10609                                         uint32_t port_id)
10610 {
10611         struct mlx5_priv *priv = dev->data->dev_private;
10612         struct mlx5_flow_dv_port_id_action_resource *cache;
10613
10614         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
10615         if (!cache)
10616                 return 0;
10617         MLX5_ASSERT(cache->action);
10618         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
10619                                      &cache->entry);
10620 }
10621
10622 void
10623 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
10624                             struct mlx5_cache_entry *entry)
10625 {
10626         struct mlx5_dev_ctx_shared *sh = list->ctx;
10627         struct mlx5_flow_dv_push_vlan_action_resource *cache =
10628                         container_of(entry, typeof(*cache), entry);
10629
10630         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10631         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
10632 }
10633
10634 /**
10635  * Release push vlan action resource.
10636  *
10637  * @param dev
10638  *   Pointer to Ethernet device.
10639  * @param handle
10640  *   Pointer to mlx5_flow_handle.
10641  *
10642  * @return
10643  *   1 while a reference on it exists, 0 when freed.
10644  */
10645 static int
10646 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10647                                           struct mlx5_flow_handle *handle)
10648 {
10649         struct mlx5_priv *priv = dev->data->dev_private;
10650         struct mlx5_flow_dv_push_vlan_action_resource *cache;
10651         uint32_t idx = handle->dvh.rix_push_vlan;
10652
10653         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10654         if (!cache)
10655                 return 0;
10656         MLX5_ASSERT(cache->action);
10657         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
10658                                      &cache->entry);
10659 }
10660
10661 /**
10662  * Release the fate resource.
10663  *
10664  * @param dev
10665  *   Pointer to Ethernet device.
10666  * @param handle
10667  *   Pointer to mlx5_flow_handle.
10668  */
10669 static void
10670 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
10671                                struct mlx5_flow_handle *handle)
10672 {
10673         if (!handle->rix_fate)
10674                 return;
10675         switch (handle->fate_action) {
10676         case MLX5_FLOW_FATE_QUEUE:
10677                 mlx5_hrxq_release(dev, handle->rix_hrxq);
10678                 break;
10679         case MLX5_FLOW_FATE_JUMP:
10680                 flow_dv_jump_tbl_resource_release(dev, handle);
10681                 break;
10682         case MLX5_FLOW_FATE_PORT_ID:
10683                 flow_dv_port_id_action_resource_release(dev,
10684                                 handle->rix_port_id_action);
10685                 break;
10686         default:
10687                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
10688                 break;
10689         }
10690         handle->rix_fate = 0;
10691 }
10692
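/*
 * Note on rix_fate (an assumption based on the usage here): the
 * fate-specific indices in struct mlx5_flow_handle - rix_hrxq, rix_jump
 * and rix_port_id_action - are expected to share storage with rix_fate,
 * so fate_action selects which resource the one stored index refers to:
 *
 *	handle->fate_action = MLX5_FLOW_FATE_QUEUE;
 *	handle->rix_hrxq = hrxq_idx;	readable above as handle->rix_fate
 */
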
10693 /**
10694  * Release a sample resource.
10695  *
10696  * @param dev
10697  *   Pointer to Ethernet device.
10698  * @param handle
10699  *   Pointer to mlx5_flow_handle.
10700  *
10701  * @return
10702  *   1 while a reference on it exists, 0 when freed.
10703  */
10704 static int
10705 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
10706                                      struct mlx5_flow_handle *handle)
10707 {
10708         struct mlx5_priv *priv = dev->data->dev_private;
10709         uint32_t idx = handle->dvh.rix_sample;
10710         struct mlx5_flow_dv_sample_resource *cache_resource;
10711
10712         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10713                          idx);
10714         if (!cache_resource)
10715                 return 0;
10716         MLX5_ASSERT(cache_resource->verbs_action);
10717         DRV_LOG(DEBUG, "sample resource %p: refcnt %d--",
10718                 (void *)cache_resource,
10719                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10720         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10721                                __ATOMIC_RELAXED) == 0) {
10722                 if (cache_resource->verbs_action)
10723                         claim_zero(mlx5_glue->destroy_flow_action
10724                                         (cache_resource->verbs_action));
10725                 if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10726                         if (cache_resource->default_miss)
10727                                 claim_zero(mlx5_glue->destroy_flow_action
10728                                   (cache_resource->default_miss));
10729                 }
10730                 if (cache_resource->normal_path_tbl)
10731                         flow_dv_tbl_resource_release(MLX5_SH(dev),
10732                                 cache_resource->normal_path_tbl);
10733                 flow_dv_sample_sub_actions_release(dev,
10734                                         &cache_resource->sample_idx);
10735                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10736                              &priv->sh->sample_action_list, idx,
10737                              cache_resource, next);
10738                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10739                 DRV_LOG(DEBUG, "sample resource %p: removed",
10740                         (void *)cache_resource);
10741                 return 0;
10742         }
10743         return 1;
10744 }
10745
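/*
 * Unlike the port-ID and push-VLAN resources above, which are managed by
 * the generic cache list, the sample resource still carries its own
 * atomic reference count. The release pattern it follows, in sketch form:
 *
 *	if (__atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 *		destroy the HW objects and sub-actions;
 *		unlink from the shared list and free the pool entry;
 *	}
 */
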
10746 /**
10747  * Release a destination array resource.
10748  *
10749  * @param dev
10750  *   Pointer to Ethernet device.
10751  * @param handle
10752  *   Pointer to mlx5_flow_handle.
10753  *
10754  * @return
10755  *   1 while a reference on it exists, 0 when freed.
10756  */
10757 static int
10758 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
10759                                      struct mlx5_flow_handle *handle)
10760 {
10761         struct mlx5_priv *priv = dev->data->dev_private;
10762         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10763         uint32_t idx = handle->dvh.rix_dest_array;
10764         uint32_t i = 0;
10765
10766         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10767                          idx);
10768         if (!cache_resource)
10769                 return 0;
10770         MLX5_ASSERT(cache_resource->action);
10771         DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
10772                 (void *)cache_resource,
10773                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10774         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10775                                __ATOMIC_RELAXED) == 0) {
10776                 if (cache_resource->action)
10777                         claim_zero(mlx5_glue->destroy_flow_action
10778                                                 (cache_resource->action));
10779                 for (; i < cache_resource->num_of_dest; i++)
10780                         flow_dv_sample_sub_actions_release(dev,
10781                                         &cache_resource->sample_idx[i]);
10782                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10783                              &priv->sh->dest_array_list, idx,
10784                              cache_resource, next);
10785                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
10786                 DRV_LOG(DEBUG, "destination array resource %p: removed",
10787                         (void *)cache_resource);
10788                 return 0;
10789         }
10790         return 1;
10791 }
10792
10793 /**
10794  * Remove the flow from the NIC but keep it in memory.
10795  * Lock free, (mutex should be acquired by caller).
10796  *
10797  * @param[in] dev
10798  *   Pointer to Ethernet device.
10799  * @param[in, out] flow
10800  *   Pointer to flow structure.
10801  */
10802 static void
10803 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
10804 {
10805         struct mlx5_flow_handle *dh;
10806         uint32_t handle_idx;
10807         struct mlx5_priv *priv = dev->data->dev_private;
10808
10809         if (!flow)
10810                 return;
10811         handle_idx = flow->dev_handles;
10812         while (handle_idx) {
10813                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10814                                     handle_idx);
10815                 if (!dh)
10816                         return;
10817                 if (dh->drv_flow) {
10818                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
10819                         dh->drv_flow = NULL;
10820                 }
10821                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
10822                         flow_dv_fate_resource_release(dev, dh);
10823                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10824                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10825                 handle_idx = dh->next.next;
10826         }
10827 }
10828
10829 /**
10830  * Remove the flow from the NIC and the memory.
10831  * Lock free, (mutex should be acquired by caller).
10832  *
10833  * @param[in] dev
10834  *   Pointer to the Ethernet device structure.
10835  * @param[in, out] flow
10836  *   Pointer to flow structure.
10837  */
10838 static void
10839 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
10840 {
10841         struct rte_flow_shared_action *shared;
10842         struct mlx5_flow_handle *dev_handle;
10843         struct mlx5_priv *priv = dev->data->dev_private;
10844
10845         if (!flow)
10846                 return;
10847         __flow_dv_remove(dev, flow);
10848         shared = mlx5_flow_get_shared_rss(flow);
10849         if (shared)
10850                 __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
10851         if (flow->counter) {
10852                 flow_dv_counter_release(dev, flow->counter);
10853                 flow->counter = 0;
10854         }
10855         if (flow->meter) {
10856                 struct mlx5_flow_meter *fm;
10857
10858                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
10859                                     flow->meter);
10860                 if (fm)
10861                         mlx5_flow_meter_detach(fm);
10862                 flow->meter = 0;
10863         }
10864         while (flow->dev_handles) {
10865                 uint32_t tmp_idx = flow->dev_handles;
10866
10867                 dev_handle = mlx5_ipool_get(priv->sh->ipool
10868                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
10869                 if (!dev_handle)
10870                         return;
10871                 flow->dev_handles = dev_handle->next.next;
10872                 if (dev_handle->dvh.matcher)
10873                         flow_dv_matcher_release(dev, dev_handle);
10874                 if (dev_handle->dvh.rix_sample)
10875                         flow_dv_sample_resource_release(dev, dev_handle);
10876                 if (dev_handle->dvh.rix_dest_array)
10877                         flow_dv_dest_array_resource_release(dev, dev_handle);
10878                 if (dev_handle->dvh.rix_encap_decap)
10879                         flow_dv_encap_decap_resource_release(dev,
10880                                 dev_handle->dvh.rix_encap_decap);
10881                 if (dev_handle->dvh.modify_hdr)
10882                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
10883                 if (dev_handle->dvh.rix_push_vlan)
10884                         flow_dv_push_vlan_action_resource_release(dev,
10885                                                                   dev_handle);
10886                 if (dev_handle->dvh.rix_tag)
10887                         flow_dv_tag_release(dev,
10888                                             dev_handle->dvh.rix_tag);
10889                 flow_dv_fate_resource_release(dev, dev_handle);
10890                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10891                            tmp_idx);
10892         }
10893 }
10894
10895 /**
10896  * Release array of hash RX queue objects.
10897  * Helper function.
10898  *
10899  * @param[in] dev
10900  *   Pointer to the Ethernet device structure.
10901  * @param[in, out] hrxqs
10902  *   Array of hash RX queue objects.
10903  *
10904  * @return
10905  *   Total number of references to hash RX queue objects in *hrxqs* array
10906  *   after this operation.
10907  */
10908 static int
10909 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
10910                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
10911 {
10912         size_t i;
10913         int remaining = 0;
10914
10915         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
10916                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
10917
10918                 if (!ret)
10919                         (*hrxqs)[i] = 0;
10920                 remaining += ret;
10921         }
10922         return remaining;
10923 }
10924
10925 /**
10926  * Release all hash RX queue objects representing shared RSS action.
10927  *
10928  * @param[in] dev
10929  *   Pointer to the Ethernet device structure.
10930  * @param[in, out] action
10931  *   Shared RSS action to remove hash RX queue objects from.
10932  *
10933  * @return
10934  *   Total number of references to hash RX queue objects stored in *action*
10935  *   after this operation.
10936  *   Expected to be 0 if no external references are held.
10937  */
10938 static int
10939 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
10940                                  struct mlx5_shared_action_rss *action)
10941 {
10942         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
10943                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
10944 }
10945
10946 /**
10947  * Setup shared RSS action.
10948  * Prepare set of hash RX queue objects sufficient to handle all valid
10949  * hash_fields combinations (see enum ibv_rx_hash_fields).
10950  *
10951  * @param[in] dev
10952  *   Pointer to the Ethernet device structure.
10953  * @param[in, out] action
10954  *   Partially initialized shared RSS action.
10955  * @param[out] error
10956  *   Perform verbose error reporting if not NULL. Initialized in case of
10957  *   error only.
10958  *
10959  * @return
10960  *   0 on success, otherwise negative errno value.
10961  */
10962 static int
10963 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
10964                         struct mlx5_shared_action_rss *action,
10965                         struct rte_flow_error *error)
10966 {
10967         struct mlx5_flow_rss_desc rss_desc = { 0 };
10968         size_t i;
10969         int err;
10970
10971         memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
10972         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
10973         rss_desc.const_q = action->origin.queue;
10974         rss_desc.queue_num = action->origin.queue_num;
10975         rss_desc.standalone = true;
10976         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
10977                 uint32_t hrxq_idx;
10978                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
10979                 int tunnel;
10980
10981                 for (tunnel = 0; tunnel < 2; tunnel++) {
10982                         rss_desc.tunnel = tunnel;
10983                         rss_desc.hash_fields = hash_fields;
10984                         hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
10985                         if (!hrxq_idx) {
10986                                 rte_flow_error_set
10987                                         (error, rte_errno,
10988                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10989                                          "cannot get hash queue");
10990                                 goto error_hrxq_new;
10991                         }
10992                         err = __flow_dv_action_rss_hrxq_set
10993                                 (action, hash_fields, tunnel, hrxq_idx);
10994                         MLX5_ASSERT(!err);
10995                 }
10996         }
10997         return 0;
10998 error_hrxq_new:
10999         err = rte_errno;
11000         __flow_dv_action_rss_hrxqs_release(dev, action);
11001         rte_errno = err;
11002         return -rte_errno;
11003 }
11004
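/*
 * The setup above pre-creates one hash Rx queue object per
 * (hash_fields, tunnel) combination - a MLX5_RSS_HASH_FIELDS_LEN x 2
 * grid - so flows referencing the shared action can resolve their queue
 * without allocating at flow creation time, e.g. (as used further down):
 *
 *	hrxq_idx = __flow_dv_action_rss_hrxq_lookup
 *				(shared_rss, hash_fields, tunnel);
 */
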
11005 /**
11006  * Create shared RSS action.
11007  *
11008  * @param[in] dev
11009  *   Pointer to the Ethernet device structure.
11010  * @param[in] conf
11011  *   Shared action configuration.
11012  * @param[in] rss
11013  *   RSS action specification used to create shared action.
11014  * @param[out] error
11015  *   Perform verbose error reporting if not NULL. Initialized in case of
11016  *   error only.
11017  *
11018  * @return
11019  *   A valid shared action handle in case of success, NULL otherwise and
11020  *   rte_errno is set.
11021  */
11022 static struct rte_flow_shared_action *
11023 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11024                             const struct rte_flow_shared_action_conf *conf,
11025                             const struct rte_flow_action_rss *rss,
11026                             struct rte_flow_error *error)
11027 {
11028         struct rte_flow_shared_action *shared_action = NULL;
11029         void *queue = NULL;
11030         struct mlx5_shared_action_rss *shared_rss;
11031         struct rte_flow_action_rss *origin;
11032         const uint8_t *rss_key;
11033         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11034
11035         RTE_SET_USED(conf);
11036         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11037                             0, SOCKET_ID_ANY);
11038         shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
11039                                     SOCKET_ID_ANY);
11040         if (!shared_action || !queue) {
11041                 rte_flow_error_set(error, ENOMEM,
11042                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11043                                    "cannot allocate resource memory");
11044                 goto error_rss_init;
11045         }
11046         shared_rss = &shared_action->rss;
11047         shared_rss->queue = queue;
11048         origin = &shared_rss->origin;
11049         origin->func = rss->func;
11050         origin->level = rss->level;
11051         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11052         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11053         /* NULL RSS key indicates default RSS key. */
11054         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11055         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11056         origin->key = &shared_rss->key[0];
11057         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11058         memcpy(shared_rss->queue, rss->queue, queue_size);
11059         origin->queue = shared_rss->queue;
11060         origin->queue_num = rss->queue_num;
11061         if (__flow_dv_action_rss_setup(dev, shared_rss, error))
11062                 goto error_rss_init;
11063         shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
11064         return shared_action;
11065 error_rss_init:
11066         mlx5_free(shared_action);
11067         mlx5_free(queue);
11068         return NULL;
11069 }
11070
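/*
 * Application-side sketch reaching this path (illustrative; assumes the
 * experimental shared-action API of this DPDK revision; "queues" and "n"
 * are hypothetical application variables):
 *
 *	struct rte_flow_error err;
 *	const struct rte_flow_action_rss rss = {
 *		.queue = queues,
 *		.queue_num = n,
 *	};
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss,
 *	};
 *	struct rte_flow_shared_action *shared =
 *		rte_flow_shared_action_create(port_id, NULL, &action, &err);
 */
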
11071 /**
11072  * Destroy the shared RSS action.
11073  * Release related hash RX queue objects.
11074  *
11075  * @param[in] dev
11076  *   Pointer to the Ethernet device structure.
11077  * @param[in] shared_rss
11078  *   The shared RSS action object to be removed.
11079  * @param[out] error
11080  *   Perform verbose error reporting if not NULL. Initialized in case of
11081  *   error only.
11082  *
11083  * @return
11084  *   0 on success, otherwise negative errno value.
11085  */
11086 static int
11087 __flow_dv_action_rss_release(struct rte_eth_dev *dev,
11088                          struct mlx5_shared_action_rss *shared_rss,
11089                          struct rte_flow_error *error)
11090 {
11091         struct rte_flow_shared_action *shared_action = NULL;
11092         uint32_t old_refcnt = 1;
11093         int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11094
11095         if (remaining) {
11096                 return rte_flow_error_set(error, ETOOMANYREFS,
11097                                           RTE_FLOW_ERROR_TYPE_ACTION,
11098                                           NULL,
11099                                           "shared rss hrxq has references");
11100         }
11101         shared_action = container_of(shared_rss,
11102                                      struct rte_flow_shared_action, rss);
11103         if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
11104                                          0, 0,
11105                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
11106                 return rte_flow_error_set(error, ETOOMANYREFS,
11107                                           RTE_FLOW_ERROR_TYPE_ACTION,
11108                                           NULL,
11109                                           "shared rss has references");
11110         }
11111         mlx5_free(shared_rss->queue);
11112         return 0;
11113 }
11114
11115 /**
11116  * Create shared action, lock free,
11117  * (mutex should be acquired by caller).
11118  * Dispatcher for action type specific call.
11119  *
11120  * @param[in] dev
11121  *   Pointer to the Ethernet device structure.
11122  * @param[in] conf
11123  *   Shared action configuration.
11124  * @param[in] action
11125  *   Action specification used to create shared action.
11126  * @param[out] error
11127  *   Perform verbose error reporting if not NULL. Initialized in case of
11128  *   error only.
11129  *
11130  * @return
11131  *   A valid shared action handle in case of success, NULL otherwise and
11132  *   rte_errno is set.
11133  */
11134 static struct rte_flow_shared_action *
11135 __flow_dv_action_create(struct rte_eth_dev *dev,
11136                         const struct rte_flow_shared_action_conf *conf,
11137                         const struct rte_flow_action *action,
11138                         struct rte_flow_error *error)
11139 {
11140         struct rte_flow_shared_action *shared_action = NULL;
11141         struct mlx5_priv *priv = dev->data->dev_private;
11142
11143         switch (action->type) {
11144         case RTE_FLOW_ACTION_TYPE_RSS:
11145                 shared_action = __flow_dv_action_rss_create(dev, conf,
11146                                                             action->conf,
11147                                                             error);
11148                 break;
11149         default:
11150                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11151                                    NULL, "action type not supported");
11152                 break;
11153         }
11154         if (shared_action) {
11155                 __atomic_add_fetch(&shared_action->refcnt, 1,
11156                                    __ATOMIC_RELAXED);
11157                 LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
11158         }
11159         return shared_action;
11160 }
11161
11162 /**
11163  * Destroy the shared action.
11164  * Release action related resources on the NIC and the memory.
11165  * Lock free, (mutex should be acquired by caller).
11166  * Dispatcher for action type specific call.
11167  *
11168  * @param[in] dev
11169  *   Pointer to the Ethernet device structure.
11170  * @param[in] action
11171  *   The shared action object to be removed.
11172  * @param[out] error
11173  *   Perform verbose error reporting if not NULL. Initialized in case of
11174  *   error only.
11175  *
11176  * @return
11177  *   0 on success, otherwise negative errno value.
11178  */
11179 static int
11180 __flow_dv_action_destroy(struct rte_eth_dev *dev,
11181                          struct rte_flow_shared_action *action,
11182                          struct rte_flow_error *error)
11183 {
11184         int ret;
11185
11186         switch (action->type) {
11187         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11188                 ret = __flow_dv_action_rss_release(dev, &action->rss, error);
11189                 break;
11190         default:
11191                 return rte_flow_error_set(error, ENOTSUP,
11192                                           RTE_FLOW_ERROR_TYPE_ACTION,
11193                                           NULL,
11194                                           "action type not supported");
11195         }
11196         if (ret)
11197                 return ret;
11198         LIST_REMOVE(action, next);
11199         mlx5_free(action);
11200         return 0;
11201 }
11202
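/*
 * Matching application-side teardown (illustrative; same assumptions as
 * the creation sketch above). A negative return with ETOOMANYREFS means
 * flows or hash Rx queue objects still reference the action:
 *
 *	if (rte_flow_shared_action_destroy(port_id, shared, &err) < 0)
 *		destroy the referencing flows first, then retry;
 */
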
11203 /**
11204  * Update the shared RSS action configuration in place.
11205  *
11206  * @param[in] dev
11207  *   Pointer to the Ethernet device structure.
11208  * @param[in] shared_rss
11209  *   The shared RSS action object to be updated.
11210  * @param[in] action_conf
11211  *   RSS action specification used to modify *shared_rss*.
11212  * @param[out] error
11213  *   Perform verbose error reporting if not NULL. Initialized in case of
11214  *   error only.
11215  *
11216  * @return
11217  *   0 on success, otherwise negative errno value.
11218  * @note: currently only support update of RSS queues.
11219  */
11220 static int
11221 __flow_dv_action_rss_update(struct rte_eth_dev *dev,
11222                             struct mlx5_shared_action_rss *shared_rss,
11223                             const struct rte_flow_action_rss *action_conf,
11224                             struct rte_flow_error *error)
11225 {
11226         size_t i;
11227         int ret;
11228         void *queue = NULL;
11229         const uint8_t *rss_key;
11230         uint32_t rss_key_len;
11231         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
11232
11233         queue = mlx5_malloc(MLX5_MEM_ZERO,
11234                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11235                             0, SOCKET_ID_ANY);
11236         if (!queue)
11237                 return rte_flow_error_set(error, ENOMEM,
11238                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11239                                           NULL,
11240                                           "cannot allocate resource memory");
11241         if (action_conf->key) {
11242                 rss_key = action_conf->key;
11243                 rss_key_len = action_conf->key_len;
11244         } else {
11245                 rss_key = rss_hash_default_key;
11246                 rss_key_len = MLX5_RSS_HASH_KEY_LEN;
11247         }
11248         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11249                 uint32_t hrxq_idx;
11250                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11251                 int tunnel;
11252
11253                 for (tunnel = 0; tunnel < 2; tunnel++) {
11254                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup
11255                                         (shared_rss, hash_fields, tunnel);
11256                         MLX5_ASSERT(hrxq_idx);
11257                         ret = mlx5_hrxq_modify
11258                                 (dev, hrxq_idx,
11259                                  rss_key, rss_key_len,
11260                                  hash_fields,
11261                                  action_conf->queue, action_conf->queue_num);
11262                         if (ret) {
11263                                 mlx5_free(queue);
11264                                 return rte_flow_error_set
11265                                         (error, rte_errno,
11266                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11267                                          "cannot update hash queue");
11268                         }
11269                 }
11270         }
11271         mlx5_free(shared_rss->queue);
11272         shared_rss->queue = queue;
11273         memcpy(shared_rss->queue, action_conf->queue, queue_size);
11274         shared_rss->origin.queue = shared_rss->queue;
11275         shared_rss->origin.queue_num = action_conf->queue_num;
11276         return 0;
11277 }
11278
11279 /**
11280  * Update the shared action configuration in place, lock free,
11281  * (mutex should be acquired by caller).
11282  *
11283  * @param[in] dev
11284  *   Pointer to the Ethernet device structure.
11285  * @param[in] action
11286  *   The shared action object to be updated.
11287  * @param[in] action_conf
11288  *   Action specification used to modify *action*.
11289  *   *action_conf* should be of type correlating with type of the *action*,
11290  *   otherwise considered as invalid.
11291  * @param[out] error
11292  *   Perform verbose error reporting if not NULL. Initialized in case of
11293  *   error only.
11294  *
11295  * @return
11296  *   0 on success, otherwise negative errno value.
11297  */
11298 static int
11299 __flow_dv_action_update(struct rte_eth_dev *dev,
11300                         struct rte_flow_shared_action *action,
11301                         const void *action_conf,
11302                         struct rte_flow_error *error)
11303 {
11304         switch (action->type) {
11305         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11306                 return __flow_dv_action_rss_update(dev, &action->rss,
11307                                                    action_conf, error);
11308         default:
11309                 return rte_flow_error_set(error, ENOTSUP,
11310                                           RTE_FLOW_ERROR_TYPE_ACTION,
11311                                           NULL,
11312                                           "action type not supported");
11313         }
11314 }
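
/*
 * Application-side sketch of an in-place update (illustrative; assumes
 * the experimental shared-action API of this DPDK revision, and that only
 * the RSS queue set changes, per the note above; new_rss_conf is a
 * hypothetical application variable):
 *
 *	const struct rte_flow_action update = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &new_rss_conf,
 *	};
 *	rte_flow_shared_action_update(port_id, shared, &update, &err);
 */
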
11315 /**
11316  * Query a DV flow rule for its statistics via DevX.
11317  *
11318  * @param[in] dev
11319  *   Pointer to Ethernet device.
11320  * @param[in] flow
11321  *   Pointer to the sub flow.
11322  * @param[out] data
11323  *   Data retrieved by the query.
11324  * @param[out] error
11325  *   Perform verbose error reporting if not NULL.
11326  *
11327  * @return
11328  *   0 on success, a negative errno value otherwise and rte_errno is set.
11329  */
11330 static int
11331 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11332                     void *data, struct rte_flow_error *error)
11333 {
11334         struct mlx5_priv *priv = dev->data->dev_private;
11335         struct rte_flow_query_count *qc = data;
11336
11337         if (!priv->config.devx)
11338                 return rte_flow_error_set(error, ENOTSUP,
11339                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11340                                           NULL,
11341                                           "counters are not supported");
11342         if (flow->counter) {
11343                 uint64_t pkts, bytes;
11344                 struct mlx5_flow_counter *cnt;
11345                 int err;
11346
11347                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11348                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
11349                                            &bytes);
11350
11351                 if (err)
11352                         return rte_flow_error_set(error, -err,
11353                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11354                                         NULL, "cannot read counters");
11355                 qc->hits_set = 1;
11356                 qc->bytes_set = 1;
11357                 qc->hits = pkts - cnt->hits;
11358                 qc->bytes = bytes - cnt->bytes;
11359                 if (qc->reset) {
11360                         cnt->hits = pkts;
11361                         cnt->bytes = bytes;
11362                 }
11363                 return 0;
11364         }
11365         return rte_flow_error_set(error, EINVAL,
11366                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11367                                   NULL,
11368                                   "counters are not available");
11369 }
11370
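/*
 * Application-side counter query sketch (standard rte_flow API; the
 * queried flow must carry a COUNT action):
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	const struct rte_flow_action acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	if (!rte_flow_query(port_id, flow, acts, &qc, &err) && qc.hits_set)
 *		consume qc.hits and qc.bytes;
 */
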
11371 /**
11372  * Query a flow rule AGE action for aging information.
11373  *
11374  * @param[in] dev
11375  *   Pointer to Ethernet device.
11376  * @param[in] flow
11377  *   Pointer to the sub flow.
11378  * @param[out] data
11379  *   Data retrieved by the query.
11380  * @param[out] error
11381  *   Perform verbose error reporting if not NULL.
11382  *
11383  * @return
11384  *   0 on success, a negative errno value otherwise and rte_errno is set.
11385  */
11386 static int
11387 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11388                   void *data, struct rte_flow_error *error)
11389 {
11390         struct rte_flow_query_age *resp = data;
11391
11392         if (flow->counter) {
11393                 struct mlx5_age_param *age_param =
11394                                 flow_dv_counter_idx_get_age(dev, flow->counter);
11395
11396                 if (!age_param || !age_param->timeout)
11397                         return rte_flow_error_set
11398                                         (error, EINVAL,
11399                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11400                                          NULL, "cannot read age data");
11401                 resp->aged = __atomic_load_n(&age_param->state,
11402                                              __ATOMIC_RELAXED) ==
11403                                                         AGE_TMOUT ? 1 : 0;
11404                 resp->sec_since_last_hit_valid = !resp->aged;
11405                 if (resp->sec_since_last_hit_valid)
11406                         resp->sec_since_last_hit =
11407                                 __atomic_load_n(&age_param->sec_since_last_hit,
11408                                                 __ATOMIC_RELAXED);
11409                 return 0;
11410         }
11411         return rte_flow_error_set(error, EINVAL,
11412                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11413                                   NULL,
11414                                   "age data not available");
11415 }
11416
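/*
 * Application-side age query sketch (standard rte_flow API; the flow
 * must carry an AGE action). resp->aged reports an expired timeout,
 * otherwise sec_since_last_hit is valid:
 *
 *	struct rte_flow_query_age age = { 0 };
 *	const struct rte_flow_action acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_AGE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	if (!rte_flow_query(port_id, flow, acts, &age, &err) && age.aged)
 *		the flow has aged out;
 */
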
11417 /**
11418  * Query a flow.
11419  *
11420  * @see rte_flow_query()
11421  * @see rte_flow_ops
11422  */
11423 static int
11424 flow_dv_query(struct rte_eth_dev *dev,
11425               struct rte_flow *flow,
11426               const struct rte_flow_action *actions,
11427               void *data,
11428               struct rte_flow_error *error)
11429 {
11430         int ret = -EINVAL;
11431
11432         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11433                 switch (actions->type) {
11434                 case RTE_FLOW_ACTION_TYPE_VOID:
11435                         break;
11436                 case RTE_FLOW_ACTION_TYPE_COUNT:
11437                         ret = flow_dv_query_count(dev, flow, data, error);
11438                         break;
11439                 case RTE_FLOW_ACTION_TYPE_AGE:
11440                         ret = flow_dv_query_age(dev, flow, data, error);
11441                         break;
11442                 default:
11443                         return rte_flow_error_set(error, ENOTSUP,
11444                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11445                                                   actions,
11446                                                   "action not supported");
11447                 }
11448         }
11449         return ret;
11450 }
11451
11452 /**
11453  * Destroy the meter table set.
11454  * Lock free, (mutex should be acquired by caller).
11455  *
11456  * @param[in] dev
11457  *   Pointer to Ethernet device.
11458  * @param[in] tbl
11459  *   Pointer to the meter table set.
11460  *
11461  * @return
11462  *   Always 0.
11463  */
11464 static int
11465 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11466                         struct mlx5_meter_domains_infos *tbl)
11467 {
11468         struct mlx5_priv *priv = dev->data->dev_private;
11469         struct mlx5_meter_domains_infos *mtd =
11470                                 (struct mlx5_meter_domains_infos *)tbl;
11471
11472         if (!mtd || !priv->config.dv_flow_en)
11473                 return 0;
11474         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11475                 claim_zero(mlx5_flow_os_destroy_flow
11476                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11477         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11478                 claim_zero(mlx5_flow_os_destroy_flow
11479                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11480         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11481                 claim_zero(mlx5_flow_os_destroy_flow
11482                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11483         if (mtd->egress.color_matcher)
11484                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11485                            (mtd->egress.color_matcher));
11486         if (mtd->egress.any_matcher)
11487                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11488                            (mtd->egress.any_matcher));
11489         if (mtd->egress.tbl)
11490                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
11491         if (mtd->egress.sfx_tbl)
11492                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
11493         if (mtd->ingress.color_matcher)
11494                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11495                            (mtd->ingress.color_matcher));
11496         if (mtd->ingress.any_matcher)
11497                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11498                            (mtd->ingress.any_matcher));
11499         if (mtd->ingress.tbl)
11500                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
11501         if (mtd->ingress.sfx_tbl)
11502                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11503                                              mtd->ingress.sfx_tbl);
11504         if (mtd->transfer.color_matcher)
11505                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11506                            (mtd->transfer.color_matcher));
11507         if (mtd->transfer.any_matcher)
11508                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11509                            (mtd->transfer.any_matcher));
11510         if (mtd->transfer.tbl)
11511                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
11512         if (mtd->transfer.sfx_tbl)
11513                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11514                                              mtd->transfer.sfx_tbl);
11515         if (mtd->drop_actn)
11516                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
11517         mlx5_free(mtd);
11518         return 0;
11519 }
11520
11521 /* Number of meter flow actions, count and jump or count and drop. */
11522 #define METER_ACTIONS 2
11523
11524 /**
11525  * Create the meter table and suffix table for the specified domain.
11526  *
11527  * @param[in] dev
11528  *   Pointer to Ethernet device.
11529  * @param[in,out] mtb
11530  *   Pointer to DV meter table set.
11531  * @param[in] egress
11532  *   Egress table attribute.
11533  * @param[in] transfer
11534  *   Transfer (FDB) table attribute.
11535  * @param[in] color_reg_c_idx
11536  *   Reg C index for color match.
11537  *
11538  * @return
11539  *   0 on success, -1 otherwise and rte_errno is set.
11540  */
11541 static int
11542 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
11543                            struct mlx5_meter_domains_infos *mtb,
11544                            uint8_t egress, uint8_t transfer,
11545                            uint32_t color_reg_c_idx)
11546 {
11547         struct mlx5_priv *priv = dev->data->dev_private;
11548         struct mlx5_dev_ctx_shared *sh = priv->sh;
11549         struct mlx5_flow_dv_match_params mask = {
11550                 .size = sizeof(mask.buf),
11551         };
11552         struct mlx5_flow_dv_match_params value = {
11553                 .size = sizeof(value.buf),
11554         };
11555         struct mlx5dv_flow_matcher_attr dv_attr = {
11556                 .type = IBV_FLOW_ATTR_NORMAL,
11557                 .priority = 0,
11558                 .match_criteria_enable = 0,
11559                 .match_mask = (void *)&mask,
11560         };
11561         void *actions[METER_ACTIONS];
11562         struct mlx5_meter_domain_info *dtb;
11563         struct rte_flow_error error;
11564         int i = 0;
11565         int ret;
11566
11567         if (transfer)
11568                 dtb = &mtb->transfer;
11569         else if (egress)
11570                 dtb = &mtb->egress;
11571         else
11572                 dtb = &mtb->ingress;
11573         /* Create the meter table with METER level. */
11574         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
11575                                             egress, transfer, false, NULL, 0,
11576                                             0, &error);
11577         if (!dtb->tbl) {
11578                 DRV_LOG(ERR, "Failed to create meter policer table.");
11579                 return -1;
11580         }
11581         /* Create the meter suffix table with SUFFIX level. */
11582         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
11583                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
11584                                             egress, transfer, false, NULL, 0,
11585                                             0, &error);
11586         if (!dtb->sfx_tbl) {
11587                 DRV_LOG(ERR, "Failed to create meter suffix table.");
11588                 return -1;
11589         }
11590         /* Create matchers, Any and Color. */
11591         dv_attr.priority = 3;
11592         dv_attr.match_criteria_enable = 0;
11593         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11594                                                &dtb->any_matcher);
11595         if (ret) {
11596                 DRV_LOG(ERR, "Failed to create meter"
11597                              " policer default matcher.");
11598                 goto error_exit;
11599         }
11600         dv_attr.priority = 0;
11601         dv_attr.match_criteria_enable =
11602                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
11603         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
11604                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
11605         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11606                                                &dtb->color_matcher);
11607         if (ret) {
11608                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
11609                 goto error_exit;
11610         }
11611         if (mtb->count_actns[RTE_MTR_DROPPED])
11612                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
11613         actions[i++] = mtb->drop_actn;
11614         /* Default rule: lowest priority, match any, actions: drop. */
11615         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
11616                                        actions,
11617                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
11618         if (ret) {
11619                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
11620                 goto error_exit;
11621         }
11622         return 0;
11623 error_exit:
11624         return -1;
11625 }
11626
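/*
 * Resulting per-domain layout (sketch): the METER-level table holds a
 * color matcher (reg C match on the meter color set by the policer) plus
 * a lowest-priority catch-all matcher whose single rule counts and drops;
 * packets with a passing color jump to the SUFFIX-level table, where the
 * original flow processing resumes. The per-color rules themselves are
 * added later by flow_dv_create_policer_forward_rule().
 */
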
11627 /**
11628  * Create the needed meter and suffix tables.
11629  * Lock free, (mutex should be acquired by caller).
11630  *
11631  * @param[in] dev
11632  *   Pointer to Ethernet device.
11633  * @param[in] fm
11634  *   Pointer to the flow meter.
11635  *
11636  * @return
11637  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
11638  */
11639 static struct mlx5_meter_domains_infos *
11640 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
11641                        const struct mlx5_flow_meter *fm)
11642 {
11643         struct mlx5_priv *priv = dev->data->dev_private;
11644         struct mlx5_meter_domains_infos *mtb;
11645         int ret;
11646         int i;
11647
11648         if (!priv->mtr_en) {
11649                 rte_errno = ENOTSUP;
11650                 return NULL;
11651         }
11652         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
11653         if (!mtb) {
11654                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
11655                 return NULL;
11656         }
11657         /* Create meter count actions. */
11658         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
11659                 struct mlx5_flow_counter *cnt;
11660                 if (!fm->policer_stats.cnt[i])
11661                         continue;
11662                 cnt = flow_dv_counter_get_by_idx(dev,
11663                       fm->policer_stats.cnt[i], NULL);
11664                 mtb->count_actns[i] = cnt->action;
11665         }
11666         /* Create drop action. */
11667         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
11668         if (ret) {
11669                 DRV_LOG(ERR, "Failed to create drop action.");
11670                 goto error_exit;
11671         }
11672         /* Egress meter table. */
11673         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
11674         if (ret) {
11675                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
11676                 goto error_exit;
11677         }
11678         /* Ingress meter table. */
11679         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
11680         if (ret) {
11681                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
11682                 goto error_exit;
11683         }
11684         /* FDB meter table. */
11685         if (priv->config.dv_esw_en) {
11686                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
11687                                                  priv->mtr_color_reg);
11688                 if (ret) {
11689                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
11690                         goto error_exit;
11691                 }
11692         }
11693         return mtb;
11694 error_exit:
11695         flow_dv_destroy_mtr_tbl(dev, mtb);
11696         return NULL;
11697 }
11698
11699 /**
11700  * Destroy domain policer rule.
11701  *
11702  * @param[in] dt
11703  *   Pointer to domain table.
11704  */
11705 static void
11706 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
11707 {
11708         int i;
11709
11710         for (i = 0; i < RTE_MTR_DROPPED; i++) {
11711                 if (dt->policer_rules[i]) {
11712                         claim_zero(mlx5_flow_os_destroy_flow
11713                                    (dt->policer_rules[i]));
11714                         dt->policer_rules[i] = NULL;
11715                 }
11716         }
11717         if (dt->jump_actn) {
11718                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
11719                 dt->jump_actn = NULL;
11720         }
11721 }
11722
11723 /**
11724  * Destroy policer rules.
11725  *
11726  * @param[in] dev
11727  *   Pointer to Ethernet device.
11728  * @param[in] fm
11729  *   Pointer to flow meter structure.
11730  * @param[in] attr
11731  *   Pointer to flow attributes.
11732  *
11733  * @return
11734  *   Always 0.
11735  */
11736 static int
11737 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
11738                               const struct mlx5_flow_meter *fm,
11739                               const struct rte_flow_attr *attr)
11740 {
11741         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
11742
11743         if (!mtb)
11744                 return 0;
11745         if (attr->egress)
11746                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
11747         if (attr->ingress)
11748                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
11749         if (attr->transfer)
11750                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
11751         return 0;
11752 }
11753
11754 /**
11755  * Create the meter policer rule for the specified domain.
11756  *
11757  * @param[in] fm
11758  *   Pointer to flow meter structure.
11759  * @param[in] dtb
11760  *   Pointer to the meter domain table info.
11761  * @param[in] mtr_reg_c
11762  *   Color match REG_C.
11763  *
11764  * @return
11765  *   0 on success, -1 otherwise.
11766  */
11767 static int
11768 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
11769                                     struct mlx5_meter_domain_info *dtb,
11770                                     uint8_t mtr_reg_c)
11771 {
11772         struct mlx5_flow_dv_match_params matcher = {
11773                 .size = sizeof(matcher.buf),
11774         };
11775         struct mlx5_flow_dv_match_params value = {
11776                 .size = sizeof(value.buf),
11777         };
11778         struct mlx5_meter_domains_infos *mtb = fm->mfts;
11779         void *actions[METER_ACTIONS];
11780         int i;
11781         int ret = 0;
11782
11783         /* Create jump action. */
11784         if (!dtb->jump_actn)
11785                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11786                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
11787         if (ret) {
11788                 DRV_LOG(ERR, "Failed to create policer jump action.");
11789                 goto error;
11790         }
11791         for (i = 0; i < RTE_MTR_DROPPED; i++) {
11792                 int j = 0;
11793
11794                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
11795                                        rte_col_2_mlx5_col(i), UINT8_MAX);
11796                 if (mtb->count_actns[i])
11797                         actions[j++] = mtb->count_actns[i];
11798                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
11799                         actions[j++] = mtb->drop_actn;
11800                 else
11801                         actions[j++] = dtb->jump_actn;
11802                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
11803                                                (void *)&value, j, actions,
11804                                                &dtb->policer_rules[i]);
11805                 if (ret) {
11806                         DRV_LOG(ERR, "Failed to create policer rule.");
11807                         goto error;
11808                 }
11809         }
11810         return 0;
11811 error:
11812         rte_errno = errno;
11813         return -1;
11814 }
11815
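/*
 * Per-color dispatch built above, in sketch form (one rule per color
 * slot below RTE_MTR_DROPPED):
 *
 *	match:   reg_c[mtr_reg_c] == rte_col_2_mlx5_col(i)
 *	actions: the per-color counter, if any, then either drop
 *		 (fm->action[i] == MTR_POLICER_ACTION_DROP) or a jump to
 *		 the suffix table.
 */
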
11816 /**
11817  * Create policer rules.
11818  *
11819  * @param[in] dev
11820  *   Pointer to Ethernet device.
11821  * @param[in] fm
11822  *   Pointer to flow meter structure.
11823  * @param[in] attr
11824  *   Pointer to flow attributes.
11825  *
11826  * @return
11827  *   0 on success, -1 otherwise.
11828  */
11829 static int
11830 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
11831                              struct mlx5_flow_meter *fm,
11832                              const struct rte_flow_attr *attr)
11833 {
11834         struct mlx5_priv *priv = dev->data->dev_private;
11835         struct mlx5_meter_domains_infos *mtb = fm->mfts;
11836         int ret;
11837
11838         if (attr->egress) {
11839                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
11840                                                 priv->mtr_color_reg);
11841                 if (ret) {
11842                         DRV_LOG(ERR, "Failed to create egress policer.");
11843                         goto error;
11844                 }
11845         }
11846         if (attr->ingress) {
11847                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
11848                                                 priv->mtr_color_reg);
11849                 if (ret) {
11850                         DRV_LOG(ERR, "Failed to create ingress policer.");
11851                         goto error;
11852                 }
11853         }
11854         if (attr->transfer) {
11855                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
11856                                                 priv->mtr_color_reg);
11857                 if (ret) {
11858                         DRV_LOG(ERR, "Failed to create transfer policer.");
11859                         goto error;
11860                 }
11861         }
11862         return 0;
11863 error:
11864         flow_dv_destroy_policer_rules(dev, fm, attr);
11865         return -1;
11866 }
11867
11868 /**
11869  * Validate the batch counter support in root table.
11870  *
11871  * Create a simple flow with invalid counter and drop action on root table to
11872  * validate if batch counter with offset on root table is supported or not.
11873  *
11874  * @param[in] dev
11875  *   Pointer to rte_eth_dev structure.
11876  *
11877  * @return
11878  *   0 on success, a negative errno value otherwise and rte_errno is set.
11879  */
11880 int
11881 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
11882 {
11883         struct mlx5_priv *priv = dev->data->dev_private;
11884         struct mlx5_dev_ctx_shared *sh = priv->sh;
11885         struct mlx5_flow_dv_match_params mask = {
11886                 .size = sizeof(mask.buf),
11887         };
11888         struct mlx5_flow_dv_match_params value = {
11889                 .size = sizeof(value.buf),
11890         };
11891         struct mlx5dv_flow_matcher_attr dv_attr = {
11892                 .type = IBV_FLOW_ATTR_NORMAL,
11893                 .priority = 0,
11894                 .match_criteria_enable = 0,
11895                 .match_mask = (void *)&mask,
11896         };
11897         void *actions[2] = { 0 };
11898         struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
11899         struct mlx5_devx_obj *dcs = NULL;
11900         void *matcher = NULL;
11901         void *flow = NULL;
11902         int i, ret = -1;
11903
11904         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
11905         if (!tbl)
11906                 goto err;
11907         dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
11908                                             NULL, 0, 0, NULL);
11909         if (!dest_tbl)
11910                 goto err;
11911         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
11912         if (!dcs)
11913                 goto err;
11914         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
11915                                                     &actions[0]);
11916         if (ret)
11917                 goto err;
11918         ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11919                                 (dest_tbl->obj, &actions[1]);
11920         if (ret)
11921                 goto err;
11922         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
11923         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
11924                                                &matcher);
11925         if (ret)
11926                 goto err;
11927         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
11928                                        actions, &flow);
11929 err:
11930         /*
11931          * If the batch counter with offset is not supported, the driver does
11932          * not validate the invalid offset value and flow creation succeeds.
11933          * In that case, batch counters are not supported in the root table.
11934          *
11935          * Otherwise, if flow creation fails, the counter offset is supported.
11936          */
11937         if (flow) {
11938                 DRV_LOG(INFO, "Batch counter is not supported in root "
11939                               "table. Switch to fallback mode.");
11940                 rte_errno = ENOTSUP;
11941                 ret = -rte_errno;
11942                 claim_zero(mlx5_flow_os_destroy_flow(flow));
11943         } else {
11944                 /* Check matcher to make sure validate fail at flow create. */
11945                 if (!matcher || errno != EINVAL)
11946                         DRV_LOG(ERR, "Unexpected error in counter offset "
11947                                      "support detection");
11948                 ret = 0;
11949         }
11950         for (i = 0; i < 2; i++) {
11951                 if (actions[i])
11952                         claim_zero(mlx5_flow_os_destroy_flow_action
11953                                    (actions[i]));
11954         }
11955         if (matcher)
11956                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
11957         if (tbl)
11958                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11959         if (dest_tbl)
11960                 flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
11961         if (dcs)
11962                 claim_zero(mlx5_devx_cmd_destroy(dcs));
11963         return ret;
11964 }
11965
11966 /**
11967  * Query a DevX counter.
11968  *
11969  * @param[in] dev
11970  *   Pointer to the Ethernet device structure.
11971  * @param[in] counter
11972  *   Index to the flow counter.
11973  * @param[in] clear
11974  *   Set to clear the counter statistics.
11975  * @param[out] pkts
11976  *   The statistics value of packets.
11977  * @param[out] bytes
11978  *   The statistics value of bytes.
11979  *
11980  * @return
11981  *   0 on success, otherwise return -1.
11982  */
11983 static int
11984 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
11985                       uint64_t *pkts, uint64_t *bytes)
11986 {
11987         struct mlx5_priv *priv = dev->data->dev_private;
11988         struct mlx5_flow_counter *cnt;
11989         uint64_t inn_pkts, inn_bytes;
11990         int ret;
11991
11992         if (!priv->config.devx)
11993                 return -1;
11994
11995         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
11996         if (ret)
11997                 return -1;
11998         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
11999         *pkts = inn_pkts - cnt->hits;
12000         *bytes = inn_bytes - cnt->bytes;
12001         if (clear) {
12002                 cnt->hits = inn_pkts;
12003                 cnt->bytes = inn_bytes;
12004         }
12005         return 0;
12006 }
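/*
 * Illustrative only: the hits/bytes delta semantics implemented above
 * are what an application observes when querying a COUNT action. A
 * minimal sketch of that application-side call, assuming the flow was
 * created with RTE_FLOW_ACTION_TYPE_COUNT in its action list.
 */
static __rte_unused int
example_read_flow_counter(uint16_t port_id, struct rte_flow *flow,
			  uint64_t *pkts, uint64_t *bytes)
{
	static const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	/* .reset = 1 mirrors the 'clear' argument of the query above. */
	struct rte_flow_query_count query = { .reset = 1 };
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, &count_action, &query, &error))
		return -1;
	*pkts = query.hits;
	*bytes = query.bytes;
	return 0;
}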
12007
12008 /**
12009  * Get aged-out flows.
12010  *
12011  * @param[in] dev
12012  *   Pointer to the Ethernet device structure.
12013  * @param[in] context
12014  *   The address of an array of pointers to the aged-out flow contexts.
12015  * @param[in] nb_contexts
12016  *   The length of the context array.
12017  * @param[out] error
12018  *   Perform verbose error reporting if not NULL. Initialized in case of
12019  *   error only.
12020  *
12021  * @return
12022  *   the number of aged-out flows on success, otherwise a negative
12023  *   errno value.
12024  *   If nb_contexts is 0, return the total number of aged-out flows.
12025  *   If nb_contexts is not 0, return the number of aged-out flows
12026  *   reported in the context array.
12027  */
12028 static int
12029 flow_get_aged_flows(struct rte_eth_dev *dev,
12030                     void **context,
12031                     uint32_t nb_contexts,
12032                     struct rte_flow_error *error)
12033 {
12034         struct mlx5_priv *priv = dev->data->dev_private;
12035         struct mlx5_age_info *age_info;
12036         struct mlx5_age_param *age_param;
12037         struct mlx5_flow_counter *counter;
12038         int nb_flows = 0;
12039
12040         if (nb_contexts && !context)
12041                 return rte_flow_error_set(error, EINVAL,
12042                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12043                                           NULL,
12044                                           "Should assign at least one flow or"
12045                                           " context to get if nb_contexts != 0");
12046         age_info = GET_PORT_AGE_INFO(priv);
12047         rte_spinlock_lock(&age_info->aged_sl);
12048         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
12049                 nb_flows++;
12050                 if (nb_contexts) {
12051                         age_param = MLX5_CNT_TO_AGE(counter);
12052                         context[nb_flows - 1] = age_param->context;
12053                         if (!(--nb_contexts))
12054                                 break;
12055                 }
12056         }
12057         rte_spinlock_unlock(&age_info->aged_sl);
12058         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
12059         return nb_flows;
12060 }
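/*
 * Illustrative only: a hedged sketch of the application-side poll that
 * lands in flow_get_aged_flows() above, using the public
 * rte_flow_get_aged_flows() API. The batch size is arbitrary; passing
 * nb_contexts == 0 instead would return the total count only.
 */
static __rte_unused void
example_poll_aged_flows(uint16_t port_id)
{
	void *contexts[64]; /* arbitrary batch size for this sketch */
	struct rte_flow_error error;
	int n, i;

	n = rte_flow_get_aged_flows(port_id, contexts, RTE_DIM(contexts),
				    &error);
	for (i = 0; i < n; i++)
		/*
		 * Each entry is the pointer the application stored in the
		 * AGE action's context field; a real handler would destroy
		 * or refresh the matching flow here.
		 */
		DRV_LOG(INFO, "aged flow context %p", contexts[i]);
}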
12061
12062 /*
12063  * Mutex-protected thunk to lock-free __flow_dv_translate().
12064  */
12065 static int
12066 flow_dv_translate(struct rte_eth_dev *dev,
12067                   struct mlx5_flow *dev_flow,
12068                   const struct rte_flow_attr *attr,
12069                   const struct rte_flow_item items[],
12070                   const struct rte_flow_action actions[],
12071                   struct rte_flow_error *error)
12072 {
12073         int ret;
12074
12075         flow_dv_shared_lock(dev);
12076         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
12077         flow_dv_shared_unlock(dev);
12078         return ret;
12079 }
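/*
 * Illustrative only: a hedged sketch of the pattern the
 * flow_dv_shared_lock()/flow_dv_shared_unlock() helpers used by the
 * thunks in this section are assumed to follow: a pthread mutex kept
 * in the shared device context and taken only when that context is
 * really shared between ports. The sh->refcnt and sh->dv_mutex field
 * names are assumptions for this sketch.
 */
static __rte_unused void
example_shared_lock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	/* Single-port contexts need no serialization. */
	if (sh->refcnt > 1)
		claim_zero(pthread_mutex_lock(&sh->dv_mutex));
}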
12080
12081 /*
12082  * Mutex-protected thunk to lock-free __flow_dv_apply().
12083  */
12084 static int
12085 flow_dv_apply(struct rte_eth_dev *dev,
12086               struct rte_flow *flow,
12087               struct rte_flow_error *error)
12088 {
12089         int ret;
12090
12091         flow_dv_shared_lock(dev);
12092         ret = __flow_dv_apply(dev, flow, error);
12093         flow_dv_shared_unlock(dev);
12094         return ret;
12095 }
12096
12097 /*
12098  * Mutex-protected thunk to lock-free __flow_dv_remove().
12099  */
12100 static void
12101 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
12102 {
12103         flow_dv_shared_lock(dev);
12104         __flow_dv_remove(dev, flow);
12105         flow_dv_shared_unlock(dev);
12106 }
12107
12108 /*
12109  * Mutex-protected thunk to lock-free __flow_dv_destroy().
12110  */
12111 static void
12112 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
12113 {
12114         flow_dv_shared_lock(dev);
12115         __flow_dv_destroy(dev, flow);
12116         flow_dv_shared_unlock(dev);
12117 }
12118
12119 /*
12120  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12121  */
12122 static uint32_t
12123 flow_dv_counter_allocate(struct rte_eth_dev *dev)
12124 {
12125         uint32_t cnt;
12126
12127         flow_dv_shared_lock(dev);
12128         cnt = flow_dv_counter_alloc(dev, 0);
12129         flow_dv_shared_unlock(dev);
12130         return cnt;
12131 }
12132
12133 /*
12134  * Mutex-protected thunk to lock-free flow_dv_counter_release().
12135  */
12136 static void
12137 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
12138 {
12139         flow_dv_shared_lock(dev);
12140         flow_dv_counter_release(dev, cnt);
12141         flow_dv_shared_unlock(dev);
12142 }
12143
12144 /**
12145  * Validate shared action.
12146  * Dispatcher for action type specific validation.
12147  *
12148  * @param[in] dev
12149  *   Pointer to the Ethernet device structure.
12150  * @param[in] conf
12151  *   Shared action configuration.
12152  * @param[in] action
12153  *   The shared action object to validate.
12154  * @param[out] error
12155  *   Perform verbose error reporting if not NULL. Initialized in case of
12156  *   error only.
12157  *
12158  * @return
12159  *   0 on success, otherwise negative errno value.
12160  */
12161 static int
12162 flow_dv_action_validate(struct rte_eth_dev *dev,
12163                         const struct rte_flow_shared_action_conf *conf,
12164                         const struct rte_flow_action *action,
12165                         struct rte_flow_error *error)
12166 {
12167         RTE_SET_USED(conf);
12168         switch (action->type) {
12169         case RTE_FLOW_ACTION_TYPE_RSS:
12170                 return mlx5_validate_action_rss(dev, action, error);
12171         default:
12172                 return rte_flow_error_set(error, ENOTSUP,
12173                                           RTE_FLOW_ERROR_TYPE_ACTION,
12174                                           NULL,
12175                                           "action type not supported");
12176         }
12177 }
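/*
 * Illustrative only: the dispatcher above is reached through
 * rte_flow_shared_action_create(), which validates before creating. A
 * hedged sketch with the only action type accepted here (RSS); the
 * queue list and RSS types are placeholder values.
 */
static __rte_unused struct rte_flow_shared_action *
example_create_shared_rss(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1 };
	const struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IP,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};
	const struct rte_flow_shared_action_conf conf = {
		.ingress = 1,
	};
	struct rte_flow_error error;

	return rte_flow_shared_action_create(port_id, &conf, &action, &error);
}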
12178
12179 /*
12180  * Mutex-protected thunk to lock-free __flow_dv_action_create().
12181  */
12182 static struct rte_flow_shared_action *
12183 flow_dv_action_create(struct rte_eth_dev *dev,
12184                       const struct rte_flow_shared_action_conf *conf,
12185                       const struct rte_flow_action *action,
12186                       struct rte_flow_error *error)
12187 {
12188         struct rte_flow_shared_action *shared_action = NULL;
12189
12190         flow_dv_shared_lock(dev);
12191         shared_action = __flow_dv_action_create(dev, conf, action, error);
12192         flow_dv_shared_unlock(dev);
12193         return shared_action;
12194 }
12195
12196 /*
12197  * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
12198  */
12199 static int
12200 flow_dv_action_destroy(struct rte_eth_dev *dev,
12201                        struct rte_flow_shared_action *action,
12202                        struct rte_flow_error *error)
12203 {
12204         int ret;
12205
12206         flow_dv_shared_lock(dev);
12207         ret = __flow_dv_action_destroy(dev, action, error);
12208         flow_dv_shared_unlock(dev);
12209         return ret;
12210 }
12211
12212 /*
12213  * Mutex-protected thunk to lock-free __flow_dv_action_update().
12214  */
12215 static int
12216 flow_dv_action_update(struct rte_eth_dev *dev,
12217                       struct rte_flow_shared_action *action,
12218                       const void *action_conf,
12219                       struct rte_flow_error *error)
12220 {
12221         int ret;
12222
12223         flow_dv_shared_lock(dev);
12224         ret = __flow_dv_action_update(dev, action, action_conf,
12225                                       error);
12226         flow_dv_shared_unlock(dev);
12227         return ret;
12228 }
12229
12230 static int
12231 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12232 {
12233         struct mlx5_priv *priv = dev->data->dev_private;
12234         int ret = 0;
12235
12236         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12237                 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12238                                                 flags);
12239                 if (ret != 0)
12240                         return ret;
12241         }
12242         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12243                 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12244                 if (ret != 0)
12245                         return ret;
12246         }
12247         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12248                 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
12249                 if (ret != 0)
12250                         return ret;
12251         }
12252         return 0;
12253 }
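/*
 * Illustrative only: flow_dv_sync_domain() is assumed to be exposed
 * through the PMD-specific rte_pmd_mlx5_sync_flow() API declared in
 * rte_pmd_mlx5.h. A hedged sketch requesting synchronization of all
 * three steering domains at once.
 */
static __rte_unused int
example_sync_all_domains(uint16_t port_id)
{
	return rte_pmd_mlx5_sync_flow(port_id,
				      MLX5_DOMAIN_BIT_NIC_RX |
				      MLX5_DOMAIN_BIT_NIC_TX |
				      MLX5_DOMAIN_BIT_FDB);
}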
12254
12255 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
12256         .validate = flow_dv_validate,
12257         .prepare = flow_dv_prepare,
12258         .translate = flow_dv_translate,
12259         .apply = flow_dv_apply,
12260         .remove = flow_dv_remove,
12261         .destroy = flow_dv_destroy,
12262         .query = flow_dv_query,
12263         .create_mtr_tbls = flow_dv_create_mtr_tbl,
12264         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
12265         .create_policer_rules = flow_dv_create_policer_rules,
12266         .destroy_policer_rules = flow_dv_destroy_policer_rules,
12267         .counter_alloc = flow_dv_counter_allocate,
12268         .counter_free = flow_dv_counter_free,
12269         .counter_query = flow_dv_counter_query,
12270         .get_aged_flows = flow_get_aged_flows,
12271         .action_validate = flow_dv_action_validate,
12272         .action_create = flow_dv_action_create,
12273         .action_destroy = flow_dv_action_destroy,
12274         .action_update = flow_dv_action_update,
12275         .sync_domain = flow_dv_sync_domain,
12276 };
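/*
 * Illustrative only: a hedged sketch of how the generic flow layer is
 * assumed to dispatch through the ops table above. The real selection
 * by driver type lives in mlx5_flow.c; the direct reference to
 * mlx5_flow_dv_drv_ops here merely stands in for that lookup.
 */
static __rte_unused int
example_query_via_ops(struct rte_eth_dev *dev, uint32_t counter,
		      uint64_t *pkts, uint64_t *bytes)
{
	const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;

	/* Indirect call resolving to flow_dv_counter_query() above. */
	return fops->counter_query(dev, counter, false, pkts, bytes);
}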
12277
12278 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
12279