net/mlx5: make flow modify action list thread safe
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24
25 #include <mlx5_glue.h>
26 #include <mlx5_devx_cmds.h>
27 #include <mlx5_prm.h>
28 #include <mlx5_malloc.h>
29
30 #include "mlx5_defs.h"
31 #include "mlx5.h"
32 #include "mlx5_common_os.h"
33 #include "mlx5_flow.h"
34 #include "mlx5_flow_os.h"
35 #include "mlx5_rxtx.h"
36 #include "rte_pmd_mlx5.h"
37
38 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39
40 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
41 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #endif
43
44 #ifndef HAVE_MLX5DV_DR_ESWITCH
45 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
46 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
47 #endif
48 #endif
49
50 #ifndef HAVE_MLX5DV_DR
51 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
52 #endif
53
54 /* VLAN header definitions */
55 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
56 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
57 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
58 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
59 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
60
/*
 * Cached flow attribute flags describing which L3/L4 headers the flow's
 * items match, so modify-header actions can pick the right protocol
 * header (IPv4 vs IPv6, TCP vs UDP) to rewrite.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1;    /* Set once the flags have been computed. */
		uint32_t ipv4:1;     /* Outermost L3 layer is IPv4. */
		uint32_t ipv6:1;     /* Outermost L3 layer is IPv6. */
		uint32_t tcp:1;      /* Outermost L4 layer is TCP. */
		uint32_t udp:1;      /* Outermost L4 layer is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* Aggregate view; allows clearing all flags at once. */
};
72
/*
 * Forward declarations of resource-release helpers that are referenced
 * before their definitions later in this file.
 */
static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				      uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	uint64_t layers = dev_flow->handle->layers;

	/*
	 * If layers is already initialized, it means this dev_flow is the
	 * suffix flow, the layers flags is set by the prefix flow. Need to
	 * use the layer flags from prefix flow as the suffix flow may not
	 * have the user defined items as the flow is split.
	 */
	if (layers) {
		/* L3: IPv4 and IPv6 are mutually exclusive. */
		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		/* L4: TCP and UDP are mutually exclusive. */
		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	/* Otherwise scan the user-provided item list directly. */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
			/*
			 * After decap, only the inner headers matter: drop
			 * everything collected for the outer headers so far.
			 */
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			/* IP-in-IP tunnel: restart collection for inner. */
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			/* IP-in-IP tunnel: restart collection for inner. */
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}
180
181 /**
182  * Convert rte_mtr_color to mlx5 color.
183  *
184  * @param[in] rcol
185  *   rte_mtr_color.
186  *
187  * @return
188  *   mlx5 color.
189  */
190 static int
191 rte_col_2_mlx5_col(enum rte_color rcol)
192 {
193         switch (rcol) {
194         case RTE_COLOR_GREEN:
195                 return MLX5_FLOW_COLOR_GREEN;
196         case RTE_COLOR_YELLOW:
197                 return MLX5_FLOW_COLOR_YELLOW;
198         case RTE_COLOR_RED:
199                 return MLX5_FLOW_COLOR_RED;
200         default:
201                 break;
202         }
203         return MLX5_FLOW_COLOR_UNDEFINED;
204 }
205
/* Descriptor of one protocol-header field a modify-header action can touch. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* HW modification field identifier. */
};
211
/*
 * Per-protocol field tables consumed by flow_dv_convert_modify_action().
 * Each table lists the modifiable fields of one header type; a zero-size
 * entry terminates the list.
 */

/* Ethernet: destination and source MAC, split into 32+16 bit halves. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

/* Outermost VLAN VID field. */
struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

/* IPv4: DSCP, TTL and both addresses. */
struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

/* IPv6: DSCP, hop limit, and both 128-bit addresses in 32-bit chunks. */
struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

/* UDP: source and destination ports. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

/* TCP: ports plus sequence and acknowledgment numbers. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
261
262 static void
263 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
264                           uint8_t next_protocol, uint64_t *item_flags,
265                           int *tunnel)
266 {
267         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
268                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
269         if (next_protocol == IPPROTO_IPIP) {
270                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
271                 *tunnel = 1;
272         }
273         if (next_protocol == IPPROTO_IPV6) {
274                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
275                 *tunnel = 1;
276         }
277 }
278
279 /**
280  * Acquire the synchronizing object to protect multithreaded access
281  * to shared dv context. Lock occurs only if context is actually
282  * shared, i.e. we have multiport IB device and representors are
283  * created.
284  *
285  * @param[in] dev
286  *   Pointer to the rte_eth_dev structure.
287  */
288 static void
289 flow_dv_shared_lock(struct rte_eth_dev *dev)
290 {
291         struct mlx5_priv *priv = dev->data->dev_private;
292         struct mlx5_dev_ctx_shared *sh = priv->sh;
293
294         if (sh->refcnt > 1) {
295                 int ret;
296
297                 ret = pthread_mutex_lock(&sh->dv_mutex);
298                 MLX5_ASSERT(!ret);
299                 (void)ret;
300         }
301 }
302
303 static void
304 flow_dv_shared_unlock(struct rte_eth_dev *dev)
305 {
306         struct mlx5_priv *priv = dev->data->dev_private;
307         struct mlx5_dev_ctx_shared *sh = priv->sh;
308
309         if (sh->refcnt > 1) {
310                 int ret;
311
312                 ret = pthread_mutex_unlock(&sh->dv_mutex);
313                 MLX5_ASSERT(!ret);
314                 (void)ret;
315         }
316 }
317
318 /* Update VLAN's VID/PCP based on input rte_flow_action.
319  *
320  * @param[in] action
321  *   Pointer to struct rte_flow_action.
322  * @param[out] vlan
323  *   Pointer to struct rte_vlan_hdr.
324  */
325 static void
326 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
327                          struct rte_vlan_hdr *vlan)
328 {
329         uint16_t vlan_tci;
330         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
331                 vlan_tci =
332                     ((const struct rte_flow_action_of_set_vlan_pcp *)
333                                                action->conf)->vlan_pcp;
334                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
335                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
336                 vlan->vlan_tci |= vlan_tci;
337         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
338                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
339                 vlan->vlan_tci |= rte_be_to_cpu_16
340                     (((const struct rte_flow_action_of_set_vlan_vid *)
341                                              action->conf)->vlan_vid);
342         }
343 }
344
345 /**
346  * Fetch 1, 2, 3 or 4 byte field from the byte array
347  * and return as unsigned integer in host-endian format.
348  *
349  * @param[in] data
350  *   Pointer to data array.
351  * @param[in] size
352  *   Size of field to extract.
353  *
354  * @return
355  *   converted field in host endian format.
356  */
357 static inline uint32_t
358 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
359 {
360         uint32_t ret;
361
362         switch (size) {
363         case 1:
364                 ret = *data;
365                 break;
366         case 2:
367                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
368                 break;
369         case 3:
370                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
371                 ret = (ret << 8) | *(data + sizeof(uint16_t));
372                 break;
373         case 4:
374                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
375                 break;
376         default:
377                 MLX5_ASSERT(false);
378                 ret = 0;
379                 break;
380         }
381         return ret;
382 }
383
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	/* Append after any commands already accumulated in the resource. */
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented as in big-endian format either.
	 * Mask must be always present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		/* A zero mask means this field is untouched; skip it. */
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		/* Length 0 encodes a full 32-bit modification in HW. */
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			/* Negative dcopy offset means "mirror the source". */
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	/* No command was emitted: every mask in the item was zero. */
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
485
486 /**
487  * Convert modify-header set IPv4 address action to DV specification.
488  *
489  * @param[in,out] resource
490  *   Pointer to the modify-header resource.
491  * @param[in] action
492  *   Pointer to action specification.
493  * @param[out] error
494  *   Pointer to the error structure.
495  *
496  * @return
497  *   0 on success, a negative errno value otherwise and rte_errno is set.
498  */
499 static int
500 flow_dv_convert_action_modify_ipv4
501                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
502                          const struct rte_flow_action *action,
503                          struct rte_flow_error *error)
504 {
505         const struct rte_flow_action_set_ipv4 *conf =
506                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
507         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
508         struct rte_flow_item_ipv4 ipv4;
509         struct rte_flow_item_ipv4 ipv4_mask;
510
511         memset(&ipv4, 0, sizeof(ipv4));
512         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
513         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
514                 ipv4.hdr.src_addr = conf->ipv4_addr;
515                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
516         } else {
517                 ipv4.hdr.dst_addr = conf->ipv4_addr;
518                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
519         }
520         item.spec = &ipv4;
521         item.mask = &ipv4_mask;
522         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
523                                              MLX5_MODIFICATION_TYPE_SET, error);
524 }
525
526 /**
527  * Convert modify-header set IPv6 address action to DV specification.
528  *
529  * @param[in,out] resource
530  *   Pointer to the modify-header resource.
531  * @param[in] action
532  *   Pointer to action specification.
533  * @param[out] error
534  *   Pointer to the error structure.
535  *
536  * @return
537  *   0 on success, a negative errno value otherwise and rte_errno is set.
538  */
539 static int
540 flow_dv_convert_action_modify_ipv6
541                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
542                          const struct rte_flow_action *action,
543                          struct rte_flow_error *error)
544 {
545         const struct rte_flow_action_set_ipv6 *conf =
546                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
547         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
548         struct rte_flow_item_ipv6 ipv6;
549         struct rte_flow_item_ipv6 ipv6_mask;
550
551         memset(&ipv6, 0, sizeof(ipv6));
552         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
553         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
554                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
555                        sizeof(ipv6.hdr.src_addr));
556                 memcpy(&ipv6_mask.hdr.src_addr,
557                        &rte_flow_item_ipv6_mask.hdr.src_addr,
558                        sizeof(ipv6.hdr.src_addr));
559         } else {
560                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
561                        sizeof(ipv6.hdr.dst_addr));
562                 memcpy(&ipv6_mask.hdr.dst_addr,
563                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
564                        sizeof(ipv6.hdr.dst_addr));
565         }
566         item.spec = &ipv6;
567         item.mask = &ipv6_mask;
568         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
569                                              MLX5_MODIFICATION_TYPE_SET, error);
570 }
571
572 /**
573  * Convert modify-header set MAC address action to DV specification.
574  *
575  * @param[in,out] resource
576  *   Pointer to the modify-header resource.
577  * @param[in] action
578  *   Pointer to action specification.
579  * @param[out] error
580  *   Pointer to the error structure.
581  *
582  * @return
583  *   0 on success, a negative errno value otherwise and rte_errno is set.
584  */
585 static int
586 flow_dv_convert_action_modify_mac
587                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
588                          const struct rte_flow_action *action,
589                          struct rte_flow_error *error)
590 {
591         const struct rte_flow_action_set_mac *conf =
592                 (const struct rte_flow_action_set_mac *)(action->conf);
593         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
594         struct rte_flow_item_eth eth;
595         struct rte_flow_item_eth eth_mask;
596
597         memset(&eth, 0, sizeof(eth));
598         memset(&eth_mask, 0, sizeof(eth_mask));
599         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
600                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
601                        sizeof(eth.src.addr_bytes));
602                 memcpy(&eth_mask.src.addr_bytes,
603                        &rte_flow_item_eth_mask.src.addr_bytes,
604                        sizeof(eth_mask.src.addr_bytes));
605         } else {
606                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
607                        sizeof(eth.dst.addr_bytes));
608                 memcpy(&eth_mask.dst.addr_bytes,
609                        &rte_flow_item_eth_mask.dst.addr_bytes,
610                        sizeof(eth_mask.dst.addr_bytes));
611         }
612         item.spec = &eth;
613         item.mask = &eth_mask;
614         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
615                                              MLX5_MODIFICATION_TYPE_SET, error);
616 }
617
/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	/* VID table encodes size in bits (12), unlike the byte tables. */
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	/* Convert the command header word to big-endian format. */
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	/*
	 * vlan_vid is already big-endian; shifting it into the upper
	 * 16 bits places it where HW expects the data in data1.
	 */
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}
659
/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	/* Lazily compute the L3/L4 attributes from the item list. */
	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		/* Validation guarantees the L4 layer is either UDP or TCP. */
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
737
738 /**
739  * Convert modify-header set TTL action to DV specification.
740  *
741  * @param[in,out] resource
742  *   Pointer to the modify-header resource.
743  * @param[in] action
744  *   Pointer to action specification.
745  * @param[in] items
746  *   Pointer to rte_flow_item objects list.
747  * @param[in] attr
748  *   Pointer to flow attributes structure.
749  * @param[in] dev_flow
750  *   Pointer to the sub flow.
751  * @param[in] tunnel_decap
752  *   Whether action is after tunnel decapsulation.
753  * @param[out] error
754  *   Pointer to the error structure.
755  *
756  * @return
757  *   0 on success, a negative errno value otherwise and rte_errno is set.
758  */
759 static int
760 flow_dv_convert_action_modify_ttl
761                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
762                          const struct rte_flow_action *action,
763                          const struct rte_flow_item *items,
764                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
765                          bool tunnel_decap, struct rte_flow_error *error)
766 {
767         const struct rte_flow_action_set_ttl *conf =
768                 (const struct rte_flow_action_set_ttl *)(action->conf);
769         struct rte_flow_item item;
770         struct rte_flow_item_ipv4 ipv4;
771         struct rte_flow_item_ipv4 ipv4_mask;
772         struct rte_flow_item_ipv6 ipv6;
773         struct rte_flow_item_ipv6 ipv6_mask;
774         struct field_modify_info *field;
775
776         if (!attr->valid)
777                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
778         if (attr->ipv4) {
779                 memset(&ipv4, 0, sizeof(ipv4));
780                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
781                 ipv4.hdr.time_to_live = conf->ttl_value;
782                 ipv4_mask.hdr.time_to_live = 0xFF;
783                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
784                 item.spec = &ipv4;
785                 item.mask = &ipv4_mask;
786                 field = modify_ipv4;
787         } else {
788                 MLX5_ASSERT(attr->ipv6);
789                 memset(&ipv6, 0, sizeof(ipv6));
790                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
791                 ipv6.hdr.hop_limits = conf->ttl_value;
792                 ipv6_mask.hdr.hop_limits = 0xFF;
793                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
794                 item.spec = &ipv6;
795                 item.mask = &ipv6_mask;
796                 field = modify_ipv6;
797         }
798         return flow_dv_convert_modify_action(&item, field, NULL, resource,
799                                              MLX5_MODIFICATION_TYPE_SET, error);
800 }
801
/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	/* Deduce the IP version of the header to rewrite, once per flow. */
	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		/*
		 * ADD of 0xFF to the 8-bit TTL wraps around, i.e.
		 * decrements the field by one (HW has no decrement).
		 */
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		/* Same wrap-around trick for the IPv6 hop limit. */
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
862
863 /**
864  * Convert modify-header increment/decrement TCP Sequence number
865  * to DV specification.
866  *
867  * @param[in,out] resource
868  *   Pointer to the modify-header resource.
869  * @param[in] action
870  *   Pointer to action specification.
871  * @param[out] error
872  *   Pointer to the error structure.
873  *
874  * @return
875  *   0 on success, a negative errno value otherwise and rte_errno is set.
876  */
877 static int
878 flow_dv_convert_action_modify_tcp_seq
879                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
880                          const struct rte_flow_action *action,
881                          struct rte_flow_error *error)
882 {
883         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
884         uint64_t value = rte_be_to_cpu_32(*conf);
885         struct rte_flow_item item;
886         struct rte_flow_item_tcp tcp;
887         struct rte_flow_item_tcp tcp_mask;
888
889         memset(&tcp, 0, sizeof(tcp));
890         memset(&tcp_mask, 0, sizeof(tcp_mask));
891         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
892                 /*
893                  * The HW has no decrement operation, only increment operation.
894                  * To simulate decrement X from Y using increment operation
895                  * we need to add UINT32_MAX X times to Y.
896                  * Each adding of UINT32_MAX decrements Y by 1.
897                  */
898                 value *= UINT32_MAX;
899         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
900         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
901         item.type = RTE_FLOW_ITEM_TYPE_TCP;
902         item.spec = &tcp;
903         item.mask = &tcp_mask;
904         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
905                                              MLX5_MODIFICATION_TYPE_ADD, error);
906 }
907
908 /**
909  * Convert modify-header increment/decrement TCP Acknowledgment number
910  * to DV specification.
911  *
912  * @param[in,out] resource
913  *   Pointer to the modify-header resource.
914  * @param[in] action
915  *   Pointer to action specification.
916  * @param[out] error
917  *   Pointer to the error structure.
918  *
919  * @return
920  *   0 on success, a negative errno value otherwise and rte_errno is set.
921  */
922 static int
923 flow_dv_convert_action_modify_tcp_ack
924                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
925                          const struct rte_flow_action *action,
926                          struct rte_flow_error *error)
927 {
928         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
929         uint64_t value = rte_be_to_cpu_32(*conf);
930         struct rte_flow_item item;
931         struct rte_flow_item_tcp tcp;
932         struct rte_flow_item_tcp tcp_mask;
933
934         memset(&tcp, 0, sizeof(tcp));
935         memset(&tcp_mask, 0, sizeof(tcp_mask));
936         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
937                 /*
938                  * The HW has no decrement operation, only increment operation.
939                  * To simulate decrement X from Y using increment operation
940                  * we need to add UINT32_MAX X times to Y.
941                  * Each adding of UINT32_MAX decrements Y by 1.
942                  */
943                 value *= UINT32_MAX;
944         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
945         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
946         item.type = RTE_FLOW_ITEM_TYPE_TCP;
947         item.spec = &tcp;
948         item.mask = &tcp_mask;
949         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
950                                              MLX5_MODIFICATION_TYPE_ADD, error);
951 }
952
/*
 * Translation table from metadata register ids (enum modify_reg) to the
 * matching modify-header field identifiers used in modification commands.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
966
967 /**
968  * Convert register set to DV specification.
969  *
970  * @param[in,out] resource
971  *   Pointer to the modify-header resource.
972  * @param[in] action
973  *   Pointer to action specification.
974  * @param[out] error
975  *   Pointer to the error structure.
976  *
977  * @return
978  *   0 on success, a negative errno value otherwise and rte_errno is set.
979  */
980 static int
981 flow_dv_convert_action_set_reg
982                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
983                          const struct rte_flow_action *action,
984                          struct rte_flow_error *error)
985 {
986         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
987         struct mlx5_modification_cmd *actions = resource->actions;
988         uint32_t i = resource->actions_num;
989
990         if (i >= MLX5_MAX_MODIFY_NUM)
991                 return rte_flow_error_set(error, EINVAL,
992                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
993                                           "too many items to modify");
994         MLX5_ASSERT(conf->id != REG_NON);
995         MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
996         actions[i] = (struct mlx5_modification_cmd) {
997                 .action_type = MLX5_MODIFICATION_TYPE_SET,
998                 .field = reg_to_field[conf->id],
999         };
1000         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1001         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1002         ++i;
1003         resource->actions_num = i;
1004         return 0;
1005 }
1006
1007 /**
1008  * Convert SET_TAG action to DV specification.
1009  *
1010  * @param[in] dev
1011  *   Pointer to the rte_eth_dev structure.
1012  * @param[in,out] resource
1013  *   Pointer to the modify-header resource.
1014  * @param[in] conf
1015  *   Pointer to action specification.
1016  * @param[out] error
1017  *   Pointer to the error structure.
1018  *
1019  * @return
1020  *   0 on success, a negative errno value otherwise and rte_errno is set.
1021  */
1022 static int
1023 flow_dv_convert_action_set_tag
1024                         (struct rte_eth_dev *dev,
1025                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1026                          const struct rte_flow_action_set_tag *conf,
1027                          struct rte_flow_error *error)
1028 {
1029         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1030         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1031         struct rte_flow_item item = {
1032                 .spec = &data,
1033                 .mask = &mask,
1034         };
1035         struct field_modify_info reg_c_x[] = {
1036                 [1] = {0, 0, 0},
1037         };
1038         enum mlx5_modification_field reg_type;
1039         int ret;
1040
1041         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1042         if (ret < 0)
1043                 return ret;
1044         MLX5_ASSERT(ret != REG_NON);
1045         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1046         reg_type = reg_to_field[ret];
1047         MLX5_ASSERT(reg_type > 0);
1048         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1049         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1050                                              MLX5_MODIFICATION_TYPE_SET, error);
1051 }
1052
1053 /**
1054  * Convert internal COPY_REG action to DV specification.
1055  *
1056  * @param[in] dev
1057  *   Pointer to the rte_eth_dev structure.
1058  * @param[in,out] res
1059  *   Pointer to the modify-header resource.
1060  * @param[in] action
1061  *   Pointer to action specification.
1062  * @param[out] error
1063  *   Pointer to the error structure.
1064  *
1065  * @return
1066  *   0 on success, a negative errno value otherwise and rte_errno is set.
1067  */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	/* Default: copy the full 32-bit register. */
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		/* Only the bits set in dv_regc0_mask are usable in reg_c[0]. */
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1128
1129 /**
1130  * Convert MARK action to DV specification. This routine is used
1131  * in extensive metadata only and requires metadata register to be
1132  * handled. In legacy mode hardware tag resource is engaged.
1133  *
1134  * @param[in] dev
1135  *   Pointer to the rte_eth_dev structure.
1136  * @param[in] conf
1137  *   Pointer to MARK action specification.
1138  * @param[in,out] resource
1139  *   Pointer to the modify-header resource.
1140  * @param[out] error
1141  *   Pointer to the error structure.
1142  *
1143  * @return
1144  *   0 on success, a negative errno value otherwise and rte_errno is set.
1145  */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Only the mark bits supported by HW (dv_mark_mask) can be set. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		/*
		 * Only the bits reported in dv_regc0_mask are usable in
		 * reg_c[0]: shift value and mask into that bit range.
		 * Shifts are done in CPU byte order, hence the byte swaps
		 * around the shift operations.
		 */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1185
1186 /**
1187  * Get metadata register index for specified steering domain.
1188  *
1189  * @param[in] dev
1190  *   Pointer to the rte_eth_dev structure.
1191  * @param[in] attr
1192  *   Attributes of flow to determine steering domain.
1193  * @param[out] error
1194  *   Pointer to the error structure.
1195  *
1196  * @return
1197  *   positive index on success, a negative errno value otherwise
1198  *   and rte_errno is set.
1199  */
1200 static enum modify_reg
1201 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1202                          const struct rte_flow_attr *attr,
1203                          struct rte_flow_error *error)
1204 {
1205         int reg =
1206                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1207                                           MLX5_METADATA_FDB :
1208                                             attr->egress ?
1209                                             MLX5_METADATA_TX :
1210                                             MLX5_METADATA_RX, 0, error);
1211         if (reg < 0)
1212                 return rte_flow_error_set(error,
1213                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1214                                           NULL, "unavailable "
1215                                           "metadata register");
1216         return reg;
1217 }
1218
1219 /**
1220  * Convert SET_META action to DV specification.
1221  *
1222  * @param[in] dev
1223  *   Pointer to the rte_eth_dev structure.
1224  * @param[in,out] resource
1225  *   Pointer to the modify-header resource.
1226  * @param[in] attr
1227  *   Attributes of flow that includes this item.
1228  * @param[in] conf
1229  *   Pointer to action specification.
1230  * @param[out] error
1231  *   Pointer to the error structure.
1232  *
1233  * @return
1234  *   0 on success, a negative errno value otherwise and rte_errno is set.
1235  */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	/* Pick the metadata register for this steering domain. */
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	/*
	 * In datapath code there are no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		/* Only the bits set in dv_regc0_mask are usable in reg_c[0]. */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1282
1283 /**
1284  * Convert modify-header set IPv4 DSCP action to DV specification.
1285  *
1286  * @param[in,out] resource
1287  *   Pointer to the modify-header resource.
1288  * @param[in] action
1289  *   Pointer to action specification.
1290  * @param[out] error
1291  *   Pointer to the error structure.
1292  *
1293  * @return
1294  *   0 on success, a negative errno value otherwise and rte_errno is set.
1295  */
1296 static int
1297 flow_dv_convert_action_modify_ipv4_dscp
1298                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1299                          const struct rte_flow_action *action,
1300                          struct rte_flow_error *error)
1301 {
1302         const struct rte_flow_action_set_dscp *conf =
1303                 (const struct rte_flow_action_set_dscp *)(action->conf);
1304         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1305         struct rte_flow_item_ipv4 ipv4;
1306         struct rte_flow_item_ipv4 ipv4_mask;
1307
1308         memset(&ipv4, 0, sizeof(ipv4));
1309         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1310         ipv4.hdr.type_of_service = conf->dscp;
1311         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1312         item.spec = &ipv4;
1313         item.mask = &ipv4_mask;
1314         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1315                                              MLX5_MODIFICATION_TYPE_SET, error);
1316 }
1317
1318 /**
1319  * Convert modify-header set IPv6 DSCP action to DV specification.
1320  *
1321  * @param[in,out] resource
1322  *   Pointer to the modify-header resource.
1323  * @param[in] action
1324  *   Pointer to action specification.
1325  * @param[out] error
1326  *   Pointer to the error structure.
1327  *
1328  * @return
1329  *   0 on success, a negative errno value otherwise and rte_errno is set.
1330  */
1331 static int
1332 flow_dv_convert_action_modify_ipv6_dscp
1333                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1334                          const struct rte_flow_action *action,
1335                          struct rte_flow_error *error)
1336 {
1337         const struct rte_flow_action_set_dscp *conf =
1338                 (const struct rte_flow_action_set_dscp *)(action->conf);
1339         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1340         struct rte_flow_item_ipv6 ipv6;
1341         struct rte_flow_item_ipv6 ipv6_mask;
1342
1343         memset(&ipv6, 0, sizeof(ipv6));
1344         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1345         /*
1346          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1347          * rdma-core only accept the DSCP bits byte aligned start from
1348          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1349          * bits in IPv6 case as rdma-core requires byte aligned value.
1350          */
1351         ipv6.hdr.vtc_flow = conf->dscp;
1352         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1353         item.spec = &ipv6;
1354         item.mask = &ipv6_mask;
1355         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1356                                              MLX5_MODIFICATION_TYPE_SET, error);
1357 }
1358
1359 /**
1360  * Validate MARK item.
1361  *
1362  * @param[in] dev
1363  *   Pointer to the rte_eth_dev structure.
1364  * @param[in] item
1365  *   Item specification.
1366  * @param[in] attr
1367  *   Attributes of flow that includes this item.
1368  * @param[out] error
1369  *   Pointer to error structure.
1370  *
1371  * @return
1372  *   0 on success, a negative errno value otherwise and rte_errno is set.
1373  */
1374 static int
1375 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1376                            const struct rte_flow_item *item,
1377                            const struct rte_flow_attr *attr __rte_unused,
1378                            struct rte_flow_error *error)
1379 {
1380         struct mlx5_priv *priv = dev->data->dev_private;
1381         struct mlx5_dev_config *config = &priv->config;
1382         const struct rte_flow_item_mark *spec = item->spec;
1383         const struct rte_flow_item_mark *mask = item->mask;
1384         const struct rte_flow_item_mark nic_mask = {
1385                 .id = priv->sh->dv_mark_mask,
1386         };
1387         int ret;
1388
1389         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1390                 return rte_flow_error_set(error, ENOTSUP,
1391                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1392                                           "extended metadata feature"
1393                                           " isn't enabled");
1394         if (!mlx5_flow_ext_mreg_supported(dev))
1395                 return rte_flow_error_set(error, ENOTSUP,
1396                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1397                                           "extended metadata register"
1398                                           " isn't supported");
1399         if (!nic_mask.id)
1400                 return rte_flow_error_set(error, ENOTSUP,
1401                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1402                                           "extended metadata register"
1403                                           " isn't available");
1404         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1405         if (ret < 0)
1406                 return ret;
1407         if (!spec)
1408                 return rte_flow_error_set(error, EINVAL,
1409                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1410                                           item->spec,
1411                                           "data cannot be empty");
1412         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1413                 return rte_flow_error_set(error, EINVAL,
1414                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1415                                           &spec->id,
1416                                           "mark id exceeds the limit");
1417         if (!mask)
1418                 mask = &nic_mask;
1419         if (!mask->id)
1420                 return rte_flow_error_set(error, EINVAL,
1421                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1422                                         "mask cannot be zero");
1423
1424         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1425                                         (const uint8_t *)&nic_mask,
1426                                         sizeof(struct rte_flow_item_mark),
1427                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1428         if (ret < 0)
1429                 return ret;
1430         return 0;
1431 }
1432
1433 /**
1434  * Validate META item.
1435  *
1436  * @param[in] dev
1437  *   Pointer to the rte_eth_dev structure.
1438  * @param[in] item
1439  *   Item specification.
1440  * @param[in] attr
1441  *   Attributes of flow that includes this item.
1442  * @param[out] error
1443  *   Pointer to error structure.
1444  *
1445  * @return
1446  *   0 on success, a negative errno value otherwise and rte_errno is set.
1447  */
1448 static int
1449 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1450                            const struct rte_flow_item *item,
1451                            const struct rte_flow_attr *attr,
1452                            struct rte_flow_error *error)
1453 {
1454         struct mlx5_priv *priv = dev->data->dev_private;
1455         struct mlx5_dev_config *config = &priv->config;
1456         const struct rte_flow_item_meta *spec = item->spec;
1457         const struct rte_flow_item_meta *mask = item->mask;
1458         struct rte_flow_item_meta nic_mask = {
1459                 .data = UINT32_MAX
1460         };
1461         int reg;
1462         int ret;
1463
1464         if (!spec)
1465                 return rte_flow_error_set(error, EINVAL,
1466                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1467                                           item->spec,
1468                                           "data cannot be empty");
1469         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1470                 if (!mlx5_flow_ext_mreg_supported(dev))
1471                         return rte_flow_error_set(error, ENOTSUP,
1472                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1473                                           "extended metadata register"
1474                                           " isn't supported");
1475                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1476                 if (reg < 0)
1477                         return reg;
1478                 if (reg == REG_B)
1479                         return rte_flow_error_set(error, ENOTSUP,
1480                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1481                                           "match on reg_b "
1482                                           "isn't supported");
1483                 if (reg != REG_A)
1484                         nic_mask.data = priv->sh->dv_meta_mask;
1485         } else if (attr->transfer) {
1486                 return rte_flow_error_set(error, ENOTSUP,
1487                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1488                                         "extended metadata feature "
1489                                         "should be enabled when "
1490                                         "meta item is requested "
1491                                         "with e-switch mode ");
1492         }
1493         if (!mask)
1494                 mask = &rte_flow_item_meta_mask;
1495         if (!mask->data)
1496                 return rte_flow_error_set(error, EINVAL,
1497                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1498                                         "mask cannot be zero");
1499
1500         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1501                                         (const uint8_t *)&nic_mask,
1502                                         sizeof(struct rte_flow_item_meta),
1503                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1504         return ret;
1505 }
1506
1507 /**
1508  * Validate TAG item.
1509  *
1510  * @param[in] dev
1511  *   Pointer to the rte_eth_dev structure.
1512  * @param[in] item
1513  *   Item specification.
1514  * @param[in] attr
1515  *   Attributes of flow that includes this item.
1516  * @param[out] error
1517  *   Pointer to error structure.
1518  *
1519  * @return
1520  *   0 on success, a negative errno value otherwise and rte_errno is set.
1521  */
static int
flow_dv_validate_item_tag(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  const struct rte_flow_attr *attr __rte_unused,
			  struct rte_flow_error *error)
{
	const struct rte_flow_item_tag *spec = item->spec;
	const struct rte_flow_item_tag *mask = item->mask;
	/* NIC capability: full match on 32-bit data and the register index. */
	const struct rte_flow_item_tag nic_mask = {
		.data = RTE_BE32(UINT32_MAX),
		.index = 0xff,
	};
	int ret;

	/* TAG item relies on the extensive metadata register feature. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extensive metadata register"
					  " isn't supported");
	/* Spec is mandatory - it carries the tag index to be matched. */
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!mask)
		mask = &rte_flow_item_tag_mask;
	/* An all-zero data mask would match every packet - reject it. */
	if (!mask->data)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					"mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_tag),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	/* The tag index must be matched exactly, no partial index mask. */
	if (mask->index != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					  "partial mask for tag index"
					  " is not supported");
	/* Translate the application tag index to a metadata register id. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	return 0;
}
1570
1571 /**
 * Validate port_id item (match on E-Switch source vport).
1573  *
1574  * @param[in] dev
1575  *   Pointer to the rte_eth_dev structure.
1576  * @param[in] item
1577  *   Item specification.
1578  * @param[in] attr
1579  *   Attributes of flow that includes this item.
1580  * @param[in] item_flags
1581  *   Bit-fields that holds the items detected until now.
1582  * @param[out] error
1583  *   Pointer to error structure.
1584  *
1585  * @return
1586  *   0 on success, a negative errno value otherwise and rte_errno is set.
1587  */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	/* Default mask when the application supplies none: full id match. */
	const struct rte_flow_item_port_id switch_mask = {
			.id = 0xffffffff,
	};
	struct mlx5_priv *esw_priv;
	struct mlx5_priv *dev_priv;
	int ret;

	/* Matching on a port id is meaningful only for E-Switch rules. */
	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	/* At most one source port item is allowed in a single rule. */
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
					  " supported");
	if (!mask)
		mask = &switch_mask;
	/* Partial masks on the id field are not supported by the HW. */
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					   mask,
					   "no support for partial mask on"
					   " \"id\" field");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
				 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret)
		return ret;
	/* Without a spec, any source port matches - nothing more to check. */
	if (!spec)
		return 0;
	esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
	if (!esw_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
					  " port");
	dev_priv = mlx5_dev_to_eswitch_info(dev);
	if (!dev_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	/* The matched port must belong to this device's E-Switch domain. */
	if (esw_priv->domain_id != dev_priv->domain_id)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
	return 0;
}
1651
1652 /**
1653  * Validate VLAN item.
1654  *
1655  * @param[in] item
1656  *   Item specification.
1657  * @param[in] item_flags
1658  *   Bit-fields that holds the items detected until now.
1659  * @param[in] dev
1660  *   Ethernet device flow is being created on.
1661  * @param[out] error
1662  *   Pointer to error structure.
1663  *
1664  * @return
1665  *   0 on success, a negative errno value otherwise and rte_errno is set.
1666  */
static int
flow_dv_validate_item_vlan(const struct rte_flow_item *item,
			   uint64_t item_flags,
			   struct rte_eth_dev *dev,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *mask = item->mask;
	/* NIC capability: full TCI, full inner ethertype, has_more_vlan bit. */
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
		.has_more_vlan = 1,
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	/* L3/L4 flags of the header stack this VLAN belongs to. */
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	/* Only one VLAN item per header stack (inner or outer). */
	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	/* VLAN is an L2 tag - it cannot appear after L3/L4 items. */
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret)
		return ret;
	/* Outer TCI masks other than VID-only need the VM workaround check. */
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * Non-NULL context means we have a virtual machine
			 * and SR-IOV enabled, we have to create VLAN interface
			 * to make hypervisor to setup E-Switch vport
			 * context correctly. We avoid creating the multiple
			 * VLAN interfaces, so we cannot support VLAN tag mask.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	return 0;
}
1725
1726 /*
1727  * GTP flags are contained in 1 byte of the format:
1728  * -------------------------------------------
1729  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1730  * |-----------------------------------------|
1731  * | value | Version | PT | Res | E | S | PN |
1732  * -------------------------------------------
1733  *
1734  * Matching is supported only for GTP flags E, S, PN.
1735  */
1736 #define MLX5_GTP_FLAGS_MASK     0x07
1737
1738 /**
1739  * Validate GTP item.
1740  *
1741  * @param[in] dev
1742  *   Pointer to the rte_eth_dev structure.
1743  * @param[in] item
1744  *   Item specification.
1745  * @param[in] item_flags
1746  *   Bit-fields that holds the items detected until now.
1747  * @param[out] error
1748  *   Pointer to error structure.
1749  *
1750  * @return
1751  *   0 on success, a negative errno value otherwise and rte_errno is set.
1752  */
1753 static int
1754 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1755                           const struct rte_flow_item *item,
1756                           uint64_t item_flags,
1757                           struct rte_flow_error *error)
1758 {
1759         struct mlx5_priv *priv = dev->data->dev_private;
1760         const struct rte_flow_item_gtp *spec = item->spec;
1761         const struct rte_flow_item_gtp *mask = item->mask;
1762         const struct rte_flow_item_gtp nic_mask = {
1763                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1764                 .msg_type = 0xff,
1765                 .teid = RTE_BE32(0xffffffff),
1766         };
1767
1768         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1769                 return rte_flow_error_set(error, ENOTSUP,
1770                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1771                                           "GTP support is not enabled");
1772         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1773                 return rte_flow_error_set(error, ENOTSUP,
1774                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1775                                           "multiple tunnel layers not"
1776                                           " supported");
1777         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1778                 return rte_flow_error_set(error, EINVAL,
1779                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1780                                           "no outer UDP layer found");
1781         if (!mask)
1782                 mask = &rte_flow_item_gtp_mask;
1783         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1784                 return rte_flow_error_set(error, ENOTSUP,
1785                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1786                                           "Match is supported for GTP"
1787                                           " flags only");
1788         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1789                                          (const uint8_t *)&nic_mask,
1790                                          sizeof(struct rte_flow_item_gtp),
1791                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1792 }
1793
1794 /**
1795  * Validate IPV4 item.
1796  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1797  * add specific validation of fragment_offset field,
1798  *
1799  * @param[in] item
1800  *   Item specification.
1801  * @param[in] item_flags
1802  *   Bit-fields that holds the items detected until now.
1803  * @param[out] error
1804  *   Pointer to error structure.
1805  *
1806  * @return
1807  *   0 on success, a negative errno value otherwise and rte_errno is set.
1808  */
1809 static int
1810 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1811                            uint64_t item_flags,
1812                            uint64_t last_item,
1813                            uint16_t ether_type,
1814                            struct rte_flow_error *error)
1815 {
1816         int ret;
1817         const struct rte_flow_item_ipv4 *spec = item->spec;
1818         const struct rte_flow_item_ipv4 *last = item->last;
1819         const struct rte_flow_item_ipv4 *mask = item->mask;
1820         rte_be16_t fragment_offset_spec = 0;
1821         rte_be16_t fragment_offset_last = 0;
1822         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1823                 .hdr = {
1824                         .src_addr = RTE_BE32(0xffffffff),
1825                         .dst_addr = RTE_BE32(0xffffffff),
1826                         .type_of_service = 0xff,
1827                         .fragment_offset = RTE_BE16(0xffff),
1828                         .next_proto_id = 0xff,
1829                         .time_to_live = 0xff,
1830                 },
1831         };
1832
1833         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1834                                            ether_type, &nic_ipv4_mask,
1835                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1836         if (ret < 0)
1837                 return ret;
1838         if (spec && mask)
1839                 fragment_offset_spec = spec->hdr.fragment_offset &
1840                                        mask->hdr.fragment_offset;
1841         if (!fragment_offset_spec)
1842                 return 0;
1843         /*
1844          * spec and mask are valid, enforce using full mask to make sure the
1845          * complete value is used correctly.
1846          */
1847         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1848                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1849                 return rte_flow_error_set(error, EINVAL,
1850                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1851                                           item, "must use full mask for"
1852                                           " fragment_offset");
1853         /*
1854          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1855          * indicating this is 1st fragment of fragmented packet.
1856          * This is not yet supported in MLX5, return appropriate error message.
1857          */
1858         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1859                 return rte_flow_error_set(error, ENOTSUP,
1860                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1861                                           "match on first fragment not "
1862                                           "supported");
1863         if (fragment_offset_spec && !last)
1864                 return rte_flow_error_set(error, ENOTSUP,
1865                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1866                                           "specified value not supported");
1867         /* spec and last are valid, validate the specified range. */
1868         fragment_offset_last = last->hdr.fragment_offset &
1869                                mask->hdr.fragment_offset;
1870         /*
1871          * Match on fragment_offset spec 0x2001 and last 0x3fff
1872          * means MF is 1 and frag-offset is > 0.
1873          * This packet is fragment 2nd and onward, excluding last.
1874          * This is not yet supported in MLX5, return appropriate
1875          * error message.
1876          */
1877         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1878             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1879                 return rte_flow_error_set(error, ENOTSUP,
1880                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1881                                           last, "match on following "
1882                                           "fragments not supported");
1883         /*
1884          * Match on fragment_offset spec 0x0001 and last 0x1fff
1885          * means MF is 0 and frag-offset is > 0.
1886          * This packet is last fragment of fragmented packet.
1887          * This is not yet supported in MLX5, return appropriate
1888          * error message.
1889          */
1890         if (fragment_offset_spec == RTE_BE16(1) &&
1891             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1892                 return rte_flow_error_set(error, ENOTSUP,
1893                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1894                                           last, "match on last "
1895                                           "fragment not supported");
1896         /*
1897          * Match on fragment_offset spec 0x0001 and last 0x3fff
1898          * means MF and/or frag-offset is not 0.
1899          * This is a fragmented packet.
1900          * Other range values are invalid and rejected.
1901          */
1902         if (!(fragment_offset_spec == RTE_BE16(1) &&
1903               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1904                 return rte_flow_error_set(error, ENOTSUP,
1905                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1906                                           "specified range not supported");
1907         return 0;
1908 }
1909
1910 /**
1911  * Validate IPV6 fragment extension item.
1912  *
1913  * @param[in] item
1914  *   Item specification.
1915  * @param[in] item_flags
1916  *   Bit-fields that holds the items detected until now.
1917  * @param[out] error
1918  *   Pointer to error structure.
1919  *
1920  * @return
1921  *   0 on success, a negative errno value otherwise and rte_errno is set.
1922  */
static int
flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
				    uint64_t item_flags,
				    struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
	const struct rte_flow_item_ipv6_frag_ext *last = item->last;
	const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
	rte_be16_t frag_data_spec = 0;
	rte_be16_t frag_data_last = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	/* L4 flag of the header stack this extension belongs to. */
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret = 0;
	/* Matchable fields: next header byte and the 16-bit frag data. */
	struct rte_flow_item_ipv6_frag_ext nic_mask = {
		.hdr = {
			.next_header = 0xff,
			.frag_data = RTE_BE16(0xffff),
		},
	};

	/* The extension header sits between L3 and L4 - not after L4. */
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item cannot "
					  "follow L4 item.");
	/* It must directly follow an IPv6 item of the same header stack. */
	if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
	    (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item must "
					  "follow ipv6 item");
	if (spec && mask)
		frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
	/* No effective frag_data match requested - nothing more to check. */
	if (!frag_data_spec)
		return 0;
	/*
	 * spec and mask are valid, enforce using full mask to make sure the
	 * complete value is used correctly.
	 */
	if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
				RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " frag_data");
	/*
	 * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
	 * This is 1st fragment of fragmented packet.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* frag_data_spec is non-zero here; any other match needs a range. */
	if (frag_data_spec && !last)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&nic_mask,
				 sizeof(struct rte_flow_item_ipv6_frag_ext),
				 MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret)
		return ret;
	/* spec and last are valid, validate the specified range. */
	frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
	/*
	 * Match on frag_data spec 0x0009 and last 0xfff9
	 * means M is 1 and frag-offset is > 0.
	 * This packet is fragment 2nd and onward, excluding last.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
				       RTE_IPV6_EHDR_MF_MASK) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Match on frag_data spec 0x0008 and last 0xfff8
	 * means M is 0 and frag-offset is > 0.
	 * This packet is last fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/* Other range values are invalid and rejected. */
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
				  "specified range not supported");
}
2023
2024 /**
2025  * Validate the pop VLAN action.
2026  *
2027  * @param[in] dev
2028  *   Pointer to the rte_eth_dev structure.
2029  * @param[in] action_flags
2030  *   Holds the actions detected until now.
2031  * @param[in] action
2032  *   Pointer to the pop vlan action.
2033  * @param[in] item_flags
2034  *   The items found in this flow rule.
2035  * @param[in] attr
2036  *   Pointer to flow attributes.
2037  * @param[out] error
2038  *   Pointer to error structure.
2039  *
2040  * @return
2041  *   0 on success, a negative errno value otherwise and rte_errno is set.
2042  */
2043 static int
2044 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2045                                  uint64_t action_flags,
2046                                  const struct rte_flow_action *action,
2047                                  uint64_t item_flags,
2048                                  const struct rte_flow_attr *attr,
2049                                  struct rte_flow_error *error)
2050 {
2051         const struct mlx5_priv *priv = dev->data->dev_private;
2052
2053         (void)action;
2054         (void)attr;
2055         if (!priv->sh->pop_vlan_action)
2056                 return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2058                                           NULL,
2059                                           "pop vlan action is not supported");
2060         if (attr->egress)
2061                 return rte_flow_error_set(error, ENOTSUP,
2062                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2063                                           NULL,
2064                                           "pop vlan action not supported for "
2065                                           "egress");
2066         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2067                 return rte_flow_error_set(error, ENOTSUP,
2068                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2069                                           "no support for multiple VLAN "
2070                                           "actions");
2071         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2072         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2073             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2074                 return rte_flow_error_set(error, ENOTSUP,
2075                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2076                                           NULL,
2077                                           "cannot pop vlan after decap without "
2078                                           "match on inner vlan in the flow");
2079         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2080         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2081             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2082                 return rte_flow_error_set(error, ENOTSUP,
2083                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2084                                           NULL,
2085                                           "cannot pop vlan without a "
2086                                           "match on (outer) vlan in the flow");
2087         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2088                 return rte_flow_error_set(error, EINVAL,
2089                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2090                                           "wrong action order, port_id should "
2091                                           "be after pop VLAN action");
2092         if (!attr->transfer && priv->representor)
2093                 return rte_flow_error_set(error, ENOTSUP,
2094                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2095                                           "pop vlan action for VF representor "
2096                                           "not supported on NIC table");
2097         return 0;
2098 }
2099
2100 /**
2101  * Get VLAN default info from vlan match info.
2102  *
2103  * @param[in] items
2104  *   the list of item specifications.
2105  * @param[out] vlan
2106  *   pointer VLAN info to fill to.
2107  *
 * @return
 *   Nothing; *vlan is updated in place from the first VLAN item found
 *   in the pattern, if any.
2110  */
2111 static void
2112 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2113                                   struct rte_vlan_hdr *vlan)
2114 {
2115         const struct rte_flow_item_vlan nic_mask = {
2116                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2117                                 MLX5DV_FLOW_VLAN_VID_MASK),
2118                 .inner_type = RTE_BE16(0xffff),
2119         };
2120
2121         if (items == NULL)
2122                 return;
2123         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2124                 int type = items->type;
2125
2126                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2127                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2128                         break;
2129         }
2130         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2131                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2132                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2133
2134                 /* If VLAN item in pattern doesn't contain data, return here. */
2135                 if (!vlan_v)
2136                         return;
2137                 if (!vlan_m)
2138                         vlan_m = &nic_mask;
2139                 /* Only full match values are accepted */
2140                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2141                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2142                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2143                         vlan->vlan_tci |=
2144                                 rte_be_to_cpu_16(vlan_v->tci &
2145                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2146                 }
2147                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2148                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2149                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2150                         vlan->vlan_tci |=
2151                                 rte_be_to_cpu_16(vlan_v->tci &
2152                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2153                 }
2154                 if (vlan_m->inner_type == nic_mask.inner_type)
2155                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2156                                                            vlan_m->inner_type);
2157         }
2158 }
2159
2160 /**
2161  * Validate the push VLAN action.
2162  *
2163  * @param[in] dev
2164  *   Pointer to the rte_eth_dev structure.
2165  * @param[in] action_flags
2166  *   Holds the actions detected until now.
 * @param[in] vlan_m
 *   Pointer to the VLAN item mask in the pattern, or NULL if not matched.
2169  * @param[in] action
2170  *   Pointer to the action structure.
2171  * @param[in] attr
2172  *   Pointer to flow attributes
2173  * @param[out] error
2174  *   Pointer to error structure.
2175  *
2176  * @return
2177  *   0 on success, a negative errno value otherwise and rte_errno is set.
2178  */
2179 static int
2180 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2181                                   uint64_t action_flags,
2182                                   const struct rte_flow_item_vlan *vlan_m,
2183                                   const struct rte_flow_action *action,
2184                                   const struct rte_flow_attr *attr,
2185                                   struct rte_flow_error *error)
2186 {
2187         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2188         const struct mlx5_priv *priv = dev->data->dev_private;
2189
2190         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2191             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2192                 return rte_flow_error_set(error, EINVAL,
2193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2194                                           "invalid vlan ethertype");
2195         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2196                 return rte_flow_error_set(error, EINVAL,
2197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2198                                           "wrong action order, port_id should "
2199                                           "be after push VLAN");
2200         if (!attr->transfer && priv->representor)
2201                 return rte_flow_error_set(error, ENOTSUP,
2202                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2203                                           "push vlan action for VF representor "
2204                                           "not supported on NIC table");
2205         if (vlan_m &&
2206             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2207             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2208                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2209             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2210             !(mlx5_flow_find_action
2211                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2212                 return rte_flow_error_set(error, EINVAL,
2213                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2214                                           "not full match mask on VLAN PCP and "
2215                                           "there is no of_set_vlan_pcp action, "
2216                                           "push VLAN action cannot figure out "
2217                                           "PCP value");
2218         if (vlan_m &&
2219             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2220             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2221                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2222             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2223             !(mlx5_flow_find_action
2224                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2225                 return rte_flow_error_set(error, EINVAL,
2226                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2227                                           "not full match mask on VLAN VID and "
2228                                           "there is no of_set_vlan_vid action, "
2229                                           "push VLAN action cannot figure out "
2230                                           "VID value");
2231         (void)attr;
2232         return 0;
2233 }
2234
2235 /**
2236  * Validate the set VLAN PCP.
2237  *
2238  * @param[in] action_flags
2239  *   Holds the actions detected until now.
2240  * @param[in] actions
2241  *   Pointer to the list of actions remaining in the flow rule.
2242  * @param[out] error
2243  *   Pointer to error structure.
2244  *
2245  * @return
2246  *   0 on success, a negative errno value otherwise and rte_errno is set.
2247  */
2248 static int
2249 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2250                                      const struct rte_flow_action actions[],
2251                                      struct rte_flow_error *error)
2252 {
2253         const struct rte_flow_action *action = actions;
2254         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2255
2256         if (conf->vlan_pcp > 7)
2257                 return rte_flow_error_set(error, EINVAL,
2258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2259                                           "VLAN PCP value is too big");
2260         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2261                 return rte_flow_error_set(error, ENOTSUP,
2262                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2263                                           "set VLAN PCP action must follow "
2264                                           "the push VLAN action");
2265         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2266                 return rte_flow_error_set(error, ENOTSUP,
2267                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2268                                           "Multiple VLAN PCP modification are "
2269                                           "not supported");
2270         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2271                 return rte_flow_error_set(error, EINVAL,
2272                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2273                                           "wrong action order, port_id should "
2274                                           "be after set VLAN PCP");
2275         return 0;
2276 }
2277
2278 /**
2279  * Validate the set VLAN VID.
2280  *
2281  * @param[in] item_flags
2282  *   Holds the items detected in this rule.
2283  * @param[in] action_flags
2284  *   Holds the actions detected until now.
2285  * @param[in] actions
2286  *   Pointer to the list of actions remaining in the flow rule.
2287  * @param[out] error
2288  *   Pointer to error structure.
2289  *
2290  * @return
2291  *   0 on success, a negative errno value otherwise and rte_errno is set.
2292  */
2293 static int
2294 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2295                                      uint64_t action_flags,
2296                                      const struct rte_flow_action actions[],
2297                                      struct rte_flow_error *error)
2298 {
2299         const struct rte_flow_action *action = actions;
2300         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2301
2302         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2303                 return rte_flow_error_set(error, EINVAL,
2304                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2305                                           "VLAN VID value is too big");
2306         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2307             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2308                 return rte_flow_error_set(error, ENOTSUP,
2309                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2310                                           "set VLAN VID action must follow push"
2311                                           " VLAN action or match on VLAN item");
2312         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2313                 return rte_flow_error_set(error, ENOTSUP,
2314                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2315                                           "Multiple VLAN VID modifications are "
2316                                           "not supported");
2317         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2318                 return rte_flow_error_set(error, EINVAL,
2319                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2320                                           "wrong action order, port_id should "
2321                                           "be after set VLAN VID");
2322         return 0;
2323 }
2324
/**
2326  * Validate the FLAG action.
2327  *
2328  * @param[in] dev
2329  *   Pointer to the rte_eth_dev structure.
2330  * @param[in] action_flags
2331  *   Holds the actions detected until now.
2332  * @param[in] attr
2333  *   Pointer to flow attributes
2334  * @param[out] error
2335  *   Pointer to error structure.
2336  *
2337  * @return
2338  *   0 on success, a negative errno value otherwise and rte_errno is set.
2339  */
2340 static int
2341 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2342                              uint64_t action_flags,
2343                              const struct rte_flow_attr *attr,
2344                              struct rte_flow_error *error)
2345 {
2346         struct mlx5_priv *priv = dev->data->dev_private;
2347         struct mlx5_dev_config *config = &priv->config;
2348         int ret;
2349
2350         /* Fall back if no extended metadata register support. */
2351         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2352                 return mlx5_flow_validate_action_flag(action_flags, attr,
2353                                                       error);
2354         /* Extensive metadata mode requires registers. */
2355         if (!mlx5_flow_ext_mreg_supported(dev))
2356                 return rte_flow_error_set(error, ENOTSUP,
2357                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2358                                           "no metadata registers "
2359                                           "to support flag action");
2360         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2361                 return rte_flow_error_set(error, ENOTSUP,
2362                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2363                                           "extended metadata register"
2364                                           " isn't available");
2365         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2366         if (ret < 0)
2367                 return ret;
2368         MLX5_ASSERT(ret > 0);
2369         if (action_flags & MLX5_FLOW_ACTION_MARK)
2370                 return rte_flow_error_set(error, EINVAL,
2371                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2372                                           "can't mark and flag in same flow");
2373         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2374                 return rte_flow_error_set(error, EINVAL,
2375                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2376                                           "can't have 2 flag"
2377                                           " actions in same flow");
2378         return 0;
2379 }
2380
2381 /**
2382  * Validate MARK action.
2383  *
2384  * @param[in] dev
2385  *   Pointer to the rte_eth_dev structure.
2386  * @param[in] action
2387  *   Pointer to action.
2388  * @param[in] action_flags
2389  *   Holds the actions detected until now.
2390  * @param[in] attr
2391  *   Pointer to flow attributes
2392  * @param[out] error
2393  *   Pointer to error structure.
2394  *
2395  * @return
2396  *   0 on success, a negative errno value otherwise and rte_errno is set.
2397  */
2398 static int
2399 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2400                              const struct rte_flow_action *action,
2401                              uint64_t action_flags,
2402                              const struct rte_flow_attr *attr,
2403                              struct rte_flow_error *error)
2404 {
2405         struct mlx5_priv *priv = dev->data->dev_private;
2406         struct mlx5_dev_config *config = &priv->config;
2407         const struct rte_flow_action_mark *mark = action->conf;
2408         int ret;
2409
2410         /* Fall back if no extended metadata register support. */
2411         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2412                 return mlx5_flow_validate_action_mark(action, action_flags,
2413                                                       attr, error);
2414         /* Extensive metadata mode requires registers. */
2415         if (!mlx5_flow_ext_mreg_supported(dev))
2416                 return rte_flow_error_set(error, ENOTSUP,
2417                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2418                                           "no metadata registers "
2419                                           "to support mark action");
2420         if (!priv->sh->dv_mark_mask)
2421                 return rte_flow_error_set(error, ENOTSUP,
2422                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2423                                           "extended metadata register"
2424                                           " isn't available");
2425         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2426         if (ret < 0)
2427                 return ret;
2428         MLX5_ASSERT(ret > 0);
2429         if (!mark)
2430                 return rte_flow_error_set(error, EINVAL,
2431                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2432                                           "configuration cannot be null");
2433         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2434                 return rte_flow_error_set(error, EINVAL,
2435                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2436                                           &mark->id,
2437                                           "mark id exceeds the limit");
2438         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2439                 return rte_flow_error_set(error, EINVAL,
2440                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2441                                           "can't flag and mark in same flow");
2442         if (action_flags & MLX5_FLOW_ACTION_MARK)
2443                 return rte_flow_error_set(error, EINVAL,
2444                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2445                                           "can't have 2 mark actions in same"
2446                                           " flow");
2447         return 0;
2448 }
2449
2450 /**
2451  * Validate SET_META action.
2452  *
2453  * @param[in] dev
2454  *   Pointer to the rte_eth_dev structure.
2455  * @param[in] action
2456  *   Pointer to the action structure.
2457  * @param[in] action_flags
2458  *   Holds the actions detected until now.
2459  * @param[in] attr
2460  *   Pointer to flow attributes
2461  * @param[out] error
2462  *   Pointer to error structure.
2463  *
2464  * @return
2465  *   0 on success, a negative errno value otherwise and rte_errno is set.
2466  */
2467 static int
2468 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2469                                  const struct rte_flow_action *action,
2470                                  uint64_t action_flags __rte_unused,
2471                                  const struct rte_flow_attr *attr,
2472                                  struct rte_flow_error *error)
2473 {
2474         const struct rte_flow_action_set_meta *conf;
2475         uint32_t nic_mask = UINT32_MAX;
2476         int reg;
2477
2478         if (!mlx5_flow_ext_mreg_supported(dev))
2479                 return rte_flow_error_set(error, ENOTSUP,
2480                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2481                                           "extended metadata register"
2482                                           " isn't supported");
2483         reg = flow_dv_get_metadata_reg(dev, attr, error);
2484         if (reg < 0)
2485                 return reg;
2486         if (reg != REG_A && reg != REG_B) {
2487                 struct mlx5_priv *priv = dev->data->dev_private;
2488
2489                 nic_mask = priv->sh->dv_meta_mask;
2490         }
2491         if (!(action->conf))
2492                 return rte_flow_error_set(error, EINVAL,
2493                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2494                                           "configuration cannot be null");
2495         conf = (const struct rte_flow_action_set_meta *)action->conf;
2496         if (!conf->mask)
2497                 return rte_flow_error_set(error, EINVAL,
2498                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2499                                           "zero mask doesn't have any effect");
2500         if (conf->mask & ~nic_mask)
2501                 return rte_flow_error_set(error, EINVAL,
2502                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2503                                           "meta data must be within reg C0");
2504         return 0;
2505 }
2506
2507 /**
2508  * Validate SET_TAG action.
2509  *
2510  * @param[in] dev
2511  *   Pointer to the rte_eth_dev structure.
2512  * @param[in] action
2513  *   Pointer to the action structure.
2514  * @param[in] action_flags
2515  *   Holds the actions detected until now.
2516  * @param[in] attr
2517  *   Pointer to flow attributes
2518  * @param[out] error
2519  *   Pointer to error structure.
2520  *
2521  * @return
2522  *   0 on success, a negative errno value otherwise and rte_errno is set.
2523  */
2524 static int
2525 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2526                                 const struct rte_flow_action *action,
2527                                 uint64_t action_flags,
2528                                 const struct rte_flow_attr *attr,
2529                                 struct rte_flow_error *error)
2530 {
2531         const struct rte_flow_action_set_tag *conf;
2532         const uint64_t terminal_action_flags =
2533                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2534                 MLX5_FLOW_ACTION_RSS;
2535         int ret;
2536
2537         if (!mlx5_flow_ext_mreg_supported(dev))
2538                 return rte_flow_error_set(error, ENOTSUP,
2539                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2540                                           "extensive metadata register"
2541                                           " isn't supported");
2542         if (!(action->conf))
2543                 return rte_flow_error_set(error, EINVAL,
2544                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2545                                           "configuration cannot be null");
2546         conf = (const struct rte_flow_action_set_tag *)action->conf;
2547         if (!conf->mask)
2548                 return rte_flow_error_set(error, EINVAL,
2549                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2550                                           "zero mask doesn't have any effect");
2551         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2552         if (ret < 0)
2553                 return ret;
2554         if (!attr->transfer && attr->ingress &&
2555             (action_flags & terminal_action_flags))
2556                 return rte_flow_error_set(error, EINVAL,
2557                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2558                                           "set_tag has no effect"
2559                                           " with terminal actions");
2560         return 0;
2561 }
2562
2563 /**
2564  * Validate count action.
2565  *
2566  * @param[in] dev
2567  *   Pointer to rte_eth_dev structure.
2568  * @param[out] error
2569  *   Pointer to error structure.
2570  *
2571  * @return
2572  *   0 on success, a negative errno value otherwise and rte_errno is set.
2573  */
2574 static int
2575 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2576                               struct rte_flow_error *error)
2577 {
2578         struct mlx5_priv *priv = dev->data->dev_private;
2579
2580         if (!priv->config.devx)
2581                 goto notsup_err;
2582 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2583         return 0;
2584 #endif
2585 notsup_err:
2586         return rte_flow_error_set
2587                       (error, ENOTSUP,
2588                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2589                        NULL,
2590                        "count action not supported");
2591 }
2592
2593 /**
2594  * Validate the L2 encap action.
2595  *
2596  * @param[in] dev
2597  *   Pointer to the rte_eth_dev structure.
2598  * @param[in] action_flags
2599  *   Holds the actions detected until now.
2600  * @param[in] action
2601  *   Pointer to the action structure.
2602  * @param[in] attr
2603  *   Pointer to flow attributes.
2604  * @param[out] error
2605  *   Pointer to error structure.
2606  *
2607  * @return
2608  *   0 on success, a negative errno value otherwise and rte_errno is set.
2609  */
2610 static int
2611 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2612                                  uint64_t action_flags,
2613                                  const struct rte_flow_action *action,
2614                                  const struct rte_flow_attr *attr,
2615                                  struct rte_flow_error *error)
2616 {
2617         const struct mlx5_priv *priv = dev->data->dev_private;
2618
2619         if (!(action->conf))
2620                 return rte_flow_error_set(error, EINVAL,
2621                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2622                                           "configuration cannot be null");
2623         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2624                 return rte_flow_error_set(error, EINVAL,
2625                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2626                                           "can only have a single encap action "
2627                                           "in a flow");
2628         if (!attr->transfer && priv->representor)
2629                 return rte_flow_error_set(error, ENOTSUP,
2630                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2631                                           "encap action for VF representor "
2632                                           "not supported on NIC table");
2633         return 0;
2634 }
2635
2636 /**
2637  * Validate a decap action.
2638  *
2639  * @param[in] dev
2640  *   Pointer to the rte_eth_dev structure.
2641  * @param[in] action_flags
2642  *   Holds the actions detected until now.
2643  * @param[in] attr
2644  *   Pointer to flow attributes
2645  * @param[out] error
2646  *   Pointer to error structure.
2647  *
2648  * @return
2649  *   0 on success, a negative errno value otherwise and rte_errno is set.
2650  */
2651 static int
2652 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2653                               uint64_t action_flags,
2654                               const struct rte_flow_attr *attr,
2655                               struct rte_flow_error *error)
2656 {
2657         const struct mlx5_priv *priv = dev->data->dev_private;
2658
2659         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2660             !priv->config.decap_en)
2661                 return rte_flow_error_set(error, ENOTSUP,
2662                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2663                                           "decap is not enabled");
2664         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2665                 return rte_flow_error_set(error, ENOTSUP,
2666                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2667                                           action_flags &
2668                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2669                                           "have a single decap action" : "decap "
2670                                           "after encap is not supported");
2671         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2672                 return rte_flow_error_set(error, EINVAL,
2673                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2674                                           "can't have decap action after"
2675                                           " modify action");
2676         if (attr->egress)
2677                 return rte_flow_error_set(error, ENOTSUP,
2678                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2679                                           NULL,
2680                                           "decap action not supported for "
2681                                           "egress");
2682         if (!attr->transfer && priv->representor)
2683                 return rte_flow_error_set(error, ENOTSUP,
2684                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2685                                           "decap action for VF representor "
2686                                           "not supported on NIC table");
2687         return 0;
2688 }
2689
2690 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2691
2692 /**
2693  * Validate the raw encap and decap actions.
2694  *
2695  * @param[in] dev
2696  *   Pointer to the rte_eth_dev structure.
2697  * @param[in] decap
2698  *   Pointer to the decap action.
2699  * @param[in] encap
2700  *   Pointer to the encap action.
2701  * @param[in] attr
2702  *   Pointer to flow attributes
2703  * @param[in/out] action_flags
2704  *   Holds the actions detected until now.
2705  * @param[out] actions_n
2706  *   pointer to the number of actions counter.
2707  * @param[out] error
2708  *   Pointer to error structure.
2709  *
2710  * @return
2711  *   0 on success, a negative errno value otherwise and rte_errno is set.
2712  */
static int
flow_dv_validate_action_raw_encap_decap
	(struct rte_eth_dev *dev,
	 const struct rte_flow_action_raw_decap *decap,
	 const struct rte_flow_action_raw_encap *encap,
	 const struct rte_flow_attr *attr, uint64_t *action_flags,
	 int *actions_n, struct rte_flow_error *error)
{
	const struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	/* An encap must carry a non-empty reformat buffer. */
	if (encap && (!encap->size || !encap->data))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "raw encap data cannot be empty");
	if (decap && encap) {
		/*
		 * Classify the decap+encap pair by size against the L2-header
		 * threshold; a side that is absorbed into an L3 reformat is
		 * NULLed so it is neither validated nor counted below.
		 */
		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 encap. */
			decap = NULL;
		else if (encap->size <=
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 decap. */
			encap = NULL;
		else if (encap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* 2 L2 actions: encap and decap. */
			;
		else
			return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "unsupported too small "
				"raw decap and too small raw "
				"encap combination");
	}
	if (decap) {
		ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
						    error);
		if (ret < 0)
			return ret;
		/* Record the decap in the caller's flags/action counters. */
		*action_flags |= MLX5_FLOW_ACTION_DECAP;
		++(*actions_n);
	}
	if (encap) {
		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "small raw encap size");
		/* At most one encap action per flow. */
		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "more than one encap action");
		if (!attr->transfer && priv->representor)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "encap action for VF representor "
					 "not supported on NIC table");
		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
		++(*actions_n);
	}
	return 0;
}
2783
2784 /**
2785  * Match encap_decap resource.
2786  *
2787  * @param entry
2788  *   Pointer to exist resource entry object.
2789  * @param ctx
2790  *   Pointer to new encap_decap resource.
2791  *
2792  * @return
2793  *   0 on matching, -1 otherwise.
2794  */
2795 static int
2796 flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
2797 {
2798         struct mlx5_flow_dv_encap_decap_resource *resource;
2799         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2800
2801         resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx;
2802         cache_resource = container_of(entry,
2803                                       struct mlx5_flow_dv_encap_decap_resource,
2804                                       entry);
2805         if (resource->entry.key == cache_resource->entry.key &&
2806             resource->reformat_type == cache_resource->reformat_type &&
2807             resource->ft_type == cache_resource->ft_type &&
2808             resource->flags == cache_resource->flags &&
2809             resource->size == cache_resource->size &&
2810             !memcmp((const void *)resource->buf,
2811                     (const void *)cache_resource->buf,
2812                     resource->size))
2813                 return 0;
2814         return -1;
2815 }
2816
2817 /**
2818  * Find existing encap/decap resource or create and register a new one.
2819  *
2820  * @param[in, out] dev
2821  *   Pointer to rte_eth_dev structure.
2822  * @param[in, out] resource
2823  *   Pointer to encap/decap resource.
2824  * @parm[in, out] dev_flow
2825  *   Pointer to the dev_flow.
2826  * @param[out] error
2827  *   pointer to error structure.
2828  *
2829  * @return
2830  *   0 on success otherwise -errno and errno is set.
2831  */
2832 static int
2833 flow_dv_encap_decap_resource_register
2834                         (struct rte_eth_dev *dev,
2835                          struct mlx5_flow_dv_encap_decap_resource *resource,
2836                          struct mlx5_flow *dev_flow,
2837                          struct rte_flow_error *error)
2838 {
2839         struct mlx5_priv *priv = dev->data->dev_private;
2840         struct mlx5_dev_ctx_shared *sh = priv->sh;
2841         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2842         struct mlx5dv_dr_domain *domain;
2843         struct mlx5_hlist_entry *entry;
2844         union mlx5_flow_encap_decap_key encap_decap_key = {
2845                 {
2846                         .ft_type = resource->ft_type,
2847                         .refmt_type = resource->reformat_type,
2848                         .buf_size = resource->size,
2849                         .table_level = !!dev_flow->dv.group,
2850                         .cksum = 0,
2851                 }
2852         };
2853         int ret;
2854
2855         resource->flags = dev_flow->dv.group ? 0 : 1;
2856         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2857                 domain = sh->fdb_domain;
2858         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2859                 domain = sh->rx_domain;
2860         else
2861                 domain = sh->tx_domain;
2862         encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2863                                                 resource->size, 0);
2864         resource->entry.key = encap_decap_key.v64;
2865         /* Lookup a matching resource from cache. */
2866         entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key,
2867                                      flow_dv_encap_decap_resource_match,
2868                                      (void *)resource);
2869         if (entry) {
2870                 cache_resource = container_of(entry,
2871                         struct mlx5_flow_dv_encap_decap_resource, entry);
2872                 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2873                         (void *)cache_resource,
2874                         __atomic_load_n(&cache_resource->refcnt,
2875                                         __ATOMIC_RELAXED));
2876                 __atomic_fetch_add(&cache_resource->refcnt, 1,
2877                                    __ATOMIC_RELAXED);
2878                 dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
2879                 dev_flow->dv.encap_decap = cache_resource;
2880                 return 0;
2881         }
2882         /* Register new encap/decap resource. */
2883         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2884                                        &dev_flow->handle->dvh.rix_encap_decap);
2885         if (!cache_resource)
2886                 return rte_flow_error_set(error, ENOMEM,
2887                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2888                                           "cannot allocate resource memory");
2889         *cache_resource = *resource;
2890         cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap;
2891         ret = mlx5_flow_os_create_flow_action_packet_reformat
2892                                         (sh->ctx, domain, cache_resource,
2893                                          &cache_resource->action);
2894         if (ret) {
2895                 mlx5_free(cache_resource);
2896                 return rte_flow_error_set(error, ENOMEM,
2897                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2898                                           NULL, "cannot create action");
2899         }
2900         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
2901         if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry,
2902                                  flow_dv_encap_decap_resource_match,
2903                                  (void *)cache_resource)) {
2904                 claim_zero(mlx5_flow_os_destroy_flow_action
2905                                                 (cache_resource->action));
2906                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2907                                 cache_resource->idx);
2908                 return rte_flow_error_set(error, EEXIST,
2909                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2910                                           NULL, "action exist");
2911         }
2912         dev_flow->dv.encap_decap = cache_resource;
2913         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2914                 (void *)cache_resource,
2915                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
2916         return 0;
2917 }
2918
2919 /**
2920  * Find existing table jump resource or create and register a new one.
2921  *
2922  * @param[in, out] dev
2923  *   Pointer to rte_eth_dev structure.
2924  * @param[in, out] tbl
2925  *   Pointer to flow table resource.
2926  * @parm[in, out] dev_flow
2927  *   Pointer to the dev_flow.
2928  * @param[out] error
2929  *   pointer to error structure.
2930  *
2931  * @return
2932  *   0 on success otherwise -errno and errno is set.
2933  */
2934 static int
2935 flow_dv_jump_tbl_resource_register
2936                         (struct rte_eth_dev *dev __rte_unused,
2937                          struct mlx5_flow_tbl_resource *tbl,
2938                          struct mlx5_flow *dev_flow,
2939                          struct rte_flow_error *error __rte_unused)
2940 {
2941         struct mlx5_flow_tbl_data_entry *tbl_data =
2942                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2943
2944         MLX5_ASSERT(tbl);
2945         MLX5_ASSERT(tbl_data->jump.action);
2946         dev_flow->handle->rix_jump = tbl_data->idx;
2947         dev_flow->dv.jump = &tbl_data->jump;
2948         return 0;
2949 }
2950
2951 /**
2952  * Find existing table port ID resource or create and register a new one.
2953  *
2954  * @param[in, out] dev
2955  *   Pointer to rte_eth_dev structure.
2956  * @param[in, out] resource
2957  *   Pointer to port ID action resource.
2958  * @parm[in, out] dev_flow
2959  *   Pointer to the dev_flow.
2960  * @param[out] error
2961  *   pointer to error structure.
2962  *
2963  * @return
2964  *   0 on success otherwise -errno and errno is set.
2965  */
2966 static int
2967 flow_dv_port_id_action_resource_register
2968                         (struct rte_eth_dev *dev,
2969                          struct mlx5_flow_dv_port_id_action_resource *resource,
2970                          struct mlx5_flow *dev_flow,
2971                          struct rte_flow_error *error)
2972 {
2973         struct mlx5_priv *priv = dev->data->dev_private;
2974         struct mlx5_dev_ctx_shared *sh = priv->sh;
2975         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2976         uint32_t idx = 0;
2977         int ret;
2978
2979         /* Lookup a matching resource from cache. */
2980         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
2981                       idx, cache_resource, next) {
2982                 if (resource->port_id == cache_resource->port_id) {
2983                         DRV_LOG(DEBUG, "port id action resource resource %p: "
2984                                 "refcnt %d++",
2985                                 (void *)cache_resource,
2986                                 __atomic_load_n(&cache_resource->refcnt,
2987                                                 __ATOMIC_RELAXED));
2988                         __atomic_fetch_add(&cache_resource->refcnt, 1,
2989                                            __ATOMIC_RELAXED);
2990                         dev_flow->handle->rix_port_id_action = idx;
2991                         dev_flow->dv.port_id_action = cache_resource;
2992                         return 0;
2993                 }
2994         }
2995         /* Register new port id action resource. */
2996         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
2997                                        &dev_flow->handle->rix_port_id_action);
2998         if (!cache_resource)
2999                 return rte_flow_error_set(error, ENOMEM,
3000                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3001                                           "cannot allocate resource memory");
3002         *cache_resource = *resource;
3003         ret = mlx5_flow_os_create_flow_action_dest_port
3004                                 (priv->sh->fdb_domain, resource->port_id,
3005                                  &cache_resource->action);
3006         if (ret) {
3007                 mlx5_free(cache_resource);
3008                 return rte_flow_error_set(error, ENOMEM,
3009                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3010                                           NULL, "cannot create action");
3011         }
3012         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
3013         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
3014                      dev_flow->handle->rix_port_id_action, cache_resource,
3015                      next);
3016         dev_flow->dv.port_id_action = cache_resource;
3017         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
3018                 (void *)cache_resource,
3019                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
3020         return 0;
3021 }
3022
3023 /**
3024  * Find existing push vlan resource or create and register a new one.
3025  *
3026  * @param [in, out] dev
3027  *   Pointer to rte_eth_dev structure.
3028  * @param[in, out] resource
3029  *   Pointer to port ID action resource.
3030  * @parm[in, out] dev_flow
3031  *   Pointer to the dev_flow.
3032  * @param[out] error
3033  *   pointer to error structure.
3034  *
3035  * @return
3036  *   0 on success otherwise -errno and errno is set.
3037  */
3038 static int
3039 flow_dv_push_vlan_action_resource_register
3040                        (struct rte_eth_dev *dev,
3041                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3042                         struct mlx5_flow *dev_flow,
3043                         struct rte_flow_error *error)
3044 {
3045         struct mlx5_priv *priv = dev->data->dev_private;
3046         struct mlx5_dev_ctx_shared *sh = priv->sh;
3047         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
3048         struct mlx5dv_dr_domain *domain;
3049         uint32_t idx = 0;
3050         int ret;
3051
3052         /* Lookup a matching resource from cache. */
3053         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
3054                       sh->push_vlan_action_list, idx, cache_resource, next) {
3055                 if (resource->vlan_tag == cache_resource->vlan_tag &&
3056                     resource->ft_type == cache_resource->ft_type) {
3057                         DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
3058                                 "refcnt %d++",
3059                                 (void *)cache_resource,
3060                                 __atomic_load_n(&cache_resource->refcnt,
3061                                                 __ATOMIC_RELAXED));
3062                         __atomic_fetch_add(&cache_resource->refcnt, 1,
3063                                            __ATOMIC_RELAXED);
3064                         dev_flow->handle->dvh.rix_push_vlan = idx;
3065                         dev_flow->dv.push_vlan_res = cache_resource;
3066                         return 0;
3067                 }
3068         }
3069         /* Register new push_vlan action resource. */
3070         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
3071                                        &dev_flow->handle->dvh.rix_push_vlan);
3072         if (!cache_resource)
3073                 return rte_flow_error_set(error, ENOMEM,
3074                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3075                                           "cannot allocate resource memory");
3076         *cache_resource = *resource;
3077         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3078                 domain = sh->fdb_domain;
3079         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3080                 domain = sh->rx_domain;
3081         else
3082                 domain = sh->tx_domain;
3083         ret = mlx5_flow_os_create_flow_action_push_vlan
3084                                         (domain, resource->vlan_tag,
3085                                          &cache_resource->action);
3086         if (ret) {
3087                 mlx5_free(cache_resource);
3088                 return rte_flow_error_set(error, ENOMEM,
3089                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3090                                           NULL, "cannot create action");
3091         }
3092         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
3093         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
3094                      &sh->push_vlan_action_list,
3095                      dev_flow->handle->dvh.rix_push_vlan,
3096                      cache_resource, next);
3097         dev_flow->dv.push_vlan_res = cache_resource;
3098         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
3099                 (void *)cache_resource,
3100                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
3101         return 0;
3102 }
3103 /**
3104  * Get the size of specific rte_flow_item_type hdr size
3105  *
3106  * @param[in] item_type
3107  *   Tested rte_flow_item_type.
3108  *
3109  * @return
3110  *   sizeof struct item_type, 0 if void or irrelevant.
3111  */
3112 static size_t
3113 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3114 {
3115         size_t retval;
3116
3117         switch (item_type) {
3118         case RTE_FLOW_ITEM_TYPE_ETH:
3119                 retval = sizeof(struct rte_ether_hdr);
3120                 break;
3121         case RTE_FLOW_ITEM_TYPE_VLAN:
3122                 retval = sizeof(struct rte_vlan_hdr);
3123                 break;
3124         case RTE_FLOW_ITEM_TYPE_IPV4:
3125                 retval = sizeof(struct rte_ipv4_hdr);
3126                 break;
3127         case RTE_FLOW_ITEM_TYPE_IPV6:
3128                 retval = sizeof(struct rte_ipv6_hdr);
3129                 break;
3130         case RTE_FLOW_ITEM_TYPE_UDP:
3131                 retval = sizeof(struct rte_udp_hdr);
3132                 break;
3133         case RTE_FLOW_ITEM_TYPE_TCP:
3134                 retval = sizeof(struct rte_tcp_hdr);
3135                 break;
3136         case RTE_FLOW_ITEM_TYPE_VXLAN:
3137         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3138                 retval = sizeof(struct rte_vxlan_hdr);
3139                 break;
3140         case RTE_FLOW_ITEM_TYPE_GRE:
3141         case RTE_FLOW_ITEM_TYPE_NVGRE:
3142                 retval = sizeof(struct rte_gre_hdr);
3143                 break;
3144         case RTE_FLOW_ITEM_TYPE_MPLS:
3145                 retval = sizeof(struct rte_mpls_hdr);
3146                 break;
3147         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3148         default:
3149                 retval = 0;
3150                 break;
3151         }
3152         return retval;
3153 }
3154
3155 #define MLX5_ENCAP_IPV4_VERSION         0x40
3156 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3157 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3158 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3159 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3160 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3161 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3162
/**
 * Convert the encap action data from a list of rte_flow_item to a raw
 * byte buffer usable as packet reformat data.
 *
 * Walks the items outer-to-inner, appends each item's spec to @p buf and
 * patches protocol defaults (ethertype, IP version/TTL/hop limit, next
 * protocol, UDP destination port, VXLAN flags) into any field the caller
 * left zeroed.  Also validates header ordering (e.g. VLAN after ETH, UDP
 * after IP).  Total size is bounded by MLX5_ENCAP_MAX_LEN.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list, terminated by ITEM_TYPE_END.
 * @param[out] buf
 *   Pointer to the output buffer (at least MLX5_ENCAP_MAX_LEN bytes).
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	/* Pointers into buf for headers seen so far; NULL until copied. */
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv4_hdr *ipv4 = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct rte_gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_hdr_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		/* Append this header as-is; defaults are patched below. */
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct rte_ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
			/* VLAN is only valid after an Ethernet header. */
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Set the L2 ethertype on the innermost L2 header. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			/* Default version/traffic-class/flow-label word. */
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct rte_udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			/* VNI-valid flag must be set for VXLAN. */
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE requires an explicit next protocol in spec. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct rte_gre_hdr *)&buf[temp_size];
			/* GRE requires an explicit next protocol in spec. */
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
3332
3333 static int
3334 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3335 {
3336         struct rte_ether_hdr *eth = NULL;
3337         struct rte_vlan_hdr *vlan = NULL;
3338         struct rte_ipv6_hdr *ipv6 = NULL;
3339         struct rte_udp_hdr *udp = NULL;
3340         char *next_hdr;
3341         uint16_t proto;
3342
3343         eth = (struct rte_ether_hdr *)data;
3344         next_hdr = (char *)(eth + 1);
3345         proto = RTE_BE16(eth->ether_type);
3346
3347         /* VLAN skipping */
3348         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3349                 vlan = (struct rte_vlan_hdr *)next_hdr;
3350                 proto = RTE_BE16(vlan->eth_proto);
3351                 next_hdr += sizeof(struct rte_vlan_hdr);
3352         }
3353
3354         /* HW calculates IPv4 csum. no need to proceed */
3355         if (proto == RTE_ETHER_TYPE_IPV4)
3356                 return 0;
3357
3358         /* non IPv4/IPv6 header. not supported */
3359         if (proto != RTE_ETHER_TYPE_IPV6) {
3360                 return rte_flow_error_set(error, ENOTSUP,
3361                                           RTE_FLOW_ERROR_TYPE_ACTION,
3362                                           NULL, "Cannot offload non IPv4/IPv6");
3363         }
3364
3365         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3366
3367         /* ignore non UDP */
3368         if (ipv6->proto != IPPROTO_UDP)
3369                 return 0;
3370
3371         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3372         udp->dgram_cksum = 0;
3373
3374         return 0;
3375 }
3376
3377 /**
3378  * Convert L2 encap action to DV specification.
3379  *
3380  * @param[in] dev
3381  *   Pointer to rte_eth_dev structure.
3382  * @param[in] action
3383  *   Pointer to action structure.
3384  * @param[in, out] dev_flow
3385  *   Pointer to the mlx5_flow.
3386  * @param[in] transfer
3387  *   Mark if the flow is E-Switch flow.
3388  * @param[out] error
3389  *   Pointer to the error structure.
3390  *
3391  * @return
3392  *   0 on success, a negative errno value otherwise and rte_errno is set.
3393  */
3394 static int
3395 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3396                                const struct rte_flow_action *action,
3397                                struct mlx5_flow *dev_flow,
3398                                uint8_t transfer,
3399                                struct rte_flow_error *error)
3400 {
3401         const struct rte_flow_item *encap_data;
3402         const struct rte_flow_action_raw_encap *raw_encap_data;
3403         struct mlx5_flow_dv_encap_decap_resource res = {
3404                 .reformat_type =
3405                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3406                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3407                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3408         };
3409
3410         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3411                 raw_encap_data =
3412                         (const struct rte_flow_action_raw_encap *)action->conf;
3413                 res.size = raw_encap_data->size;
3414                 memcpy(res.buf, raw_encap_data->data, res.size);
3415         } else {
3416                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3417                         encap_data =
3418                                 ((const struct rte_flow_action_vxlan_encap *)
3419                                                 action->conf)->definition;
3420                 else
3421                         encap_data =
3422                                 ((const struct rte_flow_action_nvgre_encap *)
3423                                                 action->conf)->definition;
3424                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3425                                                &res.size, error))
3426                         return -rte_errno;
3427         }
3428         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3429                 return -rte_errno;
3430         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3431                 return rte_flow_error_set(error, EINVAL,
3432                                           RTE_FLOW_ERROR_TYPE_ACTION,
3433                                           NULL, "can't create L2 encap action");
3434         return 0;
3435 }
3436
3437 /**
3438  * Convert L2 decap action to DV specification.
3439  *
3440  * @param[in] dev
3441  *   Pointer to rte_eth_dev structure.
3442  * @param[in, out] dev_flow
3443  *   Pointer to the mlx5_flow.
3444  * @param[in] transfer
3445  *   Mark if the flow is E-Switch flow.
3446  * @param[out] error
3447  *   Pointer to the error structure.
3448  *
3449  * @return
3450  *   0 on success, a negative errno value otherwise and rte_errno is set.
3451  */
3452 static int
3453 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3454                                struct mlx5_flow *dev_flow,
3455                                uint8_t transfer,
3456                                struct rte_flow_error *error)
3457 {
3458         struct mlx5_flow_dv_encap_decap_resource res = {
3459                 .size = 0,
3460                 .reformat_type =
3461                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3462                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3463                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3464         };
3465
3466         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3467                 return rte_flow_error_set(error, EINVAL,
3468                                           RTE_FLOW_ERROR_TYPE_ACTION,
3469                                           NULL, "can't create L2 decap action");
3470         return 0;
3471 }
3472
3473 /**
3474  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3475  *
3476  * @param[in] dev
3477  *   Pointer to rte_eth_dev structure.
3478  * @param[in] action
3479  *   Pointer to action structure.
3480  * @param[in, out] dev_flow
3481  *   Pointer to the mlx5_flow.
3482  * @param[in] attr
3483  *   Pointer to the flow attributes.
3484  * @param[out] error
3485  *   Pointer to the error structure.
3486  *
3487  * @return
3488  *   0 on success, a negative errno value otherwise and rte_errno is set.
3489  */
3490 static int
3491 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3492                                 const struct rte_flow_action *action,
3493                                 struct mlx5_flow *dev_flow,
3494                                 const struct rte_flow_attr *attr,
3495                                 struct rte_flow_error *error)
3496 {
3497         const struct rte_flow_action_raw_encap *encap_data;
3498         struct mlx5_flow_dv_encap_decap_resource res;
3499
3500         memset(&res, 0, sizeof(res));
3501         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3502         res.size = encap_data->size;
3503         memcpy(res.buf, encap_data->data, res.size);
3504         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3505                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3506                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3507         if (attr->transfer)
3508                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3509         else
3510                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3511                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3512         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3513                 return rte_flow_error_set(error, EINVAL,
3514                                           RTE_FLOW_ERROR_TYPE_ACTION,
3515                                           NULL, "can't create encap action");
3516         return 0;
3517 }
3518
3519 /**
3520  * Create action push VLAN.
3521  *
3522  * @param[in] dev
3523  *   Pointer to rte_eth_dev structure.
3524  * @param[in] attr
3525  *   Pointer to the flow attributes.
3526  * @param[in] vlan
3527  *   Pointer to the vlan to push to the Ethernet header.
3528  * @param[in, out] dev_flow
3529  *   Pointer to the mlx5_flow.
3530  * @param[out] error
3531  *   Pointer to the error structure.
3532  *
3533  * @return
3534  *   0 on success, a negative errno value otherwise and rte_errno is set.
3535  */
3536 static int
3537 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3538                                 const struct rte_flow_attr *attr,
3539                                 const struct rte_vlan_hdr *vlan,
3540                                 struct mlx5_flow *dev_flow,
3541                                 struct rte_flow_error *error)
3542 {
3543         struct mlx5_flow_dv_push_vlan_action_resource res;
3544
3545         memset(&res, 0, sizeof(res));
3546         res.vlan_tag =
3547                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3548                                  vlan->vlan_tci);
3549         if (attr->transfer)
3550                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3551         else
3552                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3553                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3554         return flow_dv_push_vlan_action_resource_register
3555                                             (dev, &res, dev_flow, error);
3556 }
3557
/*
 * Set while an E-Switch (FDB) mirroring sample action exists; consulted by
 * the validation helpers below to reject unsupported action combinations.
 * NOTE(review): plain non-atomic file-scope global shared across ports --
 * assumes flow validation/creation is serialized; confirm thread-safety.
 */
static int fdb_mirror;
3559
3560 /**
3561  * Validate the modify-header actions.
3562  *
3563  * @param[in] action_flags
3564  *   Holds the actions detected until now.
3565  * @param[in] action
3566  *   Pointer to the modify action.
3567  * @param[out] error
3568  *   Pointer to error structure.
3569  *
3570  * @return
3571  *   0 on success, a negative errno value otherwise and rte_errno is set.
3572  */
3573 static int
3574 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3575                                    const struct rte_flow_action *action,
3576                                    struct rte_flow_error *error)
3577 {
3578         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3579                 return rte_flow_error_set(error, EINVAL,
3580                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3581                                           NULL, "action configuration not set");
3582         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3583                 return rte_flow_error_set(error, EINVAL,
3584                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3585                                           "can't have encap action before"
3586                                           " modify action");
3587         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3588                 return rte_flow_error_set(error, EINVAL,
3589                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3590                                           "can't support sample action before"
3591                                           " modify action for E-Switch"
3592                                           " mirroring");
3593         return 0;
3594 }
3595
3596 /**
3597  * Validate the modify-header MAC address actions.
3598  *
3599  * @param[in] action_flags
3600  *   Holds the actions detected until now.
3601  * @param[in] action
3602  *   Pointer to the modify action.
3603  * @param[in] item_flags
3604  *   Holds the items detected.
3605  * @param[out] error
3606  *   Pointer to error structure.
3607  *
3608  * @return
3609  *   0 on success, a negative errno value otherwise and rte_errno is set.
3610  */
3611 static int
3612 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3613                                    const struct rte_flow_action *action,
3614                                    const uint64_t item_flags,
3615                                    struct rte_flow_error *error)
3616 {
3617         int ret = 0;
3618
3619         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3620         if (!ret) {
3621                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3622                         return rte_flow_error_set(error, EINVAL,
3623                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3624                                                   NULL,
3625                                                   "no L2 item in pattern");
3626         }
3627         return ret;
3628 }
3629
3630 /**
3631  * Validate the modify-header IPv4 address actions.
3632  *
3633  * @param[in] action_flags
3634  *   Holds the actions detected until now.
3635  * @param[in] action
3636  *   Pointer to the modify action.
3637  * @param[in] item_flags
3638  *   Holds the items detected.
3639  * @param[out] error
3640  *   Pointer to error structure.
3641  *
3642  * @return
3643  *   0 on success, a negative errno value otherwise and rte_errno is set.
3644  */
3645 static int
3646 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3647                                     const struct rte_flow_action *action,
3648                                     const uint64_t item_flags,
3649                                     struct rte_flow_error *error)
3650 {
3651         int ret = 0;
3652         uint64_t layer;
3653
3654         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3655         if (!ret) {
3656                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3657                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3658                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3659                 if (!(item_flags & layer))
3660                         return rte_flow_error_set(error, EINVAL,
3661                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3662                                                   NULL,
3663                                                   "no ipv4 item in pattern");
3664         }
3665         return ret;
3666 }
3667
3668 /**
3669  * Validate the modify-header IPv6 address actions.
3670  *
3671  * @param[in] action_flags
3672  *   Holds the actions detected until now.
3673  * @param[in] action
3674  *   Pointer to the modify action.
3675  * @param[in] item_flags
3676  *   Holds the items detected.
3677  * @param[out] error
3678  *   Pointer to error structure.
3679  *
3680  * @return
3681  *   0 on success, a negative errno value otherwise and rte_errno is set.
3682  */
3683 static int
3684 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3685                                     const struct rte_flow_action *action,
3686                                     const uint64_t item_flags,
3687                                     struct rte_flow_error *error)
3688 {
3689         int ret = 0;
3690         uint64_t layer;
3691
3692         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3693         if (!ret) {
3694                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3695                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3696                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3697                 if (!(item_flags & layer))
3698                         return rte_flow_error_set(error, EINVAL,
3699                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3700                                                   NULL,
3701                                                   "no ipv6 item in pattern");
3702         }
3703         return ret;
3704 }
3705
3706 /**
3707  * Validate the modify-header TP actions.
3708  *
3709  * @param[in] action_flags
3710  *   Holds the actions detected until now.
3711  * @param[in] action
3712  *   Pointer to the modify action.
3713  * @param[in] item_flags
3714  *   Holds the items detected.
3715  * @param[out] error
3716  *   Pointer to error structure.
3717  *
3718  * @return
3719  *   0 on success, a negative errno value otherwise and rte_errno is set.
3720  */
3721 static int
3722 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3723                                   const struct rte_flow_action *action,
3724                                   const uint64_t item_flags,
3725                                   struct rte_flow_error *error)
3726 {
3727         int ret = 0;
3728         uint64_t layer;
3729
3730         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3731         if (!ret) {
3732                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3733                                  MLX5_FLOW_LAYER_INNER_L4 :
3734                                  MLX5_FLOW_LAYER_OUTER_L4;
3735                 if (!(item_flags & layer))
3736                         return rte_flow_error_set(error, EINVAL,
3737                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3738                                                   NULL, "no transport layer "
3739                                                   "in pattern");
3740         }
3741         return ret;
3742 }
3743
3744 /**
3745  * Validate the modify-header actions of increment/decrement
3746  * TCP Sequence-number.
3747  *
3748  * @param[in] action_flags
3749  *   Holds the actions detected until now.
3750  * @param[in] action
3751  *   Pointer to the modify action.
3752  * @param[in] item_flags
3753  *   Holds the items detected.
3754  * @param[out] error
3755  *   Pointer to error structure.
3756  *
3757  * @return
3758  *   0 on success, a negative errno value otherwise and rte_errno is set.
3759  */
3760 static int
3761 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3762                                        const struct rte_flow_action *action,
3763                                        const uint64_t item_flags,
3764                                        struct rte_flow_error *error)
3765 {
3766         int ret = 0;
3767         uint64_t layer;
3768
3769         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3770         if (!ret) {
3771                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3772                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3773                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3774                 if (!(item_flags & layer))
3775                         return rte_flow_error_set(error, EINVAL,
3776                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3777                                                   NULL, "no TCP item in"
3778                                                   " pattern");
3779                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3780                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3781                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3782                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3783                         return rte_flow_error_set(error, EINVAL,
3784                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3785                                                   NULL,
3786                                                   "cannot decrease and increase"
3787                                                   " TCP sequence number"
3788                                                   " at the same time");
3789         }
3790         return ret;
3791 }
3792
3793 /**
3794  * Validate the modify-header actions of increment/decrement
3795  * TCP Acknowledgment number.
3796  *
3797  * @param[in] action_flags
3798  *   Holds the actions detected until now.
3799  * @param[in] action
3800  *   Pointer to the modify action.
3801  * @param[in] item_flags
3802  *   Holds the items detected.
3803  * @param[out] error
3804  *   Pointer to error structure.
3805  *
3806  * @return
3807  *   0 on success, a negative errno value otherwise and rte_errno is set.
3808  */
3809 static int
3810 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3811                                        const struct rte_flow_action *action,
3812                                        const uint64_t item_flags,
3813                                        struct rte_flow_error *error)
3814 {
3815         int ret = 0;
3816         uint64_t layer;
3817
3818         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3819         if (!ret) {
3820                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3821                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3822                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3823                 if (!(item_flags & layer))
3824                         return rte_flow_error_set(error, EINVAL,
3825                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3826                                                   NULL, "no TCP item in"
3827                                                   " pattern");
3828                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3829                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3830                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3831                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3832                         return rte_flow_error_set(error, EINVAL,
3833                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3834                                                   NULL,
3835                                                   "cannot decrease and increase"
3836                                                   " TCP acknowledgment number"
3837                                                   " at the same time");
3838         }
3839         return ret;
3840 }
3841
3842 /**
3843  * Validate the modify-header TTL actions.
3844  *
3845  * @param[in] action_flags
3846  *   Holds the actions detected until now.
3847  * @param[in] action
3848  *   Pointer to the modify action.
3849  * @param[in] item_flags
3850  *   Holds the items detected.
3851  * @param[out] error
3852  *   Pointer to error structure.
3853  *
3854  * @return
3855  *   0 on success, a negative errno value otherwise and rte_errno is set.
3856  */
3857 static int
3858 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3859                                    const struct rte_flow_action *action,
3860                                    const uint64_t item_flags,
3861                                    struct rte_flow_error *error)
3862 {
3863         int ret = 0;
3864         uint64_t layer;
3865
3866         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3867         if (!ret) {
3868                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3869                                  MLX5_FLOW_LAYER_INNER_L3 :
3870                                  MLX5_FLOW_LAYER_OUTER_L3;
3871                 if (!(item_flags & layer))
3872                         return rte_flow_error_set(error, EINVAL,
3873                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3874                                                   NULL,
3875                                                   "no IP protocol in pattern");
3876         }
3877         return ret;
3878 }
3879
/**
 * Validate jump action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] tunnel
 *   Tunnel offload context of the rule, or NULL for a non-tunnel flow.
 * @param[in] action
 *   Pointer to the jump action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] external
 *   Action belongs to flow rule created by request external to PMD.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_jump(struct rte_eth_dev *dev,
			     const struct mlx5_flow_tunnel *tunnel,
			     const struct rte_flow_action *action,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attributes,
			     bool external, struct rte_flow_error *error)
{
	uint32_t target_group, table;
	int ret = 0;
	struct flow_grp_info grp_info = {
		.external = !!external,
		.transfer = !!attributes->transfer,
		.fdb_def_rule = 1,
		.std_tbl_fix = 0
	};
	/* Jump is a fate action; only one fate action per flow is allowed. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "jump with meter not support");
	/* fdb_mirror is set while an E-Switch mirror sample action exists. */
	if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "E-Switch mirroring can't support"
					  " Sample action and jump action in"
					  " same flow now");
	if (!action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	target_group =
		((const struct rte_flow_action_jump *)action->conf)->group;
	/* Translate the user group through tunnel mapping to a table id. */
	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	/* Jumping to the same group is only allowed for tunnel-offload rules. */
	if (attributes->group == target_group &&
	    !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
			      MLX5_FLOW_ACTION_TUNNEL_MATCH)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "target group must be other than"
					  " the current flow group");
	return 0;
}
3948
3949 /*
3950  * Validate the port_id action.
3951  *
3952  * @param[in] dev
3953  *   Pointer to rte_eth_dev structure.
3954  * @param[in] action_flags
3955  *   Bit-fields that holds the actions detected until now.
3956  * @param[in] action
3957  *   Port_id RTE action structure.
3958  * @param[in] attr
3959  *   Attributes of flow that includes this action.
3960  * @param[out] error
3961  *   Pointer to error structure.
3962  *
3963  * @return
3964  *   0 on success, a negative errno value otherwise and rte_errno is set.
3965  */
3966 static int
3967 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3968                                 uint64_t action_flags,
3969                                 const struct rte_flow_action *action,
3970                                 const struct rte_flow_attr *attr,
3971                                 struct rte_flow_error *error)
3972 {
3973         const struct rte_flow_action_port_id *port_id;
3974         struct mlx5_priv *act_priv;
3975         struct mlx5_priv *dev_priv;
3976         uint16_t port;
3977
3978         if (!attr->transfer)
3979                 return rte_flow_error_set(error, ENOTSUP,
3980                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3981                                           NULL,
3982                                           "port id action is valid in transfer"
3983                                           " mode only");
3984         if (!action || !action->conf)
3985                 return rte_flow_error_set(error, ENOTSUP,
3986                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3987                                           NULL,
3988                                           "port id action parameters must be"
3989                                           " specified");
3990         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3991                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3992                 return rte_flow_error_set(error, EINVAL,
3993                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3994                                           "can have only one fate actions in"
3995                                           " a flow");
3996         dev_priv = mlx5_dev_to_eswitch_info(dev);
3997         if (!dev_priv)
3998                 return rte_flow_error_set(error, rte_errno,
3999                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4000                                           NULL,
4001                                           "failed to obtain E-Switch info");
4002         port_id = action->conf;
4003         port = port_id->original ? dev->data->port_id : port_id->id;
4004         act_priv = mlx5_port_to_eswitch_info(port, false);
4005         if (!act_priv)
4006                 return rte_flow_error_set
4007                                 (error, rte_errno,
4008                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4009                                  "failed to obtain E-Switch port id for port");
4010         if (act_priv->domain_id != dev_priv->domain_id)
4011                 return rte_flow_error_set
4012                                 (error, EINVAL,
4013                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4014                                  "port does not belong to"
4015                                  " E-Switch being configured");
4016         return 0;
4017 }
4018
4019 /**
4020  * Get the maximum number of modify header actions.
4021  *
4022  * @param dev
4023  *   Pointer to rte_eth_dev structure.
4024  * @param flags
4025  *   Flags bits to check if root level.
4026  *
4027  * @return
4028  *   Max number of modify header actions device can support.
4029  */
4030 static inline unsigned int
4031 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4032                               uint64_t flags)
4033 {
4034         /*
4035          * There's no way to directly query the max capacity from FW.
4036          * The maximal value on root table should be assumed to be supported.
4037          */
4038         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4039                 return MLX5_MAX_MODIFY_NUM;
4040         else
4041                 return MLX5_ROOT_TBL_MODIFY_NUM;
4042 }
4043
/**
 * Validate the meter action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Pointer to the meter action.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_meter *am = action->conf;
	struct mlx5_flow_meter *fm;

	if (!am)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter action conf is NULL");

	/* Only a single meter per flow; meter chaining is not supported. */
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter chaining not support");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter with jump not support");
	/* mtr_en reflects device metering capability/configuration. */
	if (!priv->mtr_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "meter action not supported");
	fm = mlx5_flow_meter_find(priv, am->mtr_id);
	if (!fm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Meter not found");
	/*
	 * A meter already referenced by other flows (ref_cnt != 0) may only
	 * be shared by flows whose attributes do not conflict with the
	 * directions/domain the meter is already bound to: same transfer
	 * setting, or a pure egress/ingress flow filling the meter's
	 * still-unused direction.
	 */
	if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
	      (!fm->ingress && !attr->ingress && attr->egress) ||
	      (!fm->egress && !attr->egress && attr->ingress))))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes are either invalid "
					  "or have a conflict with current "
					  "meter attributes");
	return 0;
}
4105
4106 /**
4107  * Validate the age action.
4108  *
4109  * @param[in] action_flags
4110  *   Holds the actions detected until now.
4111  * @param[in] action
4112  *   Pointer to the age action.
4113  * @param[in] dev
4114  *   Pointer to the Ethernet device structure.
4115  * @param[out] error
4116  *   Pointer to error structure.
4117  *
4118  * @return
4119  *   0 on success, a negative errno value otherwise and rte_errno is set.
4120  */
4121 static int
4122 flow_dv_validate_action_age(uint64_t action_flags,
4123                             const struct rte_flow_action *action,
4124                             struct rte_eth_dev *dev,
4125                             struct rte_flow_error *error)
4126 {
4127         struct mlx5_priv *priv = dev->data->dev_private;
4128         const struct rte_flow_action_age *age = action->conf;
4129
4130         if (!priv->config.devx || priv->sh->cmng.counter_fallback)
4131                 return rte_flow_error_set(error, ENOTSUP,
4132                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4133                                           NULL,
4134                                           "age action not supported");
4135         if (!(action->conf))
4136                 return rte_flow_error_set(error, EINVAL,
4137                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4138                                           "configuration cannot be null");
4139         if (!(age->timeout))
4140                 return rte_flow_error_set(error, EINVAL,
4141                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4142                                           "invalid timeout value 0");
4143         if (action_flags & MLX5_FLOW_ACTION_AGE)
4144                 return rte_flow_error_set(error, EINVAL,
4145                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4146                                           "duplicate age actions set");
4147         return 0;
4148 }
4149
4150 /**
4151  * Validate the modify-header IPv4 DSCP actions.
4152  *
4153  * @param[in] action_flags
4154  *   Holds the actions detected until now.
4155  * @param[in] action
4156  *   Pointer to the modify action.
4157  * @param[in] item_flags
4158  *   Holds the items detected.
4159  * @param[out] error
4160  *   Pointer to error structure.
4161  *
4162  * @return
4163  *   0 on success, a negative errno value otherwise and rte_errno is set.
4164  */
4165 static int
4166 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4167                                          const struct rte_flow_action *action,
4168                                          const uint64_t item_flags,
4169                                          struct rte_flow_error *error)
4170 {
4171         int ret = 0;
4172
4173         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4174         if (!ret) {
4175                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4176                         return rte_flow_error_set(error, EINVAL,
4177                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4178                                                   NULL,
4179                                                   "no ipv4 item in pattern");
4180         }
4181         return ret;
4182 }
4183
4184 /**
4185  * Validate the modify-header IPv6 DSCP actions.
4186  *
4187  * @param[in] action_flags
4188  *   Holds the actions detected until now.
4189  * @param[in] action
4190  *   Pointer to the modify action.
4191  * @param[in] item_flags
4192  *   Holds the items detected.
4193  * @param[out] error
4194  *   Pointer to error structure.
4195  *
4196  * @return
4197  *   0 on success, a negative errno value otherwise and rte_errno is set.
4198  */
4199 static int
4200 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4201                                          const struct rte_flow_action *action,
4202                                          const uint64_t item_flags,
4203                                          struct rte_flow_error *error)
4204 {
4205         int ret = 0;
4206
4207         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4208         if (!ret) {
4209                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4210                         return rte_flow_error_set(error, EINVAL,
4211                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4212                                                   NULL,
4213                                                   "no ipv6 item in pattern");
4214         }
4215         return ret;
4216 }
4217
4218 /**
4219  * Match modify-header resource.
4220  *
4221  * @param list
4222  *   Pointer to the hash list.
4223  * @param entry
4224  *   Pointer to exist resource entry object.
4225  * @param key
4226  *   Key of the new entry.
4227  * @param ctx
4228  *   Pointer to new modify-header resource.
4229  *
4230  * @return
4231  *   0 on matching, non-zero otherwise.
4232  */
4233 int
4234 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4235                         struct mlx5_hlist_entry *entry,
4236                         uint64_t key __rte_unused, void *cb_ctx)
4237 {
4238         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4239         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4240         struct mlx5_flow_dv_modify_hdr_resource *resource =
4241                         container_of(entry, typeof(*resource), entry);
4242         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4243
4244         key_len += ref->actions_num * sizeof(ref->actions[0]);
4245         return ref->actions_num != resource->actions_num ||
4246                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4247 }
4248
4249 struct mlx5_hlist_entry *
4250 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4251                          void *cb_ctx)
4252 {
4253         struct mlx5_dev_ctx_shared *sh = list->ctx;
4254         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4255         struct mlx5dv_dr_domain *ns;
4256         struct mlx5_flow_dv_modify_hdr_resource *entry;
4257         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4258         int ret;
4259         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4260         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4261
4262         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4263                             SOCKET_ID_ANY);
4264         if (!entry) {
4265                 rte_flow_error_set(ctx->error, ENOMEM,
4266                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4267                                    "cannot allocate resource memory");
4268                 return NULL;
4269         }
4270         rte_memcpy(&entry->ft_type,
4271                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4272                    key_len + data_len);
4273         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4274                 ns = sh->fdb_domain;
4275         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4276                 ns = sh->tx_domain;
4277         else
4278                 ns = sh->rx_domain;
4279         ret = mlx5_flow_os_create_flow_action_modify_header
4280                                         (sh->ctx, ns, entry,
4281                                          data_len, &entry->action);
4282         if (ret) {
4283                 mlx5_free(entry);
4284                 rte_flow_error_set(ctx->error, ENOMEM,
4285                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4286                                    NULL, "cannot create modification action");
4287                 return NULL;
4288         }
4289         return &entry->entry;
4290 }
4291
/**
 * Validate the sample action.
 *
 * Walks the sub-action list of the sample configuration and validates
 * each sub-action against the flow attributes, then applies the
 * ingress/egress/transfer restrictions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the sample action.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_sample(uint64_t action_flags,
			       const struct rte_flow_action *action,
			       struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *dev_conf = &priv->config;
	const struct rte_flow_action_sample *sample = action->conf;
	const struct rte_flow_action *act;
	uint64_t sub_action_flags = 0;
	uint16_t queue_index = 0xFFFF;
	int actions_n = 0;
	int ret;
	/*
	 * NOTE(review): fdb_mirror appears to be a file-scope flag recording
	 * whether this flow is an E-Switch mirror, consumed by later
	 * validation steps - confirm ownership and thread-safety.
	 */
	fdb_mirror = 0;
	if (!sample)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be NULL");
	if (sample->ratio == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "ratio value starts from 1");
	/* Sampling requires DevX and the firmware sampler capability. */
	if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "sample action not supported");
	if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Multiple sample actions not "
					  "supported");
	/* Meter and jump must come after sample in the action list. */
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, meter should "
					  "be after sample action");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, jump should "
					  "be after sample action");
	/* Validate each sub-action of the sample mirror/sampler list. */
	act = sample->actions;
	for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  act, "too many actions");
		switch (act->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(act,
							      sub_action_flags,
							      dev,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Remember the queue for the hairpin check below. */
			queue_index = ((const struct rte_flow_action_queue *)
							(act->conf))->index;
			sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = flow_dv_validate_action_mark(dev, act,
							   sub_action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/* Extended metadata mode uses the extended mark. */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
				sub_action_flags |= MLX5_FLOW_ACTION_MARK |
						MLX5_FLOW_ACTION_MARK_EXT;
			else
				sub_action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      sub_action_flags,
							      act,
							      attr,
							      error);
			if (ret)
				return ret;
			sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap_decap
				(dev, NULL, act->conf, attr, &sub_action_flags,
				 &actions_n, error);
			if (ret < 0)
				return ret;
			++actions_n;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Doesn't support optional "
						  "action");
		}
	}
	if (attr->ingress && !attr->transfer) {
		/* A NIC ingress sample flow must forward to a queue. */
		if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Ingress must has a dest "
						  "QUEUE for Sample");
	} else if (attr->egress && !attr->transfer) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "Sample Only support Ingress "
					  "or E-Switch");
	} else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
		/* E-Switch mirroring: ratio 1, no queue, needs a port. */
		MLX5_ASSERT(attr->transfer);
		if (sample->ratio > 1)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "E-Switch doesn't support "
						  "any optional action "
						  "for sampling");
		fdb_mirror = 1;
		if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action QUEUE");
		if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "E-Switch must has a dest "
						  "port for mirroring");
	}
	/* Continue validation for Xcap actions.*/
	if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
	    (queue_index == 0xFFFF ||
	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
		/* Encap and decap together are only allowed on hairpin. */
		if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
		     MLX5_FLOW_XCAP_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap and decap "
						  "combination aren't "
						  "supported");
		if (!attr->transfer && attr->ingress && (sub_action_flags &
							MLX5_FLOW_ACTION_ENCAP))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap is not supported"
						  " for ingress traffic");
	}
	return 0;
}
4475
4476 /**
4477  * Find existing modify-header resource or create and register a new one.
4478  *
4479  * @param dev[in, out]
4480  *   Pointer to rte_eth_dev structure.
4481  * @param[in, out] resource
4482  *   Pointer to modify-header resource.
4483  * @parm[in, out] dev_flow
4484  *   Pointer to the dev_flow.
4485  * @param[out] error
4486  *   pointer to error structure.
4487  *
4488  * @return
4489  *   0 on success otherwise -errno and errno is set.
4490  */
4491 static int
4492 flow_dv_modify_hdr_resource_register
4493                         (struct rte_eth_dev *dev,
4494                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4495                          struct mlx5_flow *dev_flow,
4496                          struct rte_flow_error *error)
4497 {
4498         struct mlx5_priv *priv = dev->data->dev_private;
4499         struct mlx5_dev_ctx_shared *sh = priv->sh;
4500         uint32_t key_len = sizeof(*resource) -
4501                            offsetof(typeof(*resource), ft_type) +
4502                            resource->actions_num * sizeof(resource->actions[0]);
4503         struct mlx5_hlist_entry *entry;
4504         struct mlx5_flow_cb_ctx ctx = {
4505                 .error = error,
4506                 .data = resource,
4507         };
4508
4509         resource->flags = dev_flow->dv.group ? 0 :
4510                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4511         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4512                                     resource->flags))
4513                 return rte_flow_error_set(error, EOVERFLOW,
4514                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4515                                           "too many modify header items");
4516         resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4517         entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
4518         if (!entry)
4519                 return -rte_errno;
4520         resource = container_of(entry, typeof(*resource), entry);
4521         dev_flow->handle->dvh.modify_hdr = resource;
4522         return 0;
4523 }
4524
4525 /**
4526  * Get DV flow counter by index.
4527  *
4528  * @param[in] dev
4529  *   Pointer to the Ethernet device structure.
4530  * @param[in] idx
4531  *   mlx5 flow counter index in the container.
4532  * @param[out] ppool
4533  *   mlx5 flow counter pool in the container,
4534  *
4535  * @return
4536  *   Pointer to the counter, NULL otherwise.
4537  */
4538 static struct mlx5_flow_counter *
4539 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4540                            uint32_t idx,
4541                            struct mlx5_flow_counter_pool **ppool)
4542 {
4543         struct mlx5_priv *priv = dev->data->dev_private;
4544         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4545         struct mlx5_flow_counter_pool *pool;
4546
4547         /* Decrease to original index and clear shared bit. */
4548         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4549         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4550         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4551         MLX5_ASSERT(pool);
4552         if (ppool)
4553                 *ppool = pool;
4554         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4555 }
4556
4557 /**
4558  * Check the devx counter belongs to the pool.
4559  *
4560  * @param[in] pool
4561  *   Pointer to the counter pool.
4562  * @param[in] id
4563  *   The counter devx ID.
4564  *
4565  * @return
4566  *   True if counter belongs to the pool, false otherwise.
4567  */
4568 static bool
4569 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4570 {
4571         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4572                    MLX5_COUNTERS_PER_POOL;
4573
4574         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4575                 return true;
4576         return false;
4577 }
4578
4579 /**
4580  * Get a pool by devx counter ID.
4581  *
4582  * @param[in] cmng
4583  *   Pointer to the counter management.
4584  * @param[in] id
4585  *   The counter devx ID.
4586  *
4587  * @return
4588  *   The counter pool pointer if exists, NULL otherwise,
4589  */
4590 static struct mlx5_flow_counter_pool *
4591 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4592 {
4593         uint32_t i;
4594         struct mlx5_flow_counter_pool *pool = NULL;
4595
4596         rte_spinlock_lock(&cmng->pool_update_sl);
4597         /* Check last used pool. */
4598         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4599             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4600                 pool = cmng->pools[cmng->last_pool_idx];
4601                 goto out;
4602         }
4603         /* ID out of range means no suitable pool in the container. */
4604         if (id > cmng->max_id || id < cmng->min_id)
4605                 goto out;
4606         /*
4607          * Find the pool from the end of the container, since mostly counter
4608          * ID is sequence increasing, and the last pool should be the needed
4609          * one.
4610          */
4611         i = cmng->n_valid;
4612         while (i--) {
4613                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4614
4615                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4616                         pool = pool_tmp;
4617                         break;
4618                 }
4619         }
4620 out:
4621         rte_spinlock_unlock(&cmng->pool_update_sl);
4622         return pool;
4623 }
4624
4625 /**
4626  * Resize a counter container.
4627  *
4628  * @param[in] dev
4629  *   Pointer to the Ethernet device structure.
4630  *
4631  * @return
4632  *   0 on success, otherwise negative errno value and rte_errno is set.
4633  */
4634 static int
4635 flow_dv_container_resize(struct rte_eth_dev *dev)
4636 {
4637         struct mlx5_priv *priv = dev->data->dev_private;
4638         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4639         void *old_pools = cmng->pools;
4640         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4641         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4642         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4643
4644         if (!pools) {
4645                 rte_errno = ENOMEM;
4646                 return -ENOMEM;
4647         }
4648         if (old_pools)
4649                 memcpy(pools, old_pools, cmng->n *
4650                                        sizeof(struct mlx5_flow_counter_pool *));
4651         cmng->n = resize;
4652         cmng->pools = pools;
4653         if (old_pools)
4654                 mlx5_free(old_pools);
4655         return 0;
4656 }
4657
4658 /**
4659  * Query a devx flow counter.
4660  *
4661  * @param[in] dev
4662  *   Pointer to the Ethernet device structure.
4663  * @param[in] cnt
4664  *   Index to the flow counter.
4665  * @param[out] pkts
4666  *   The statistics value of packets.
4667  * @param[out] bytes
4668  *   The statistics value of bytes.
4669  *
4670  * @return
4671  *   0 on success, otherwise a negative errno value and rte_errno is set.
4672  */
4673 static inline int
4674 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4675                      uint64_t *bytes)
4676 {
4677         struct mlx5_priv *priv = dev->data->dev_private;
4678         struct mlx5_flow_counter_pool *pool = NULL;
4679         struct mlx5_flow_counter *cnt;
4680         int offset;
4681
4682         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4683         MLX5_ASSERT(pool);
4684         if (priv->sh->cmng.counter_fallback)
4685                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4686                                         0, pkts, bytes, 0, NULL, NULL, 0);
4687         rte_spinlock_lock(&pool->sl);
4688         if (!pool->raw) {
4689                 *pkts = 0;
4690                 *bytes = 0;
4691         } else {
4692                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4693                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4694                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4695         }
4696         rte_spinlock_unlock(&pool->sl);
4697         return 0;
4698 }
4699
/**
 * Create and initialize a new counter pool.
 *
 * The new pool is installed into the container under the pool_update_sl
 * lock; the container is resized first when it is already full.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dcs
 *   The devX counter handle, becomes the pool's min_dcs.
 * @param[in] age
 *   Whether the pool is for counter that was allocated for aging.
 *
 * @return
 *   The pool pointer on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Counter array, plus one age slot per counter when aging is on. */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	/* Container updates are serialized by the pool_update_sl lock. */
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* Grow the pool pointer array first when it is already full. */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		/* Track the devX counter ID range covered by all pools. */
		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
4762
/**
 * Prepare a new counter and/or a new counter pool.
 *
 * In fallback mode a single devX counter is allocated and attached to
 * its (possibly new) pool; otherwise a bulk of counters is allocated,
 * counter 0 is handed back and the rest are published on the free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] cnt_free
 *   Where to put the pointer of a new counter.
 * @param[in] age
 *   Whether the pool is for counter that was allocated for aging.
 *
 * @return
 *   The counter pool pointer and @p cnt_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		/* The devX object is owned by its counter slot from here. */
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Bulk allocation; 0x4 selects the bulk size - TODO confirm vs PRM. */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/* Stage counters 1..N-1 locally; counter 0 is returned directly. */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	/* Publish the staged counters under the per-type free-list lock. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
4836
/**
 * Allocate a flow counter.
 *
 * Pops a counter from the per-type free list, preparing a new pool when
 * the list is empty, and lazily creates the DV counter action on first
 * use. On any failure the counter is pushed back to the free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] age
 *   Whether the counter was allocated for aging.
 *
 * @return
 *   Index to flow counter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty: allocate a new pool which supplies cnt_free. */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback counters are 1:1 with devX objects. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	return cnt_idx;
err:
	/* Undo: return the popped counter to the free list. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
4916
4917 /**
4918  * Allocate a shared flow counter.
4919  *
4920  * @param[in] ctx
4921  *   Pointer to the shared counter configuration.
4922  * @param[in] data
4923  *   Pointer to save the allocated counter index.
4924  *
4925  * @return
4926  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4927  */
4928
4929 static int32_t
4930 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4931 {
4932         struct mlx5_shared_counter_conf *conf = ctx;
4933         struct rte_eth_dev *dev = conf->dev;
4934         struct mlx5_flow_counter *cnt;
4935
4936         data->dword = flow_dv_counter_alloc(dev, 0);
4937         data->dword |= MLX5_CNT_SHARED_OFFSET;
4938         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4939         cnt->shared_info.id = conf->id;
4940         return 0;
4941 }
4942
4943 /**
4944  * Get a shared flow counter.
4945  *
4946  * @param[in] dev
4947  *   Pointer to the Ethernet device structure.
4948  * @param[in] id
4949  *   Counter identifier.
4950  *
4951  * @return
4952  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4953  */
4954 static uint32_t
4955 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4956 {
4957         struct mlx5_priv *priv = dev->data->dev_private;
4958         struct mlx5_shared_counter_conf conf = {
4959                 .dev = dev,
4960                 .id = id,
4961         };
4962         union mlx5_l3t_data data = {
4963                 .dword = 0,
4964         };
4965
4966         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
4967                                flow_dv_counter_alloc_shared_cb, &conf);
4968         return data.dword;
4969 }
4970
4971 /**
4972  * Get age param from counter index.
4973  *
4974  * @param[in] dev
4975  *   Pointer to the Ethernet device structure.
4976  * @param[in] counter
4977  *   Index to the counter handler.
4978  *
4979  * @return
4980  *   The aging parameter specified for the counter index.
4981  */
4982 static struct mlx5_age_param*
4983 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
4984                                 uint32_t counter)
4985 {
4986         struct mlx5_flow_counter *cnt;
4987         struct mlx5_flow_counter_pool *pool = NULL;
4988
4989         flow_dv_counter_get_by_idx(dev, counter, &pool);
4990         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
4991         cnt = MLX5_POOL_GET_CNT(pool, counter);
4992         return MLX5_CNT_TO_AGE(cnt);
4993 }
4994
/**
 * Remove a flow counter from aged counter list.
 *
 * Moves the counter's age state to AGE_FREE. When the CAS from
 * AGE_CANDIDATE fails, the counter already left the candidate state
 * (presumably it aged out and sits on the aged list — the locked
 * TAILQ_REMOVE below handles that case).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 * @param[in] cnt
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
				uint32_t counter, struct mlx5_flow_counter *cnt)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	/* Fast path: still a candidate — atomically mark it free and leave. */
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/**
		 * We need the lock even if the age already timed out,
		 * since the counter may still be in process.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
5029
/**
 * Release a flow counter.
 *
 * For a shared counter the L3T entry reference is dropped first; the
 * counter is only recycled when the last reference goes away. Aged
 * counters are removed from the aging machinery before being returned
 * to a free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	/* Index 0 means "no counter". */
	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	/*
	 * Shared counter: clear the table entry; a non-zero return
	 * (presumably remaining references — confirm against the L3T
	 * implementation) means the counter must not be freed yet.
	 */
	if (IS_SHARED_CNT(counter) &&
	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
		return;
	if (pool->is_aged)
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	cnt->pool = pool;
	/*
	 * Put the counter back to list to be updated in none fallback mode.
	 * Currently, we are using two list alternately, while one is in query,
	 * add the freed counter to the other list based on the pool query_gen
	 * value. After query finishes, add counter the list to the global
	 * container counter list. The list changes while query starts. In
	 * this case, lock will not be needed as query callback and release
	 * function both operate with the different list.
	 *
	 */
	if (!priv->sh->cmng.counter_fallback) {
		/* Batch mode: return to the pool's generation list. */
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		/* Fallback mode: return to the global per-type free list. */
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
5080
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel context, or NULL for a non-tunnel flow.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] grp_info
 *   Group translation info (external/transfer/fdb_def_rule flags).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non root table.
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct mlx5_flow_tunnel *tunnel,
			    const struct rte_flow_attr *attributes,
			    struct flow_grp_info grp_info,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;
	int ret = 0;

#ifndef HAVE_MLX5DV_DR
	RTE_SET_USED(tunnel);
	RTE_SET_USED(grp_info);
	/* Without DR, only the root group (0) is available. */
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table = 0;

	/* Map the rte_flow group to the hardware table index. */
	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	/* Table 0 is the root table — report it via the return value. */
	if (!table)
		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
	/* MLX5_FLOW_PRIO_RSVD is a PMD-internal "any" priority sentinel. */
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	/* Exactly one direction must be set (XOR of the two flags). */
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return ret;
}
5158
5159 /**
5160  * Internal validation function. For validating both actions and items.
5161  *
5162  * @param[in] dev
5163  *   Pointer to the rte_eth_dev structure.
5164  * @param[in] attr
5165  *   Pointer to the flow attributes.
5166  * @param[in] items
5167  *   Pointer to the list of items.
5168  * @param[in] actions
5169  *   Pointer to the list of actions.
5170  * @param[in] external
5171  *   This flow rule is created by request external to PMD.
5172  * @param[in] hairpin
5173  *   Number of hairpin TX actions, 0 means classic flow.
5174  * @param[out] error
5175  *   Pointer to the error structure.
5176  *
5177  * @return
5178  *   0 on success, a negative errno value otherwise and rte_errno is set.
5179  */
5180 static int
5181 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5182                  const struct rte_flow_item items[],
5183                  const struct rte_flow_action actions[],
5184                  bool external, int hairpin, struct rte_flow_error *error)
5185 {
5186         int ret;
5187         uint64_t action_flags = 0;
5188         uint64_t item_flags = 0;
5189         uint64_t last_item = 0;
5190         uint8_t next_protocol = 0xff;
5191         uint16_t ether_type = 0;
5192         int actions_n = 0;
5193         uint8_t item_ipv6_proto = 0;
5194         const struct rte_flow_item *gre_item = NULL;
5195         const struct rte_flow_action_raw_decap *decap;
5196         const struct rte_flow_action_raw_encap *encap;
5197         const struct rte_flow_action_rss *rss;
5198         const struct rte_flow_item_tcp nic_tcp_mask = {
5199                 .hdr = {
5200                         .tcp_flags = 0xFF,
5201                         .src_port = RTE_BE16(UINT16_MAX),
5202                         .dst_port = RTE_BE16(UINT16_MAX),
5203                 }
5204         };
5205         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5206                 .hdr = {
5207                         .src_addr =
5208                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5209                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5210                         .dst_addr =
5211                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5212                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5213                         .vtc_flow = RTE_BE32(0xffffffff),
5214                         .proto = 0xff,
5215                         .hop_limits = 0xff,
5216                 },
5217                 .has_frag_ext = 1,
5218         };
5219         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5220                 .hdr = {
5221                         .common = {
5222                                 .u32 =
5223                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5224                                         .type = 0xFF,
5225                                         }).u32),
5226                         },
5227                         .dummy[0] = 0xffffffff,
5228                 },
5229         };
5230         struct mlx5_priv *priv = dev->data->dev_private;
5231         struct mlx5_dev_config *dev_conf = &priv->config;
5232         uint16_t queue_index = 0xFFFF;
5233         const struct rte_flow_item_vlan *vlan_m = NULL;
5234         int16_t rw_act_num = 0;
5235         uint64_t is_root;
5236         const struct mlx5_flow_tunnel *tunnel;
5237         struct flow_grp_info grp_info = {
5238                 .external = !!external,
5239                 .transfer = !!attr->transfer,
5240                 .fdb_def_rule = !!priv->fdb_def_rule,
5241         };
5242         const struct rte_eth_hairpin_conf *conf;
5243
5244         if (items == NULL)
5245                 return -1;
5246         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5247                 tunnel = flow_items_to_tunnel(items);
5248                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5249                                 MLX5_FLOW_ACTION_DECAP;
5250         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5251                 tunnel = flow_actions_to_tunnel(actions);
5252                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5253         } else {
5254                 tunnel = NULL;
5255         }
5256         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5257                                 (dev, tunnel, attr, items, actions);
5258         ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
5259         if (ret < 0)
5260                 return ret;
5261         is_root = (uint64_t)ret;
5262         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5263                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5264                 int type = items->type;
5265
5266                 if (!mlx5_flow_os_item_supported(type))
5267                         return rte_flow_error_set(error, ENOTSUP,
5268                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5269                                                   NULL, "item not supported");
5270                 switch (type) {
5271                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5272                         if (items[0].type != (typeof(items[0].type))
5273                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5274                                 return rte_flow_error_set
5275                                                 (error, EINVAL,
5276                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5277                                                 NULL, "MLX5 private items "
5278                                                 "must be the first");
5279                         break;
5280                 case RTE_FLOW_ITEM_TYPE_VOID:
5281                         break;
5282                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5283                         ret = flow_dv_validate_item_port_id
5284                                         (dev, items, attr, item_flags, error);
5285                         if (ret < 0)
5286                                 return ret;
5287                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5288                         break;
5289                 case RTE_FLOW_ITEM_TYPE_ETH:
5290                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5291                                                           true, error);
5292                         if (ret < 0)
5293                                 return ret;
5294                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5295                                              MLX5_FLOW_LAYER_OUTER_L2;
5296                         if (items->mask != NULL && items->spec != NULL) {
5297                                 ether_type =
5298                                         ((const struct rte_flow_item_eth *)
5299                                          items->spec)->type;
5300                                 ether_type &=
5301                                         ((const struct rte_flow_item_eth *)
5302                                          items->mask)->type;
5303                                 ether_type = rte_be_to_cpu_16(ether_type);
5304                         } else {
5305                                 ether_type = 0;
5306                         }
5307                         break;
5308                 case RTE_FLOW_ITEM_TYPE_VLAN:
5309                         ret = flow_dv_validate_item_vlan(items, item_flags,
5310                                                          dev, error);
5311                         if (ret < 0)
5312                                 return ret;
5313                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5314                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5315                         if (items->mask != NULL && items->spec != NULL) {
5316                                 ether_type =
5317                                         ((const struct rte_flow_item_vlan *)
5318                                          items->spec)->inner_type;
5319                                 ether_type &=
5320                                         ((const struct rte_flow_item_vlan *)
5321                                          items->mask)->inner_type;
5322                                 ether_type = rte_be_to_cpu_16(ether_type);
5323                         } else {
5324                                 ether_type = 0;
5325                         }
5326                         /* Store outer VLAN mask for of_push_vlan action. */
5327                         if (!tunnel)
5328                                 vlan_m = items->mask;
5329                         break;
5330                 case RTE_FLOW_ITEM_TYPE_IPV4:
5331                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5332                                                   &item_flags, &tunnel);
5333                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5334                                                          last_item, ether_type,
5335                                                          error);
5336                         if (ret < 0)
5337                                 return ret;
5338                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5339                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5340                         if (items->mask != NULL &&
5341                             ((const struct rte_flow_item_ipv4 *)
5342                              items->mask)->hdr.next_proto_id) {
5343                                 next_protocol =
5344                                         ((const struct rte_flow_item_ipv4 *)
5345                                          (items->spec))->hdr.next_proto_id;
5346                                 next_protocol &=
5347                                         ((const struct rte_flow_item_ipv4 *)
5348                                          (items->mask))->hdr.next_proto_id;
5349                         } else {
5350                                 /* Reset for inner layer. */
5351                                 next_protocol = 0xff;
5352                         }
5353                         break;
5354                 case RTE_FLOW_ITEM_TYPE_IPV6:
5355                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5356                                                   &item_flags, &tunnel);
5357                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5358                                                            last_item,
5359                                                            ether_type,
5360                                                            &nic_ipv6_mask,
5361                                                            error);
5362                         if (ret < 0)
5363                                 return ret;
5364                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5365                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5366                         if (items->mask != NULL &&
5367                             ((const struct rte_flow_item_ipv6 *)
5368                              items->mask)->hdr.proto) {
5369                                 item_ipv6_proto =
5370                                         ((const struct rte_flow_item_ipv6 *)
5371                                          items->spec)->hdr.proto;
5372                                 next_protocol =
5373                                         ((const struct rte_flow_item_ipv6 *)
5374                                          items->spec)->hdr.proto;
5375                                 next_protocol &=
5376                                         ((const struct rte_flow_item_ipv6 *)
5377                                          items->mask)->hdr.proto;
5378                         } else {
5379                                 /* Reset for inner layer. */
5380                                 next_protocol = 0xff;
5381                         }
5382                         break;
5383                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5384                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5385                                                                   item_flags,
5386                                                                   error);
5387                         if (ret < 0)
5388                                 return ret;
5389                         last_item = tunnel ?
5390                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5391                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5392                         if (items->mask != NULL &&
5393                             ((const struct rte_flow_item_ipv6_frag_ext *)
5394                              items->mask)->hdr.next_header) {
5395                                 next_protocol =
5396                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5397                                  items->spec)->hdr.next_header;
5398                                 next_protocol &=
5399                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5400                                  items->mask)->hdr.next_header;
5401                         } else {
5402                                 /* Reset for inner layer. */
5403                                 next_protocol = 0xff;
5404                         }
5405                         break;
5406                 case RTE_FLOW_ITEM_TYPE_TCP:
5407                         ret = mlx5_flow_validate_item_tcp
5408                                                 (items, item_flags,
5409                                                  next_protocol,
5410                                                  &nic_tcp_mask,
5411                                                  error);
5412                         if (ret < 0)
5413                                 return ret;
5414                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5415                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5416                         break;
5417                 case RTE_FLOW_ITEM_TYPE_UDP:
5418                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5419                                                           next_protocol,
5420                                                           error);
5421                         if (ret < 0)
5422                                 return ret;
5423                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5424                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5425                         break;
5426                 case RTE_FLOW_ITEM_TYPE_GRE:
5427                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5428                                                           next_protocol, error);
5429                         if (ret < 0)
5430                                 return ret;
5431                         gre_item = items;
5432                         last_item = MLX5_FLOW_LAYER_GRE;
5433                         break;
5434                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5435                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5436                                                             next_protocol,
5437                                                             error);
5438                         if (ret < 0)
5439                                 return ret;
5440                         last_item = MLX5_FLOW_LAYER_NVGRE;
5441                         break;
5442                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5443                         ret = mlx5_flow_validate_item_gre_key
5444                                 (items, item_flags, gre_item, error);
5445                         if (ret < 0)
5446                                 return ret;
5447                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5448                         break;
5449                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5450                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5451                                                             error);
5452                         if (ret < 0)
5453                                 return ret;
5454                         last_item = MLX5_FLOW_LAYER_VXLAN;
5455                         break;
5456                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5457                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5458                                                                 item_flags, dev,
5459                                                                 error);
5460                         if (ret < 0)
5461                                 return ret;
5462                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5463                         break;
5464                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5465                         ret = mlx5_flow_validate_item_geneve(items,
5466                                                              item_flags, dev,
5467                                                              error);
5468                         if (ret < 0)
5469                                 return ret;
5470                         last_item = MLX5_FLOW_LAYER_GENEVE;
5471                         break;
5472                 case RTE_FLOW_ITEM_TYPE_MPLS:
5473                         ret = mlx5_flow_validate_item_mpls(dev, items,
5474                                                            item_flags,
5475                                                            last_item, error);
5476                         if (ret < 0)
5477                                 return ret;
5478                         last_item = MLX5_FLOW_LAYER_MPLS;
5479                         break;
5480
5481                 case RTE_FLOW_ITEM_TYPE_MARK:
5482                         ret = flow_dv_validate_item_mark(dev, items, attr,
5483                                                          error);
5484                         if (ret < 0)
5485                                 return ret;
5486                         last_item = MLX5_FLOW_ITEM_MARK;
5487                         break;
5488                 case RTE_FLOW_ITEM_TYPE_META:
5489                         ret = flow_dv_validate_item_meta(dev, items, attr,
5490                                                          error);
5491                         if (ret < 0)
5492                                 return ret;
5493                         last_item = MLX5_FLOW_ITEM_METADATA;
5494                         break;
5495                 case RTE_FLOW_ITEM_TYPE_ICMP:
5496                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5497                                                            next_protocol,
5498                                                            error);
5499                         if (ret < 0)
5500                                 return ret;
5501                         last_item = MLX5_FLOW_LAYER_ICMP;
5502                         break;
5503                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5504                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5505                                                             next_protocol,
5506                                                             error);
5507                         if (ret < 0)
5508                                 return ret;
5509                         item_ipv6_proto = IPPROTO_ICMPV6;
5510                         last_item = MLX5_FLOW_LAYER_ICMP6;
5511                         break;
5512                 case RTE_FLOW_ITEM_TYPE_TAG:
5513                         ret = flow_dv_validate_item_tag(dev, items,
5514                                                         attr, error);
5515                         if (ret < 0)
5516                                 return ret;
5517                         last_item = MLX5_FLOW_ITEM_TAG;
5518                         break;
5519                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5520                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5521                         break;
5522                 case RTE_FLOW_ITEM_TYPE_GTP:
5523                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5524                                                         error);
5525                         if (ret < 0)
5526                                 return ret;
5527                         last_item = MLX5_FLOW_LAYER_GTP;
5528                         break;
5529                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5530                         /* Capacity will be checked in the translate stage. */
5531                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5532                                                             last_item,
5533                                                             ether_type,
5534                                                             &nic_ecpri_mask,
5535                                                             error);
5536                         if (ret < 0)
5537                                 return ret;
5538                         last_item = MLX5_FLOW_LAYER_ECPRI;
5539                         break;
5540                 default:
5541                         return rte_flow_error_set(error, ENOTSUP,
5542                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5543                                                   NULL, "item not supported");
5544                 }
5545                 item_flags |= last_item;
5546         }
5547         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5548                 int type = actions->type;
5549
5550                 if (!mlx5_flow_os_action_supported(type))
5551                         return rte_flow_error_set(error, ENOTSUP,
5552                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5553                                                   actions,
5554                                                   "action not supported");
5555                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5556                         return rte_flow_error_set(error, ENOTSUP,
5557                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5558                                                   actions, "too many actions");
5559                 switch (type) {
5560                 case RTE_FLOW_ACTION_TYPE_VOID:
5561                         break;
5562                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5563                         ret = flow_dv_validate_action_port_id(dev,
5564                                                               action_flags,
5565                                                               actions,
5566                                                               attr,
5567                                                               error);
5568                         if (ret)
5569                                 return ret;
5570                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5571                         ++actions_n;
5572                         break;
5573                 case RTE_FLOW_ACTION_TYPE_FLAG:
5574                         ret = flow_dv_validate_action_flag(dev, action_flags,
5575                                                            attr, error);
5576                         if (ret < 0)
5577                                 return ret;
5578                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5579                                 /* Count all modify-header actions as one. */
5580                                 if (!(action_flags &
5581                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5582                                         ++actions_n;
5583                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5584                                                 MLX5_FLOW_ACTION_MARK_EXT;
5585                         } else {
5586                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5587                                 ++actions_n;
5588                         }
5589                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5590                         break;
5591                 case RTE_FLOW_ACTION_TYPE_MARK:
5592                         ret = flow_dv_validate_action_mark(dev, actions,
5593                                                            action_flags,
5594                                                            attr, error);
5595                         if (ret < 0)
5596                                 return ret;
5597                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5598                                 /* Count all modify-header actions as one. */
5599                                 if (!(action_flags &
5600                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5601                                         ++actions_n;
5602                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5603                                                 MLX5_FLOW_ACTION_MARK_EXT;
5604                         } else {
5605                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5606                                 ++actions_n;
5607                         }
5608                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5609                         break;
5610                 case RTE_FLOW_ACTION_TYPE_SET_META:
5611                         ret = flow_dv_validate_action_set_meta(dev, actions,
5612                                                                action_flags,
5613                                                                attr, error);
5614                         if (ret < 0)
5615                                 return ret;
5616                         /* Count all modify-header actions as one action. */
5617                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5618                                 ++actions_n;
5619                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5620                         rw_act_num += MLX5_ACT_NUM_SET_META;
5621                         break;
5622                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5623                         ret = flow_dv_validate_action_set_tag(dev, actions,
5624                                                               action_flags,
5625                                                               attr, error);
5626                         if (ret < 0)
5627                                 return ret;
5628                         /* Count all modify-header actions as one action. */
5629                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5630                                 ++actions_n;
5631                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5632                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5633                         break;
5634                 case RTE_FLOW_ACTION_TYPE_DROP:
5635                         ret = mlx5_flow_validate_action_drop(action_flags,
5636                                                              attr, error);
5637                         if (ret < 0)
5638                                 return ret;
5639                         action_flags |= MLX5_FLOW_ACTION_DROP;
5640                         ++actions_n;
5641                         break;
5642                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5643                         ret = mlx5_flow_validate_action_queue(actions,
5644                                                               action_flags, dev,
5645                                                               attr, error);
5646                         if (ret < 0)
5647                                 return ret;
5648                         queue_index = ((const struct rte_flow_action_queue *)
5649                                                         (actions->conf))->index;
5650                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5651                         ++actions_n;
5652                         break;
5653                 case RTE_FLOW_ACTION_TYPE_RSS:
5654                         rss = actions->conf;
5655                         ret = mlx5_flow_validate_action_rss(actions,
5656                                                             action_flags, dev,
5657                                                             attr, item_flags,
5658                                                             error);
5659                         if (ret < 0)
5660                                 return ret;
5661                         if (rss != NULL && rss->queue_num)
5662                                 queue_index = rss->queue[0];
5663                         action_flags |= MLX5_FLOW_ACTION_RSS;
5664                         ++actions_n;
5665                         break;
5666                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5667                         ret =
5668                         mlx5_flow_validate_action_default_miss(action_flags,
5669                                         attr, error);
5670                         if (ret < 0)
5671                                 return ret;
5672                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5673                         ++actions_n;
5674                         break;
5675                 case RTE_FLOW_ACTION_TYPE_COUNT:
5676                         ret = flow_dv_validate_action_count(dev, error);
5677                         if (ret < 0)
5678                                 return ret;
5679                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5680                         ++actions_n;
5681                         break;
5682                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5683                         if (flow_dv_validate_action_pop_vlan(dev,
5684                                                              action_flags,
5685                                                              actions,
5686                                                              item_flags, attr,
5687                                                              error))
5688                                 return -rte_errno;
5689                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5690                         ++actions_n;
5691                         break;
5692                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5693                         ret = flow_dv_validate_action_push_vlan(dev,
5694                                                                 action_flags,
5695                                                                 vlan_m,
5696                                                                 actions, attr,
5697                                                                 error);
5698                         if (ret < 0)
5699                                 return ret;
5700                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5701                         ++actions_n;
5702                         break;
5703                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5704                         ret = flow_dv_validate_action_set_vlan_pcp
5705                                                 (action_flags, actions, error);
5706                         if (ret < 0)
5707                                 return ret;
5708                         /* Count PCP with push_vlan command. */
5709                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5710                         break;
5711                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5712                         ret = flow_dv_validate_action_set_vlan_vid
5713                                                 (item_flags, action_flags,
5714                                                  actions, error);
5715                         if (ret < 0)
5716                                 return ret;
5717                         /* Count VID with push_vlan command. */
5718                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5719                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5720                         break;
5721                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5722                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5723                         ret = flow_dv_validate_action_l2_encap(dev,
5724                                                                action_flags,
5725                                                                actions, attr,
5726                                                                error);
5727                         if (ret < 0)
5728                                 return ret;
5729                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5730                         ++actions_n;
5731                         break;
5732                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5733                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5734                         ret = flow_dv_validate_action_decap(dev, action_flags,
5735                                                             attr, error);
5736                         if (ret < 0)
5737                                 return ret;
5738                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5739                         ++actions_n;
5740                         break;
5741                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5742                         ret = flow_dv_validate_action_raw_encap_decap
5743                                 (dev, NULL, actions->conf, attr, &action_flags,
5744                                  &actions_n, error);
5745                         if (ret < 0)
5746                                 return ret;
5747                         break;
5748                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5749                         decap = actions->conf;
5750                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5751                                 ;
5752                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5753                                 encap = NULL;
5754                                 actions--;
5755                         } else {
5756                                 encap = actions->conf;
5757                         }
5758                         ret = flow_dv_validate_action_raw_encap_decap
5759                                            (dev,
5760                                             decap ? decap : &empty_decap, encap,
5761                                             attr, &action_flags, &actions_n,
5762                                             error);
5763                         if (ret < 0)
5764                                 return ret;
5765                         break;
5766                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5767                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5768                         ret = flow_dv_validate_action_modify_mac(action_flags,
5769                                                                  actions,
5770                                                                  item_flags,
5771                                                                  error);
5772                         if (ret < 0)
5773                                 return ret;
5774                         /* Count all modify-header actions as one action. */
5775                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5776                                 ++actions_n;
5777                         action_flags |= actions->type ==
5778                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5779                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5780                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5781                         /*
5782                          * Even if the source and destination MAC addresses have
5783                          * overlap in the header with 4B alignment, the convert
5784                          * function will handle them separately and 4 SW actions
5785                          * will be created. And 2 actions will be added each
5786                          * time no matter how many bytes of address will be set.
5787                          */
5788                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5789                         break;
5790                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5791                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5792                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5793                                                                   actions,
5794                                                                   item_flags,
5795                                                                   error);
5796                         if (ret < 0)
5797                                 return ret;
5798                         /* Count all modify-header actions as one action. */
5799                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5800                                 ++actions_n;
5801                         action_flags |= actions->type ==
5802                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5803                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5804                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5805                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5806                         break;
5807                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5808                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5809                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5810                                                                   actions,
5811                                                                   item_flags,
5812                                                                   error);
5813                         if (ret < 0)
5814                                 return ret;
5815                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5816                                 return rte_flow_error_set(error, ENOTSUP,
5817                                         RTE_FLOW_ERROR_TYPE_ACTION,
5818                                         actions,
5819                                         "Can't change header "
5820                                         "with ICMPv6 proto");
5821                         /* Count all modify-header actions as one action. */
5822                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5823                                 ++actions_n;
5824                         action_flags |= actions->type ==
5825                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5826                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5827                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5828                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5829                         break;
5830                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5831                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5832                         ret = flow_dv_validate_action_modify_tp(action_flags,
5833                                                                 actions,
5834                                                                 item_flags,
5835                                                                 error);
5836                         if (ret < 0)
5837                                 return ret;
5838                         /* Count all modify-header actions as one action. */
5839                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5840                                 ++actions_n;
5841                         action_flags |= actions->type ==
5842                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5843                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5844                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5845                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5846                         break;
5847                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5848                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5849                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5850                                                                  actions,
5851                                                                  item_flags,
5852                                                                  error);
5853                         if (ret < 0)
5854                                 return ret;
5855                         /* Count all modify-header actions as one action. */
5856                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5857                                 ++actions_n;
5858                         action_flags |= actions->type ==
5859                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5860                                                 MLX5_FLOW_ACTION_SET_TTL :
5861                                                 MLX5_FLOW_ACTION_DEC_TTL;
5862                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5863                         break;
5864                 case RTE_FLOW_ACTION_TYPE_JUMP:
5865                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5866                                                            action_flags,
5867                                                            attr, external,
5868                                                            error);
5869                         if (ret)
5870                                 return ret;
5871                         ++actions_n;
5872                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5873                         break;
5874                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5875                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5876                         ret = flow_dv_validate_action_modify_tcp_seq
5877                                                                 (action_flags,
5878                                                                  actions,
5879                                                                  item_flags,
5880                                                                  error);
5881                         if (ret < 0)
5882                                 return ret;
5883                         /* Count all modify-header actions as one action. */
5884                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5885                                 ++actions_n;
5886                         action_flags |= actions->type ==
5887                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5888                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5889                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5890                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5891                         break;
5892                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5893                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5894                         ret = flow_dv_validate_action_modify_tcp_ack
5895                                                                 (action_flags,
5896                                                                  actions,
5897                                                                  item_flags,
5898                                                                  error);
5899                         if (ret < 0)
5900                                 return ret;
5901                         /* Count all modify-header actions as one action. */
5902                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5903                                 ++actions_n;
5904                         action_flags |= actions->type ==
5905                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5906                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5907                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5908                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5909                         break;
5910                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5911                         break;
5912                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5913                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5914                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5915                         break;
5916                 case RTE_FLOW_ACTION_TYPE_METER:
5917                         ret = mlx5_flow_validate_action_meter(dev,
5918                                                               action_flags,
5919                                                               actions, attr,
5920                                                               error);
5921                         if (ret < 0)
5922                                 return ret;
5923                         action_flags |= MLX5_FLOW_ACTION_METER;
5924                         ++actions_n;
5925                         /* Meter action will add one more TAG action. */
5926                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5927                         break;
5928                 case RTE_FLOW_ACTION_TYPE_AGE:
5929                         ret = flow_dv_validate_action_age(action_flags,
5930                                                           actions, dev,
5931                                                           error);
5932                         if (ret < 0)
5933                                 return ret;
5934                         action_flags |= MLX5_FLOW_ACTION_AGE;
5935                         ++actions_n;
5936                         break;
5937                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5938                         ret = flow_dv_validate_action_modify_ipv4_dscp
5939                                                          (action_flags,
5940                                                           actions,
5941                                                           item_flags,
5942                                                           error);
5943                         if (ret < 0)
5944                                 return ret;
5945                         /* Count all modify-header actions as one action. */
5946                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5947                                 ++actions_n;
5948                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5949                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5950                         break;
5951                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5952                         ret = flow_dv_validate_action_modify_ipv6_dscp
5953                                                                 (action_flags,
5954                                                                  actions,
5955                                                                  item_flags,
5956                                                                  error);
5957                         if (ret < 0)
5958                                 return ret;
5959                         /* Count all modify-header actions as one action. */
5960                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5961                                 ++actions_n;
5962                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5963                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5964                         break;
5965                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
5966                         ret = flow_dv_validate_action_sample(action_flags,
5967                                                              actions, dev,
5968                                                              attr, error);
5969                         if (ret < 0)
5970                                 return ret;
5971                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
5972                         ++actions_n;
5973                         break;
5974                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
5975                         if (actions[0].type != (typeof(actions[0].type))
5976                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
5977                                 return rte_flow_error_set
5978                                                 (error, EINVAL,
5979                                                 RTE_FLOW_ERROR_TYPE_ACTION,
5980                                                 NULL, "MLX5 private action "
5981                                                 "must be the first");
5982
5983                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5984                         break;
5985                 default:
5986                         return rte_flow_error_set(error, ENOTSUP,
5987                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5988                                                   actions,
5989                                                   "action not supported");
5990                 }
5991         }
5992         /*
5993          * Validate actions in flow rules
5994          * - Explicit decap action is prohibited by the tunnel offload API.
5995          * - Drop action in tunnel steer rule is prohibited by the API.
 5996          * - Application cannot use MARK action because its value can mask
 5997          *   tunnel default miss notification.
5998          * - JUMP in tunnel match rule has no support in current PMD
5999          *   implementation.
6000          * - TAG & META are reserved for future uses.
6001          */
6002         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6003                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6004                                             MLX5_FLOW_ACTION_MARK     |
6005                                             MLX5_FLOW_ACTION_SET_TAG  |
6006                                             MLX5_FLOW_ACTION_SET_META |
6007                                             MLX5_FLOW_ACTION_DROP;
6008
6009                 if (action_flags & bad_actions_mask)
6010                         return rte_flow_error_set
6011                                         (error, EINVAL,
6012                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6013                                         "Invalid RTE action in tunnel "
6014                                         "set decap rule");
6015                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6016                         return rte_flow_error_set
6017                                         (error, EINVAL,
6018                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6019                                         "tunnel set decap rule must terminate "
6020                                         "with JUMP");
6021                 if (!attr->ingress)
6022                         return rte_flow_error_set
6023                                         (error, EINVAL,
6024                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6025                                         "tunnel flows for ingress traffic only");
6026         }
6027         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6028                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6029                                             MLX5_FLOW_ACTION_MARK    |
6030                                             MLX5_FLOW_ACTION_SET_TAG |
6031                                             MLX5_FLOW_ACTION_SET_META;
6032
6033                 if (action_flags & bad_actions_mask)
6034                         return rte_flow_error_set
6035                                         (error, EINVAL,
6036                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6037                                         "Invalid RTE action in tunnel "
6038                                         "set match rule");
6039         }
6040         /*
6041          * Validate the drop action mutual exclusion with other actions.
6042          * Drop action is mutually-exclusive with any other action, except for
6043          * Count action.
6044          */
6045         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6046             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6047                 return rte_flow_error_set(error, EINVAL,
6048                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6049                                           "Drop action is mutually-exclusive "
6050                                           "with any other action, except for "
6051                                           "Count action");
6052         /* Eswitch has few restrictions on using items and actions */
6053         if (attr->transfer) {
6054                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6055                     action_flags & MLX5_FLOW_ACTION_FLAG)
6056                         return rte_flow_error_set(error, ENOTSUP,
6057                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6058                                                   NULL,
6059                                                   "unsupported action FLAG");
6060                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6061                     action_flags & MLX5_FLOW_ACTION_MARK)
6062                         return rte_flow_error_set(error, ENOTSUP,
6063                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6064                                                   NULL,
6065                                                   "unsupported action MARK");
6066                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6067                         return rte_flow_error_set(error, ENOTSUP,
6068                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6069                                                   NULL,
6070                                                   "unsupported action QUEUE");
6071                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6072                         return rte_flow_error_set(error, ENOTSUP,
6073                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6074                                                   NULL,
6075                                                   "unsupported action RSS");
6076                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6077                         return rte_flow_error_set(error, EINVAL,
6078                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6079                                                   actions,
6080                                                   "no fate action is found");
6081         } else {
6082                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6083                         return rte_flow_error_set(error, EINVAL,
6084                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6085                                                   actions,
6086                                                   "no fate action is found");
6087         }
6088         /*
6089          * Continue validation for Xcap and VLAN actions.
6090          * If hairpin is working in explicit TX rule mode, there is no actions
6091          * splitting and the validation of hairpin ingress flow should be the
6092          * same as other standard flows.
6093          */
6094         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6095                              MLX5_FLOW_VLAN_ACTIONS)) &&
6096             (queue_index == 0xFFFF ||
6097              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6098              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6099              conf->tx_explicit != 0))) {
6100                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6101                     MLX5_FLOW_XCAP_ACTIONS)
6102                         return rte_flow_error_set(error, ENOTSUP,
6103                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6104                                                   NULL, "encap and decap "
6105                                                   "combination aren't supported");
6106                 if (!attr->transfer && attr->ingress) {
6107                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6108                                 return rte_flow_error_set
6109                                                 (error, ENOTSUP,
6110                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6111                                                  NULL, "encap is not supported"
6112                                                  " for ingress traffic");
6113                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6114                                 return rte_flow_error_set
6115                                                 (error, ENOTSUP,
6116                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6117                                                  NULL, "push VLAN action not "
6118                                                  "supported for ingress");
6119                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6120                                         MLX5_FLOW_VLAN_ACTIONS)
6121                                 return rte_flow_error_set
6122                                                 (error, ENOTSUP,
6123                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6124                                                  NULL, "no support for "
6125                                                  "multiple VLAN actions");
6126                 }
6127         }
6128         /*
6129          * Hairpin flow will add one more TAG action in TX implicit mode.
6130          * In TX explicit mode, there will be no hairpin flow ID.
6131          */
6132         if (hairpin > 0)
6133                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6134         /* extra metadata enabled: one more TAG action will be add. */
6135         if (dev_conf->dv_flow_en &&
6136             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6137             mlx5_flow_ext_mreg_supported(dev))
6138                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6139         if ((uint32_t)rw_act_num >
6140                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6141                 return rte_flow_error_set(error, ENOTSUP,
6142                                           RTE_FLOW_ERROR_TYPE_ACTION,
6143                                           NULL, "too many header modify"
6144                                           " actions to support");
6145         }
6146         return 0;
6147 }
6148
6149 /**
6150  * Internal preparation function. Allocates the DV flow size,
6151  * this size is constant.
6152  *
6153  * @param[in] dev
6154  *   Pointer to the rte_eth_dev structure.
6155  * @param[in] attr
6156  *   Pointer to the flow attributes.
6157  * @param[in] items
6158  *   Pointer to the list of items.
6159  * @param[in] actions
6160  *   Pointer to the list of actions.
6161  * @param[out] error
6162  *   Pointer to the error structure.
6163  *
6164  * @return
6165  *   Pointer to mlx5_flow object on success,
6166  *   otherwise NULL and rte_errno is set.
6167  */
6168 static struct mlx5_flow *
6169 flow_dv_prepare(struct rte_eth_dev *dev,
6170                 const struct rte_flow_attr *attr __rte_unused,
6171                 const struct rte_flow_item items[] __rte_unused,
6172                 const struct rte_flow_action actions[] __rte_unused,
6173                 struct rte_flow_error *error)
6174 {
6175         uint32_t handle_idx = 0;
6176         struct mlx5_flow *dev_flow;
6177         struct mlx5_flow_handle *dev_handle;
6178         struct mlx5_priv *priv = dev->data->dev_private;
6179         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6180
6181         MLX5_ASSERT(wks);
6182         /* In case of corrupting the memory. */
6183         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6184                 rte_flow_error_set(error, ENOSPC,
6185                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6186                                    "not free temporary device flow");
6187                 return NULL;
6188         }
6189         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6190                                    &handle_idx);
6191         if (!dev_handle) {
6192                 rte_flow_error_set(error, ENOMEM,
6193                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6194                                    "not enough memory to create flow handle");
6195                 return NULL;
6196         }
6197         MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6198         dev_flow = &wks->flows[wks->flow_idx++];
6199         dev_flow->handle = dev_handle;
6200         dev_flow->handle_idx = handle_idx;
6201         /*
6202          * In some old rdma-core releases, before continuing, a check of the
6203          * length of matching parameter will be done at first. It needs to use
6204          * the length without misc4 param. If the flow has misc4 support, then
6205          * the length needs to be adjusted accordingly. Each param member is
6206          * aligned with a 64B boundary naturally.
6207          */
6208         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6209                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6210         /*
6211          * The matching value needs to be cleared to 0 before using. In the
6212          * past, it will be automatically cleared when using rte_*alloc
6213          * API. The time consumption will be almost the same as before.
6214          */
6215         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6216         dev_flow->ingress = attr->ingress;
6217         dev_flow->dv.transfer = attr->transfer;
6218         return dev_flow;
6219 }
6220
6221 #ifdef RTE_LIBRTE_MLX5_DEBUG
6222 /**
6223  * Sanity check for match mask and value. Similar to check_valid_spec() in
6224  * kernel driver. If unmasked bit is present in value, it returns failure.
6225  *
6226  * @param match_mask
6227  *   pointer to match mask buffer.
6228  * @param match_value
6229  *   pointer to match value buffer.
6230  *
6231  * @return
6232  *   0 if valid, -EINVAL otherwise.
6233  */
6234 static int
6235 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6236 {
6237         uint8_t *m = match_mask;
6238         uint8_t *v = match_value;
6239         unsigned int i;
6240
6241         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6242                 if (v[i] & ~m[i]) {
6243                         DRV_LOG(ERR,
6244                                 "match_value differs from match_criteria"
6245                                 " %p[%u] != %p[%u]",
6246                                 match_value, i, match_mask, i);
6247                         return -EINVAL;
6248                 }
6249         }
6250         return 0;
6251 }
6252 #endif
6253
6254 /**
6255  * Add match of ip_version.
6256  *
6257  * @param[in] group
6258  *   Flow group.
6259  * @param[in] headers_v
6260  *   Values header pointer.
6261  * @param[in] headers_m
6262  *   Masks header pointer.
6263  * @param[in] ip_version
6264  *   The IP version to set.
6265  */
6266 static inline void
6267 flow_dv_set_match_ip_version(uint32_t group,
6268                              void *headers_v,
6269                              void *headers_m,
6270                              uint8_t ip_version)
6271 {
6272         if (group == 0)
6273                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6274         else
6275                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6276                          ip_version);
6277         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6278         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6279         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6280 }
6281
6282 /**
6283  * Add Ethernet item to matcher and to the value.
6284  *
6285  * @param[in, out] matcher
6286  *   Flow matcher.
6287  * @param[in, out] key
6288  *   Flow matcher value.
6289  * @param[in] item
6290  *   Flow pattern to translate.
6291  * @param[in] inner
6292  *   Item is inner pattern.
6293  */
6294 static void
6295 flow_dv_translate_item_eth(void *matcher, void *key,
6296                            const struct rte_flow_item *item, int inner,
6297                            uint32_t group)
6298 {
6299         const struct rte_flow_item_eth *eth_m = item->mask;
6300         const struct rte_flow_item_eth *eth_v = item->spec;
6301         const struct rte_flow_item_eth nic_mask = {
6302                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6303                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6304                 .type = RTE_BE16(0xffff),
6305                 .has_vlan = 0,
6306         };
6307         void *hdrs_m;
6308         void *hdrs_v;
6309         char *l24_v;
6310         unsigned int i;
6311
6312         if (!eth_v)
6313                 return;
6314         if (!eth_m)
6315                 eth_m = &nic_mask;
6316         if (inner) {
6317                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6318                                          inner_headers);
6319                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6320         } else {
6321                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6322                                          outer_headers);
6323                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6324         }
6325         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6326                &eth_m->dst, sizeof(eth_m->dst));
6327         /* The value must be in the range of the mask. */
6328         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6329         for (i = 0; i < sizeof(eth_m->dst); ++i)
6330                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6331         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6332                &eth_m->src, sizeof(eth_m->src));
6333         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6334         /* The value must be in the range of the mask. */
6335         for (i = 0; i < sizeof(eth_m->dst); ++i)
6336                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6337         /*
6338          * HW supports match on one Ethertype, the Ethertype following the last
6339          * VLAN tag of the packet (see PRM).
6340          * Set match on ethertype only if ETH header is not followed by VLAN.
6341          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6342          * ethertype, and use ip_version field instead.
6343          * eCPRI over Ether layer will use type value 0xAEFE.
6344          */
6345         if (eth_m->type == 0xFFFF) {
6346                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
6347                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6348                 switch (eth_v->type) {
6349                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6350                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6351                         return;
6352                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6353                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6354                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6355                         return;
6356                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6357                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6358                         return;
6359                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6360                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6361                         return;
6362                 default:
6363                         break;
6364                 }
6365         }
6366         if (eth_m->has_vlan) {
6367                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6368                 if (eth_v->has_vlan) {
6369                         /*
6370                          * Here, when also has_more_vlan field in VLAN item is
6371                          * not set, only single-tagged packets will be matched.
6372                          */
6373                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6374                         return;
6375                 }
6376         }
6377         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6378                  rte_be_to_cpu_16(eth_m->type));
6379         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6380         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6381 }
6382
6383 /**
6384  * Add VLAN item to matcher and to the value.
6385  *
6386  * @param[in, out] dev_flow
6387  *   Flow descriptor.
6388  * @param[in, out] matcher
6389  *   Flow matcher.
6390  * @param[in, out] key
6391  *   Flow matcher value.
6392  * @param[in] item
6393  *   Flow pattern to translate.
6394  * @param[in] inner
6395  *   Item is inner pattern.
6396  */
6397 static void
6398 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6399                             void *matcher, void *key,
6400                             const struct rte_flow_item *item,
6401                             int inner, uint32_t group)
6402 {
6403         const struct rte_flow_item_vlan *vlan_m = item->mask;
6404         const struct rte_flow_item_vlan *vlan_v = item->spec;
6405         void *hdrs_m;
6406         void *hdrs_v;
6407         uint16_t tci_m;
6408         uint16_t tci_v;
6409
6410         if (inner) {
6411                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6412                                          inner_headers);
6413                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6414         } else {
6415                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6416                                          outer_headers);
6417                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6418                 /*
6419                  * This is workaround, masks are not supported,
6420                  * and pre-validated.
6421                  */
6422                 if (vlan_v)
6423                         dev_flow->handle->vf_vlan.tag =
6424                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6425         }
6426         /*
6427          * When VLAN item exists in flow, mark packet as tagged,
6428          * even if TCI is not specified.
6429          */
6430         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6431                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6432                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6433         }
6434         if (!vlan_v)
6435                 return;
6436         if (!vlan_m)
6437                 vlan_m = &rte_flow_item_vlan_mask;
6438         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6439         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6440         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6441         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6442         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6443         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6444         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6445         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6446         /*
6447          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6448          * ethertype, and use ip_version field instead.
6449          */
6450         if (vlan_m->inner_type == 0xFFFF) {
6451                 switch (vlan_v->inner_type) {
6452                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6453                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6454                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6455                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6456                         return;
6457                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6458                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6459                         return;
6460                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6461                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6462                         return;
6463                 default:
6464                         break;
6465                 }
6466         }
6467         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6468                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6469                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6470                 /* Only one vlan_tag bit can be set. */
6471                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6472                 return;
6473         }
6474         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6475                  rte_be_to_cpu_16(vlan_m->inner_type));
6476         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6477                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6478 }
6479
6480 /**
6481  * Add IPV4 item to matcher and to the value.
6482  *
6483  * @param[in, out] matcher
6484  *   Flow matcher.
6485  * @param[in, out] key
6486  *   Flow matcher value.
6487  * @param[in] item
6488  *   Flow pattern to translate.
6489  * @param[in] inner
6490  *   Item is inner pattern.
6491  * @param[in] group
6492  *   The group to insert the rule.
6493  */
6494 static void
6495 flow_dv_translate_item_ipv4(void *matcher, void *key,
6496                             const struct rte_flow_item *item,
6497                             int inner, uint32_t group)
6498 {
6499         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6500         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6501         const struct rte_flow_item_ipv4 nic_mask = {
6502                 .hdr = {
6503                         .src_addr = RTE_BE32(0xffffffff),
6504                         .dst_addr = RTE_BE32(0xffffffff),
6505                         .type_of_service = 0xff,
6506                         .next_proto_id = 0xff,
6507                         .time_to_live = 0xff,
6508                 },
6509         };
6510         void *headers_m;
6511         void *headers_v;
6512         char *l24_m;
6513         char *l24_v;
6514         uint8_t tos;
6515
6516         if (inner) {
6517                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6518                                          inner_headers);
6519                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6520         } else {
6521                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6522                                          outer_headers);
6523                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6524         }
6525         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6526         if (!ipv4_v)
6527                 return;
6528         if (!ipv4_m)
6529                 ipv4_m = &nic_mask;
6530         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6531                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6532         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6533                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6534         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6535         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6536         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6537                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6538         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6539                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6540         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6541         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6542         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6543         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6544                  ipv4_m->hdr.type_of_service);
6545         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6546         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6547                  ipv4_m->hdr.type_of_service >> 2);
6548         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6549         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6550                  ipv4_m->hdr.next_proto_id);
6551         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6552                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6553         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6554                  ipv4_m->hdr.time_to_live);
6555         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6556                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6557         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6558                  !!(ipv4_m->hdr.fragment_offset));
6559         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6560                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6561 }
6562
6563 /**
6564  * Add IPV6 item to matcher and to the value.
6565  *
6566  * @param[in, out] matcher
6567  *   Flow matcher.
6568  * @param[in, out] key
6569  *   Flow matcher value.
6570  * @param[in] item
6571  *   Flow pattern to translate.
6572  * @param[in] inner
6573  *   Item is inner pattern.
6574  * @param[in] group
6575  *   The group to insert the rule.
6576  */
6577 static void
6578 flow_dv_translate_item_ipv6(void *matcher, void *key,
6579                             const struct rte_flow_item *item,
6580                             int inner, uint32_t group)
6581 {
6582         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6583         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6584         const struct rte_flow_item_ipv6 nic_mask = {
6585                 .hdr = {
6586                         .src_addr =
6587                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6588                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6589                         .dst_addr =
6590                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6591                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6592                         .vtc_flow = RTE_BE32(0xffffffff),
6593                         .proto = 0xff,
6594                         .hop_limits = 0xff,
6595                 },
6596         };
6597         void *headers_m;
6598         void *headers_v;
6599         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6600         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6601         char *l24_m;
6602         char *l24_v;
6603         uint32_t vtc_m;
6604         uint32_t vtc_v;
6605         int i;
6606         int size;
6607
6608         if (inner) {
6609                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6610                                          inner_headers);
6611                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6612         } else {
6613                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6614                                          outer_headers);
6615                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6616         }
6617         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6618         if (!ipv6_v)
6619                 return;
6620         if (!ipv6_m)
6621                 ipv6_m = &nic_mask;
6622         size = sizeof(ipv6_m->hdr.dst_addr);
6623         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6624                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6625         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6626                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6627         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6628         for (i = 0; i < size; ++i)
6629                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6630         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6631                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6632         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6633                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6634         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6635         for (i = 0; i < size; ++i)
6636                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6637         /* TOS. */
6638         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6639         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6640         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6641         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6642         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6643         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6644         /* Label. */
6645         if (inner) {
6646                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6647                          vtc_m);
6648                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6649                          vtc_v);
6650         } else {
6651                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6652                          vtc_m);
6653                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6654                          vtc_v);
6655         }
6656         /* Protocol. */
6657         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6658                  ipv6_m->hdr.proto);
6659         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6660                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6661         /* Hop limit. */
6662         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6663                  ipv6_m->hdr.hop_limits);
6664         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6665                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6666         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6667                  !!(ipv6_m->has_frag_ext));
6668         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6669                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6670 }
6671
6672 /**
6673  * Add IPV6 fragment extension item to matcher and to the value.
6674  *
6675  * @param[in, out] matcher
6676  *   Flow matcher.
6677  * @param[in, out] key
6678  *   Flow matcher value.
6679  * @param[in] item
6680  *   Flow pattern to translate.
6681  * @param[in] inner
6682  *   Item is inner pattern.
6683  */
6684 static void
6685 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6686                                      const struct rte_flow_item *item,
6687                                      int inner)
6688 {
6689         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6690         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6691         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6692                 .hdr = {
6693                         .next_header = 0xff,
6694                         .frag_data = RTE_BE16(0xffff),
6695                 },
6696         };
6697         void *headers_m;
6698         void *headers_v;
6699
6700         if (inner) {
6701                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6702                                          inner_headers);
6703                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6704         } else {
6705                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6706                                          outer_headers);
6707                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6708         }
6709         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6710         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6711         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6712         if (!ipv6_frag_ext_v)
6713                 return;
6714         if (!ipv6_frag_ext_m)
6715                 ipv6_frag_ext_m = &nic_mask;
6716         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6717                  ipv6_frag_ext_m->hdr.next_header);
6718         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6719                  ipv6_frag_ext_v->hdr.next_header &
6720                  ipv6_frag_ext_m->hdr.next_header);
6721 }
6722
6723 /**
6724  * Add TCP item to matcher and to the value.
6725  *
6726  * @param[in, out] matcher
6727  *   Flow matcher.
6728  * @param[in, out] key
6729  *   Flow matcher value.
6730  * @param[in] item
6731  *   Flow pattern to translate.
6732  * @param[in] inner
6733  *   Item is inner pattern.
6734  */
6735 static void
6736 flow_dv_translate_item_tcp(void *matcher, void *key,
6737                            const struct rte_flow_item *item,
6738                            int inner)
6739 {
6740         const struct rte_flow_item_tcp *tcp_m = item->mask;
6741         const struct rte_flow_item_tcp *tcp_v = item->spec;
6742         void *headers_m;
6743         void *headers_v;
6744
6745         if (inner) {
6746                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6747                                          inner_headers);
6748                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6749         } else {
6750                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6751                                          outer_headers);
6752                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6753         }
6754         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6755         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6756         if (!tcp_v)
6757                 return;
6758         if (!tcp_m)
6759                 tcp_m = &rte_flow_item_tcp_mask;
6760         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6761                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6762         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6763                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6764         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6765                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6766         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6767                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6768         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6769                  tcp_m->hdr.tcp_flags);
6770         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6771                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6772 }
6773
6774 /**
6775  * Add UDP item to matcher and to the value.
6776  *
6777  * @param[in, out] matcher
6778  *   Flow matcher.
6779  * @param[in, out] key
6780  *   Flow matcher value.
6781  * @param[in] item
6782  *   Flow pattern to translate.
6783  * @param[in] inner
6784  *   Item is inner pattern.
6785  */
6786 static void
6787 flow_dv_translate_item_udp(void *matcher, void *key,
6788                            const struct rte_flow_item *item,
6789                            int inner)
6790 {
6791         const struct rte_flow_item_udp *udp_m = item->mask;
6792         const struct rte_flow_item_udp *udp_v = item->spec;
6793         void *headers_m;
6794         void *headers_v;
6795
6796         if (inner) {
6797                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6798                                          inner_headers);
6799                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6800         } else {
6801                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6802                                          outer_headers);
6803                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6804         }
6805         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6806         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6807         if (!udp_v)
6808                 return;
6809         if (!udp_m)
6810                 udp_m = &rte_flow_item_udp_mask;
6811         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6812                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6813         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6814                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6815         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6816                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6817         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6818                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6819 }
6820
/**
 * Add GRE optional Key item to matcher and to the value.
 *
 * The caller must have validated that the GRE K bit is set; this function
 * unconditionally asserts gre_k_present in both matcher and value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate (spec/mask are raw big-endian 32-bit keys).
 */
static void
flow_dv_translate_item_gre_key(void *matcher, void *key,
                                   const struct rte_flow_item *item)
{
        const rte_be32_t *key_m = item->mask;
        const rte_be32_t *key_v = item->spec;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);

        /* GRE K bit must be on and should already be validated */
        MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
        MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
        if (!key_v)
                return;
        if (!key_m)
                key_m = &gre_key_default_mask;
        /* PRM splits the 32-bit GRE key into high 24 bits and low 8 bits. */
        MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
                 rte_be_to_cpu_32(*key_m) >> 8);
        MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
                 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
        MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
                 rte_be_to_cpu_32(*key_m) & 0xFF);
        MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
                 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
}
6859
/**
 * Add GRE item to matcher and to the value.
 *
 * Matches ip_protocol == GRE unconditionally; when a spec is present also
 * matches the GRE protocol field and the C/K/S presence bits decoded from
 * the c_rsvd0_ver word.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_gre *gre_m = item->mask;
        const struct rte_flow_item_gre *gre_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        /*
         * Bit-field view of the GRE c_rsvd0_ver word after conversion to
         * CPU byte order; used to pick out the C/K/S presence bits.
         */
        struct {
                union {
                        __extension__
                        struct {
                                uint16_t version:3;
                                uint16_t rsvd0:9;
                                uint16_t s_present:1;
                                uint16_t k_present:1;
                                uint16_t rsvd_bit1:1;
                                uint16_t c_present:1;
                        };
                        uint16_t value;
                };
        } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
        if (!gre_v)
                return;
        if (!gre_m)
                gre_m = &rte_flow_item_gre_mask;
        MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
                 rte_be_to_cpu_16(gre_m->protocol));
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
                 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
        gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
        gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
        MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
                 gre_crks_rsvd0_ver_m.c_present);
        MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
                 gre_crks_rsvd0_ver_v.c_present &
                 gre_crks_rsvd0_ver_m.c_present);
        MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
                 gre_crks_rsvd0_ver_m.k_present);
        MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
                 gre_crks_rsvd0_ver_v.k_present &
                 gre_crks_rsvd0_ver_m.k_present);
        MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
                 gre_crks_rsvd0_ver_m.s_present);
        MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
                 gre_crks_rsvd0_ver_v.s_present &
                 gre_crks_rsvd0_ver_m.s_present);
}
6935
/**
 * Add NVGRE item to matcher and to the value.
 *
 * NVGRE is translated as a GRE header with fixed C/K/S bits and the TEB
 * protocol, plus the TNI and flow-id matched through the GRE key field.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_nvgre *nvgre_m = item->mask;
        const struct rte_flow_item_nvgre *nvgre_v = item->spec;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        const char *tni_flow_id_m;
        const char *tni_flow_id_v;
        char *gre_key_m;
        char *gre_key_v;
        int size;
        int i;

        /* For NVGRE, GRE header fields must be set with defined values. */
        const struct rte_flow_item_gre gre_spec = {
                .c_rsvd0_ver = RTE_BE16(0x2000),
                .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
        };
        const struct rte_flow_item_gre gre_mask = {
                .c_rsvd0_ver = RTE_BE16(0xB000),
                .protocol = RTE_BE16(UINT16_MAX),
        };
        const struct rte_flow_item gre_item = {
                .spec = &gre_spec,
                .mask = &gre_mask,
                .last = NULL,
        };
        flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
        if (!nvgre_v)
                return;
        if (!nvgre_m)
                nvgre_m = &rte_flow_item_nvgre_mask;
        tni_flow_id_m = (const char *)nvgre_m->tni;
        tni_flow_id_v = (const char *)nvgre_v->tni;
        /*
         * NOTE(review): copies tni and flow_id in one go starting at tni —
         * relies on the two fields being adjacent in the item layout.
         */
        size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
        gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
        gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
        memcpy(gre_key_m, tni_flow_id_m, size);
        /* Value bytes are spec AND mask, byte by byte. */
        for (i = 0; i < size; ++i)
                gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
}
6992
6993 /**
6994  * Add VXLAN item to matcher and to the value.
6995  *
6996  * @param[in, out] matcher
6997  *   Flow matcher.
6998  * @param[in, out] key
6999  *   Flow matcher value.
7000  * @param[in] item
7001  *   Flow pattern to translate.
7002  * @param[in] inner
7003  *   Item is inner pattern.
7004  */
7005 static void
7006 flow_dv_translate_item_vxlan(void *matcher, void *key,
7007                              const struct rte_flow_item *item,
7008                              int inner)
7009 {
7010         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7011         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7012         void *headers_m;
7013         void *headers_v;
7014         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7015         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7016         char *vni_m;
7017         char *vni_v;
7018         uint16_t dport;
7019         int size;
7020         int i;
7021
7022         if (inner) {
7023                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7024                                          inner_headers);
7025                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7026         } else {
7027                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7028                                          outer_headers);
7029                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7030         }
7031         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7032                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7033         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7034                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7035                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7036         }
7037         if (!vxlan_v)
7038                 return;
7039         if (!vxlan_m)
7040                 vxlan_m = &rte_flow_item_vxlan_mask;
7041         size = sizeof(vxlan_m->vni);
7042         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7043         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7044         memcpy(vni_m, vxlan_m->vni, size);
7045         for (i = 0; i < size; ++i)
7046                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7047 }
7048
7049 /**
7050  * Add VXLAN-GPE item to matcher and to the value.
7051  *
7052  * @param[in, out] matcher
7053  *   Flow matcher.
7054  * @param[in, out] key
7055  *   Flow matcher value.
7056  * @param[in] item
7057  *   Flow pattern to translate.
7058  * @param[in] inner
7059  *   Item is inner pattern.
7060  */
7061
7062 static void
7063 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7064                                  const struct rte_flow_item *item, int inner)
7065 {
7066         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7067         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7068         void *headers_m;
7069         void *headers_v;
7070         void *misc_m =
7071                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7072         void *misc_v =
7073                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7074         char *vni_m;
7075         char *vni_v;
7076         uint16_t dport;
7077         int size;
7078         int i;
7079         uint8_t flags_m = 0xff;
7080         uint8_t flags_v = 0xc;
7081
7082         if (inner) {
7083                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7084                                          inner_headers);
7085                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7086         } else {
7087                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7088                                          outer_headers);
7089                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7090         }
7091         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7092                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7093         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7094                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7095                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7096         }
7097         if (!vxlan_v)
7098                 return;
7099         if (!vxlan_m)
7100                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7101         size = sizeof(vxlan_m->vni);
7102         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7103         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7104         memcpy(vni_m, vxlan_m->vni, size);
7105         for (i = 0; i < size; ++i)
7106                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7107         if (vxlan_m->flags) {
7108                 flags_m = vxlan_m->flags;
7109                 flags_v = vxlan_v->flags;
7110         }
7111         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7112         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7113         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7114                  vxlan_m->protocol);
7115         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7116                  vxlan_v->protocol);
7117 }
7118
/**
 * Add Geneve item to matcher and to the value.
 *
 * If the outer UDP destination port was not matched earlier, forces an
 * exact match on the Geneve UDP port; then matches VNI, protocol type,
 * OAM flag and option length when a spec is provided.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */

static void
flow_dv_translate_item_geneve(void *matcher, void *key,
                              const struct rte_flow_item *item, int inner)
{
        const struct rte_flow_item_geneve *geneve_m = item->mask;
        const struct rte_flow_item_geneve *geneve_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        uint16_t dport;
        uint16_t gbhdr_m;
        uint16_t gbhdr_v;
        char *vni_m;
        char *vni_v;
        size_t size, i;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        dport = MLX5_UDP_PORT_GENEVE;
        /* Only pin the UDP port if no earlier item already matched it. */
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
        }
        if (!geneve_v)
                return;
        if (!geneve_m)
                geneve_m = &rte_flow_item_geneve_mask;
        size = sizeof(geneve_m->vni);
        vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
        vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
        memcpy(vni_m, geneve_m->vni, size);
        /* Value bytes are spec AND mask, byte by byte. */
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & geneve_v->vni[i];
        MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
                 rte_be_to_cpu_16(geneve_m->protocol));
        MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
                 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
        /* OAM flag and option length are packed in ver_opt_len_o_c_rsvd0. */
        gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
        gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
        MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
                 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
        MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
                 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
        MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
        MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
                 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
}
7189
7190 /**
7191  * Add MPLS item to matcher and to the value.
7192  *
7193  * @param[in, out] matcher
7194  *   Flow matcher.
7195  * @param[in, out] key
7196  *   Flow matcher value.
7197  * @param[in] item
7198  *   Flow pattern to translate.
7199  * @param[in] prev_layer
7200  *   The protocol layer indicated in previous item.
7201  * @param[in] inner
7202  *   Item is inner pattern.
7203  */
7204 static void
7205 flow_dv_translate_item_mpls(void *matcher, void *key,
7206                             const struct rte_flow_item *item,
7207                             uint64_t prev_layer,
7208                             int inner)
7209 {
7210         const uint32_t *in_mpls_m = item->mask;
7211         const uint32_t *in_mpls_v = item->spec;
7212         uint32_t *out_mpls_m = 0;
7213         uint32_t *out_mpls_v = 0;
7214         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7215         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7216         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7217                                      misc_parameters_2);
7218         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7219         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7220         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7221
7222         switch (prev_layer) {
7223         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7224                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7225                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7226                          MLX5_UDP_PORT_MPLS);
7227                 break;
7228         case MLX5_FLOW_LAYER_GRE:
7229                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7230                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7231                          RTE_ETHER_TYPE_MPLS);
7232                 break;
7233         default:
7234                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7235                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7236                          IPPROTO_MPLS);
7237                 break;
7238         }
7239         if (!in_mpls_v)
7240                 return;
7241         if (!in_mpls_m)
7242                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7243         switch (prev_layer) {
7244         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7245                 out_mpls_m =
7246                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7247                                                  outer_first_mpls_over_udp);
7248                 out_mpls_v =
7249                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7250                                                  outer_first_mpls_over_udp);
7251                 break;
7252         case MLX5_FLOW_LAYER_GRE:
7253                 out_mpls_m =
7254                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7255                                                  outer_first_mpls_over_gre);
7256                 out_mpls_v =
7257                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7258                                                  outer_first_mpls_over_gre);
7259                 break;
7260         default:
7261                 /* Inner MPLS not over GRE is not supported. */
7262                 if (!inner) {
7263                         out_mpls_m =
7264                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7265                                                          misc2_m,
7266                                                          outer_first_mpls);
7267                         out_mpls_v =
7268                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7269                                                          misc2_v,
7270                                                          outer_first_mpls);
7271                 }
7272                 break;
7273         }
7274         if (out_mpls_m && out_mpls_v) {
7275                 *out_mpls_m = *in_mpls_m;
7276                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7277         }
7278 }
7279
/**
 * Add metadata register item to matcher
 *
 * Writes the (data & mask) pair into the misc2 field corresponding to the
 * selected metadata register. REG_C_0 is updated read-modify-write so that
 * bits owned by other users of the register are preserved.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] reg_type
 *   Type of device metadata register
 * @param[in] data
 *   Register value
 * @param[in] mask
 *   Register mask
 */
static void
flow_dv_match_meta_reg(void *matcher, void *key,
                       enum modify_reg reg_type,
                       uint32_t data, uint32_t mask)
{
        void *misc2_m =
                MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
        void *misc2_v =
                MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
        uint32_t temp;

        /* Matched value bits outside the mask are meaningless; clear them. */
        data &= mask;
        switch (reg_type) {
        case REG_A:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
                break;
        case REG_B:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
                break;
        case REG_C_0:
                /*
                 * The metadata register C0 field might be divided into
                 * source vport index and META item value, we should set
                 * this field according to specified mask, not as whole one.
                 */
                temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
                temp |= mask;
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
                temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
                temp &= ~mask;
                temp |= data;
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
                break;
        case REG_C_1:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
                break;
        case REG_C_2:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
                break;
        case REG_C_3:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
                break;
        case REG_C_4:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
                break;
        case REG_C_5:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
                break;
        case REG_C_6:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
                break;
        case REG_C_7:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
                break;
        default:
                /* All valid registers are handled above. */
                MLX5_ASSERT(false);
                break;
        }
}
7362
7363 /**
7364  * Add MARK item to matcher
7365  *
7366  * @param[in] dev
7367  *   The device to configure through.
7368  * @param[in, out] matcher
7369  *   Flow matcher.
7370  * @param[in, out] key
7371  *   Flow matcher value.
7372  * @param[in] item
7373  *   Flow pattern to translate.
7374  */
7375 static void
7376 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7377                             void *matcher, void *key,
7378                             const struct rte_flow_item *item)
7379 {
7380         struct mlx5_priv *priv = dev->data->dev_private;
7381         const struct rte_flow_item_mark *mark;
7382         uint32_t value;
7383         uint32_t mask;
7384
7385         mark = item->mask ? (const void *)item->mask :
7386                             &rte_flow_item_mark_mask;
7387         mask = mark->id & priv->sh->dv_mark_mask;
7388         mark = (const void *)item->spec;
7389         MLX5_ASSERT(mark);
7390         value = mark->id & priv->sh->dv_mark_mask & mask;
7391         if (mask) {
7392                 enum modify_reg reg;
7393
7394                 /* Get the metadata register index for the mark. */
7395                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7396                 MLX5_ASSERT(reg > 0);
7397                 if (reg == REG_C_0) {
7398                         struct mlx5_priv *priv = dev->data->dev_private;
7399                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7400                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7401
7402                         mask &= msk_c0;
7403                         mask <<= shl_c0;
7404                         value <<= shl_c0;
7405                 }
7406                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7407         }
7408 }
7409
/**
 * Add META item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_meta(struct rte_eth_dev *dev,
                            void *matcher, void *key,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item *item)
{
        const struct rte_flow_item_meta *meta_m;
        const struct rte_flow_item_meta *meta_v;

        meta_m = (const void *)item->mask;
        if (!meta_m)
                meta_m = &rte_flow_item_meta_mask;
        meta_v = (const void *)item->spec;
        if (meta_v) {
                int reg;
                uint32_t value = meta_v->data;
                uint32_t mask = meta_m->data;

                /* Resolve the register used for META; bail out on failure. */
                reg = flow_dv_get_metadata_reg(dev, attr, NULL);
                if (reg < 0)
                        return;
                /*
                 * In datapath code there is no endianness
                 * conversions for performance reasons, all
                 * pattern conversions are done in rte_flow.
                 */
                value = rte_cpu_to_be_32(value);
                mask = rte_cpu_to_be_32(mask);
                if (reg == REG_C_0) {
                        /* REG_C_0 is shared; shift META into its sub-field. */
                        struct mlx5_priv *priv = dev->data->dev_private;
                        uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                        uint32_t shl_c0 = rte_bsf32(msk_c0);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
                        uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);

                        value >>= shr_c0;
                        mask >>= shr_c0;
#endif
                        value <<= shl_c0;
                        mask <<= shl_c0;
                        MLX5_ASSERT(msk_c0);
                        MLX5_ASSERT(!(~msk_c0 & mask));
                }
                flow_dv_match_meta_reg(matcher, key, reg, value, mask);
        }
}
7470
/**
 * Add vport metadata Reg C0 item to matcher
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] value
 *   Register value to match.
 * @param[in] mask
 *   Mask of the value bits to match on.
 */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	/* Vport metadata is always carried in register C0. */
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
7487
/**
 * Add tag item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate (PMD-internal tag item which carries
 *   the target register id directly in the spec).
 */
static void
flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
	uint32_t mask, value;

	MLX5_ASSERT(tag_v);
	value = tag_v->data;
	/* Missing mask means full match. */
	mask = tag_m ? tag_m->data : UINT32_MAX;
	if (tag_v->id == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		/* Only part of REG_C_0 is usable; shift into its bits. */
		mask &= msk_c0;
		mask <<= shl_c0;
		value <<= shl_c0;
	}
	flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
}
7523
7524 /**
7525  * Add TAG item to matcher
7526  *
7527  * @param[in] dev
7528  *   The devich to configure through.
7529  * @param[in, out] matcher
7530  *   Flow matcher.
7531  * @param[in, out] key
7532  *   Flow matcher value.
7533  * @param[in] item
7534  *   Flow pattern to translate.
7535  */
7536 static void
7537 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7538                            void *matcher, void *key,
7539                            const struct rte_flow_item *item)
7540 {
7541         const struct rte_flow_item_tag *tag_v = item->spec;
7542         const struct rte_flow_item_tag *tag_m = item->mask;
7543         enum modify_reg reg;
7544
7545         MLX5_ASSERT(tag_v);
7546         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7547         /* Get the metadata register index for the tag. */
7548         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7549         MLX5_ASSERT(reg > 0);
7550         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7551 }
7552
/**
 * Add source vport match to the specified matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] port
 *   Source vport value to match
 * @param[in] mask
 *   Mask
 */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
{
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	/* The source_port field holds the vport number in misc parameters. */
	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}
7575
7576 /**
7577  * Translate port-id item to eswitch match on  port-id.
7578  *
7579  * @param[in] dev
7580  *   The devich to configure through.
7581  * @param[in, out] matcher
7582  *   Flow matcher.
7583  * @param[in, out] key
7584  *   Flow matcher value.
7585  * @param[in] item
7586  *   Flow pattern to translate.
7587  *
7588  * @return
7589  *   0 on success, a negative errno value otherwise.
7590  */
7591 static int
7592 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7593                                void *key, const struct rte_flow_item *item)
7594 {
7595         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7596         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7597         struct mlx5_priv *priv;
7598         uint16_t mask, id;
7599
7600         mask = pid_m ? pid_m->id : 0xffff;
7601         id = pid_v ? pid_v->id : dev->data->port_id;
7602         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7603         if (!priv)
7604                 return -rte_errno;
7605         /* Translate to vport field or to metadata, depending on mode. */
7606         if (priv->vport_meta_mask)
7607                 flow_dv_translate_item_meta_vport(matcher, key,
7608                                                   priv->vport_meta_tag,
7609                                                   priv->vport_meta_mask);
7610         else
7611                 flow_dv_translate_item_source_vport(matcher, key,
7612                                                     priv->vport_id, mask);
7613         return 0;
7614 }
7615
/**
 * Add ICMP6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_icmp6(void *matcher, void *key,
			      const struct rte_flow_item *item,
			      int inner)
{
	const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
	const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP protocol is matched as ICMPv6 even with an empty spec. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
	if (!icmp6_v)
		return;
	if (!icmp6_m)
		icmp6_m = &rte_flow_item_icmp6_mask;
	/* Spec bits are masked before being written into the match value. */
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
		 icmp6_v->type & icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
		 icmp6_v->code & icmp6_m->code);
}
7662
/**
 * Add ICMP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_icmp(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_icmp *icmp_m = item->mask;
	const struct rte_flow_item_icmp *icmp_v = item->spec;
	uint32_t icmp_header_data_m = 0;
	uint32_t icmp_header_data_v = 0;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP protocol is matched as ICMP even with an empty spec. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
	if (!icmp_v)
		return;
	if (!icmp_m)
		icmp_m = &rte_flow_item_icmp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
		 icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
		 icmp_m->hdr.icmp_code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
	/*
	 * The icmp_header_data field packs the sequence number into the
	 * low 16 bits and the identifier into the high 16 bits.
	 */
	icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
	icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
	if (icmp_header_data_m) {
		icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
		icmp_header_data_v |=
			 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
		MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
			 icmp_header_data_m);
		MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
			 icmp_header_data_v & icmp_header_data_m);
	}
}
7724
/**
 * Add GTP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gtp(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_gtp *gtp_m = item->mask;
	const struct rte_flow_item_gtp *gtp_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	uint16_t dport = RTE_GTPU_UDP_PORT;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Force the GTP-U UDP port unless a dport match was already set. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!gtp_v)
		return;
	if (!gtp_m)
		gtp_m = &rte_flow_item_gtp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
		 gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
		 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
		 gtp_v->msg_type & gtp_m->msg_type);
	/* TEID arrives big-endian in the pattern; the match field is CPU order. */
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
		 rte_be_to_cpu_32(gtp_m->teid));
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}
7779
/**
 * Add eCPRI item to matcher and to the value.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
			     void *key, const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * Maximal four DW samples are supported in a single matching now.
	 * Two are used now for an eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
	if (!ecpri_m->hdr.common.u32)
		return;
	/* Flex parser sample IDs configured for eCPRI on this device. */
	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
	/* Need to take the whole DW as the mask to fill the entry. */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big endian (network order) in the header. */
	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
	/* Sample#0, used for matching type, offset 0. */
	MLX5_SET(fte_match_set_misc4, misc4_m,
		 prog_sample_field_id_0, samples[0]);
	/* It makes no sense to set the sample ID in the mask field. */
	MLX5_SET(fte_match_set_misc4, misc4_v,
		 prog_sample_field_id_0, samples[0]);
	/*
	 * Checking if message body part needs to be matched.
	 * Some wildcard rules only matching type field should be supported.
	 */
	if (ecpri_m->hdr.dummy[0]) {
		switch (ecpri_v->hdr.common.type) {
		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
					    prog_sample_field_value_1);
			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
					    prog_sample_field_value_1);
			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
			/* Sample#1, to match message body, offset 4. */
			MLX5_SET(fte_match_set_misc4, misc4_m,
				 prog_sample_field_id_1, samples[1]);
			MLX5_SET(fte_match_set_misc4, misc4_v,
				 prog_sample_field_id_1, samples[1]);
			break;
		default:
			/* Others, do not match any sample ID. */
			break;
		}
	}
}
7863
7864 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7865
7866 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7867         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7868                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
7869
7870 /**
7871  * Calculate flow matcher enable bitmap.
7872  *
7873  * @param match_criteria
7874  *   Pointer to flow matcher criteria.
7875  *
7876  * @return
7877  *   Bitmap of enabled fields.
7878  */
7879 static uint8_t
7880 flow_dv_matcher_enable(uint32_t *match_criteria)
7881 {
7882         uint8_t match_criteria_enable;
7883
7884         match_criteria_enable =
7885                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7886                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7887         match_criteria_enable |=
7888                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7889                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7890         match_criteria_enable |=
7891                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7892                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7893         match_criteria_enable |=
7894                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7895                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7896         match_criteria_enable |=
7897                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7898                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7899         match_criteria_enable |=
7900                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7901                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7902         return match_criteria_enable;
7903 }
7904
/**
 * Create callback for the flow table hash list.
 *
 * Allocates a table data entry and, unless the key marks a dummy table,
 * creates the flow table object in the proper domain plus a jump action
 * for non-root (table_id != 0) tables.
 *
 * @param[in] list
 *   Pointer to the hash list (its ctx is the shared device context).
 * @param[in] key64
 *   64-bit table key (union mlx5_flow_tbl_key).
 * @param[in] cb_ctx
 *   Pointer to a struct mlx5_flow_cb_ctx carrying dev, error and
 *   the tunnel parameters.
 *
 * @return
 *   Pointer to the new hash list entry, NULL on failure (error is set).
 */
struct mlx5_hlist_entry *
flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
	struct rte_flow_error *error = ctx->error;
	union mlx5_flow_tbl_key key = { .v64 = key64 };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl = &tbl_data->tbl;
	/* Dummy tables need no HW objects at all. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Select the steering domain matching the table key. */
	if (key.domain)
		domain = sh->fdb_domain;
	else if (key.direction)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* Non-root tables also get a destination (jump) action. */
	if (key.table_id) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	return &tbl_data->entry;
}
7965
7966 /**
7967  * Get a flow table.
7968  *
7969  * @param[in, out] dev
7970  *   Pointer to rte_eth_dev structure.
7971  * @param[in] table_id
7972  *   Table id to use.
7973  * @param[in] egress
7974  *   Direction of the table.
7975  * @param[in] transfer
7976  *   E-Switch or NIC flow.
7977  * @param[in] dummy
7978  *   Dummy entry for dv API.
7979  * @param[out] error
7980  *   pointer to error structure.
7981  *
7982  * @return
7983  *   Returns tables resource based on the index, NULL in case of failed.
7984  */
7985 struct mlx5_flow_tbl_resource *
7986 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
7987                          uint32_t table_id, uint8_t egress,
7988                          uint8_t transfer,
7989                          bool external,
7990                          const struct mlx5_flow_tunnel *tunnel,
7991                          uint32_t group_id, uint8_t dummy,
7992                          struct rte_flow_error *error)
7993 {
7994         struct mlx5_priv *priv = dev->data->dev_private;
7995         union mlx5_flow_tbl_key table_key = {
7996                 {
7997                         .table_id = table_id,
7998                         .dummy = dummy,
7999                         .domain = !!transfer,
8000                         .direction = !!egress,
8001                 }
8002         };
8003         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8004                 .tunnel = tunnel,
8005                 .group_id = group_id,
8006                 .external = external,
8007         };
8008         struct mlx5_flow_cb_ctx ctx = {
8009                 .dev = dev,
8010                 .error = error,
8011                 .data = &tt_prm,
8012         };
8013         struct mlx5_hlist_entry *entry;
8014         struct mlx5_flow_tbl_data_entry *tbl_data;
8015
8016         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8017         if (!entry) {
8018                 rte_flow_error_set(error, ENOMEM,
8019                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8020                                    "cannot get table");
8021                 return NULL;
8022         }
8023         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8024         return &tbl_data->tbl;
8025 }
8026
/**
 * Remove callback for the flow table hash list.
 *
 * Destroys the jump action and table object (if any), releases tunnel
 * group bookkeeping for tunnel-offloaded external tables and frees the
 * entry memory back to its pool.
 *
 * @param[in] list
 *   Pointer to the hash list (its ctx is the shared device context).
 * @param[in] entry
 *   Pointer to the entry being removed.
 */
void
flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
		      struct mlx5_hlist_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_hlist_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		union mlx5_flow_tbl_key table_key = {
			.v64 = entry->key
		};
		uint32_t table_id = table_key.table_id;

		/* Tunnel-less tables are tracked in the hub-wide group hash. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
		if (he) {
			struct tunnel_tbl_entry *tte;
			tte = container_of(he, typeof(*tte), hash);
			MLX5_ASSERT(tte->flow_table == table_id);
			mlx5_hlist_remove(tunnel_grp_hash, he);
			mlx5_free(tte);
		}
		/* Return the translated table id to the tunnel table pool. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
				tunnel_flow_tbl_to_id(table_id));
		DRV_LOG(DEBUG,
			"Table_id %#x tunnel %u group %u released.",
			table_id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
8076
8077 /**
8078  * Release a flow table.
8079  *
8080  * @param[in] dev
8081  *   Pointer to rte_eth_dev structure.
8082  * @param[in] tbl
8083  *   Table resource to be released.
8084  *
8085  * @return
8086  *   Returns 0 if table was released, else return 1;
8087  */
8088 static int
8089 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
8090                              struct mlx5_flow_tbl_resource *tbl)
8091 {
8092         struct mlx5_priv *priv = dev->data->dev_private;
8093         struct mlx5_dev_ctx_shared *sh = priv->sh;
8094         struct mlx5_flow_tbl_data_entry *tbl_data =
8095                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8096
8097         if (!tbl)
8098                 return 0;
8099         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8100 }
8101
8102 /**
8103  * Register the flow matcher.
8104  *
8105  * @param[in, out] dev
8106  *   Pointer to rte_eth_dev structure.
8107  * @param[in, out] matcher
8108  *   Pointer to flow matcher.
8109  * @param[in, out] key
8110  *   Pointer to flow table key.
8111  * @parm[in, out] dev_flow
8112  *   Pointer to the dev_flow.
8113  * @param[out] error
8114  *   pointer to error structure.
8115  *
8116  * @return
8117  *   0 on success otherwise -errno and errno is set.
8118  */
8119 static int
8120 flow_dv_matcher_register(struct rte_eth_dev *dev,
8121                          struct mlx5_flow_dv_matcher *matcher,
8122                          union mlx5_flow_tbl_key *key,
8123                          struct mlx5_flow *dev_flow,
8124                          struct rte_flow_error *error)
8125 {
8126         struct mlx5_priv *priv = dev->data->dev_private;
8127         struct mlx5_dev_ctx_shared *sh = priv->sh;
8128         struct mlx5_flow_dv_matcher *cache_matcher;
8129         struct mlx5dv_flow_matcher_attr dv_attr = {
8130                 .type = IBV_FLOW_ATTR_NORMAL,
8131                 .match_mask = (void *)&matcher->mask,
8132         };
8133         struct mlx5_flow_tbl_resource *tbl;
8134         struct mlx5_flow_tbl_data_entry *tbl_data;
8135         int ret;
8136
8137         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
8138                                        key->domain, false, NULL, 0, 0, error);
8139         if (!tbl)
8140                 return -rte_errno;      /* No need to refill the error info */
8141         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8142         /* Lookup from cache. */
8143         LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
8144                 if (matcher->crc == cache_matcher->crc &&
8145                     matcher->priority == cache_matcher->priority &&
8146                     !memcmp((const void *)matcher->mask.buf,
8147                             (const void *)cache_matcher->mask.buf,
8148                             cache_matcher->mask.size)) {
8149                         DRV_LOG(DEBUG,
8150                                 "%s group %u priority %hd use %s "
8151                                 "matcher %p: refcnt %d++",
8152                                 key->domain ? "FDB" : "NIC", key->table_id,
8153                                 cache_matcher->priority,
8154                                 key->direction ? "tx" : "rx",
8155                                 (void *)cache_matcher,
8156                                 __atomic_load_n(&cache_matcher->refcnt,
8157                                                 __ATOMIC_RELAXED));
8158                         __atomic_fetch_add(&cache_matcher->refcnt, 1,
8159                                            __ATOMIC_RELAXED);
8160                         dev_flow->handle->dvh.matcher = cache_matcher;
8161                         /* old matcher should not make the table ref++. */
8162                         flow_dv_tbl_resource_release(dev, tbl);
8163                         return 0;
8164                 }
8165         }
8166         /* Register new matcher. */
8167         cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
8168                                     SOCKET_ID_ANY);
8169         if (!cache_matcher) {
8170                 flow_dv_tbl_resource_release(dev, tbl);
8171                 return rte_flow_error_set(error, ENOMEM,
8172                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8173                                           "cannot allocate matcher memory");
8174         }
8175         *cache_matcher = *matcher;
8176         dv_attr.match_criteria_enable =
8177                 flow_dv_matcher_enable(cache_matcher->mask.buf);
8178         dv_attr.priority = matcher->priority;
8179         if (key->direction)
8180                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8181         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
8182                                                &cache_matcher->matcher_object);
8183         if (ret) {
8184                 mlx5_free(cache_matcher);
8185 #ifdef HAVE_MLX5DV_DR
8186                 flow_dv_tbl_resource_release(dev, tbl);
8187 #endif
8188                 return rte_flow_error_set(error, ENOMEM,
8189                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8190                                           NULL, "cannot create matcher");
8191         }
8192         /* Save the table information */
8193         cache_matcher->tbl = tbl;
8194         /* only matcher ref++, table ref++ already done above in get API. */
8195         __atomic_store_n(&cache_matcher->refcnt, 1, __ATOMIC_RELAXED);
8196         LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
8197         dev_flow->handle->dvh.matcher = cache_matcher;
8198         DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
8199                 key->domain ? "FDB" : "NIC", key->table_id,
8200                 cache_matcher->priority,
8201                 key->direction ? "tx" : "rx", (void *)cache_matcher,
8202                 __atomic_load_n(&cache_matcher->refcnt, __ATOMIC_RELAXED));
8203         return 0;
8204 }
8205
8206 struct mlx5_hlist_entry *
8207 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8208 {
8209         struct mlx5_dev_ctx_shared *sh = list->ctx;
8210         struct rte_flow_error *error = ctx;
8211         struct mlx5_flow_dv_tag_resource *entry;
8212         uint32_t idx = 0;
8213         int ret;
8214
8215         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8216         if (!entry) {
8217                 rte_flow_error_set(error, ENOMEM,
8218                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8219                                    "cannot allocate resource memory");
8220                 return NULL;
8221         }
8222         entry->idx = idx;
8223         ret = mlx5_flow_os_create_flow_action_tag(key,
8224                                                   &entry->action);
8225         if (ret) {
8226                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8227                 rte_flow_error_set(error, ENOMEM,
8228                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8229                                    NULL, "cannot create action");
8230                 return NULL;
8231         }
8232         return &entry->entry;
8233 }
8234
8235 /**
8236  * Find existing tag resource or create and register a new one.
8237  *
8238  * @param dev[in, out]
8239  *   Pointer to rte_eth_dev structure.
8240  * @param[in, out] tag_be24
8241  *   Tag value in big endian then R-shift 8.
 * @param[in, out] dev_flow
8243  *   Pointer to the dev_flow.
8244  * @param[out] error
8245  *   pointer to error structure.
8246  *
8247  * @return
8248  *   0 on success otherwise -errno and errno is set.
8249  */
8250 static int
8251 flow_dv_tag_resource_register
8252                         (struct rte_eth_dev *dev,
8253                          uint32_t tag_be24,
8254                          struct mlx5_flow *dev_flow,
8255                          struct rte_flow_error *error)
8256 {
8257         struct mlx5_priv *priv = dev->data->dev_private;
8258         struct mlx5_flow_dv_tag_resource *cache_resource;
8259         struct mlx5_hlist_entry *entry;
8260
8261         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8262         if (entry) {
8263                 cache_resource = container_of
8264                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8265                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8266                 dev_flow->dv.tag_resource = cache_resource;
8267                 return 0;
8268         }
8269         return -rte_errno;
8270 }
8271
8272 void
8273 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8274                       struct mlx5_hlist_entry *entry)
8275 {
8276         struct mlx5_dev_ctx_shared *sh = list->ctx;
8277         struct mlx5_flow_dv_tag_resource *tag =
8278                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8279
8280         MLX5_ASSERT(tag && sh && tag->action);
8281         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8282         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8283         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8284 }
8285
8286 /**
8287  * Release the tag.
8288  *
8289  * @param dev
8290  *   Pointer to Ethernet device.
8291  * @param tag_idx
8292  *   Tag index.
8293  *
8294  * @return
8295  *   1 while a reference on it exists, 0 when freed.
8296  */
8297 static int
8298 flow_dv_tag_release(struct rte_eth_dev *dev,
8299                     uint32_t tag_idx)
8300 {
8301         struct mlx5_priv *priv = dev->data->dev_private;
8302         struct mlx5_flow_dv_tag_resource *tag;
8303
8304         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8305         if (!tag)
8306                 return 0;
8307         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8308                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8309         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8310 }
8311
8312 /**
8313  * Translate port ID action to vport.
8314  *
8315  * @param[in] dev
8316  *   Pointer to rte_eth_dev structure.
8317  * @param[in] action
8318  *   Pointer to the port ID action.
8319  * @param[out] dst_port_id
8320  *   The target port ID.
8321  * @param[out] error
8322  *   Pointer to the error structure.
8323  *
8324  * @return
8325  *   0 on success, a negative errno value otherwise and rte_errno is set.
8326  */
8327 static int
8328 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8329                                  const struct rte_flow_action *action,
8330                                  uint32_t *dst_port_id,
8331                                  struct rte_flow_error *error)
8332 {
8333         uint32_t port;
8334         struct mlx5_priv *priv;
8335         const struct rte_flow_action_port_id *conf =
8336                         (const struct rte_flow_action_port_id *)action->conf;
8337
8338         port = conf->original ? dev->data->port_id : conf->id;
8339         priv = mlx5_port_to_eswitch_info(port, false);
8340         if (!priv)
8341                 return rte_flow_error_set(error, -rte_errno,
8342                                           RTE_FLOW_ERROR_TYPE_ACTION,
8343                                           NULL,
8344                                           "No eswitch info was found for port");
8345 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8346         /*
8347          * This parameter is transferred to
8348          * mlx5dv_dr_action_create_dest_ib_port().
8349          */
8350         *dst_port_id = priv->dev_port;
8351 #else
8352         /*
8353          * Legacy mode, no LAG configurations is supported.
8354          * This parameter is transferred to
8355          * mlx5dv_dr_action_create_dest_vport().
8356          */
8357         *dst_port_id = priv->vport_id;
8358 #endif
8359         return 0;
8360 }
8361
8362 /**
8363  * Create a counter with aging configuration.
8364  *
8365  * @param[in] dev
8366  *   Pointer to rte_eth_dev structure.
8367  * @param[out] count
8368  *   Pointer to the counter action configuration.
8369  * @param[in] age
8370  *   Pointer to the aging action configuration.
8371  *
8372  * @return
8373  *   Index to flow counter on success, 0 otherwise.
8374  */
8375 static uint32_t
8376 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8377                                 struct mlx5_flow *dev_flow,
8378                                 const struct rte_flow_action_count *count,
8379                                 const struct rte_flow_action_age *age)
8380 {
8381         uint32_t counter;
8382         struct mlx5_age_param *age_param;
8383
8384         if (count && count->shared)
8385                 counter = flow_dv_counter_get_shared(dev, count->id);
8386         else
8387                 counter = flow_dv_counter_alloc(dev, !!age);
8388         if (!counter || age == NULL)
8389                 return counter;
8390         age_param  = flow_dv_counter_idx_get_age(dev, counter);
8391         age_param->context = age->context ? age->context :
8392                 (void *)(uintptr_t)(dev_flow->flow_idx);
8393         age_param->timeout = age->timeout;
8394         age_param->port_id = dev->data->port_id;
8395         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8396         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8397         return counter;
8398 }
8399 /**
8400  * Add Tx queue matcher
8401  *
8402  * @param[in] dev
8403  *   Pointer to the dev struct.
8404  * @param[in, out] matcher
8405  *   Flow matcher.
8406  * @param[in, out] key
8407  *   Flow matcher value.
8408  * @param[in] item
8409  *   Flow pattern to translate.
8410  * @param[in] inner
8411  *   Item is inner pattern.
8412  */
8413 static void
8414 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8415                                 void *matcher, void *key,
8416                                 const struct rte_flow_item *item)
8417 {
8418         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8419         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8420         void *misc_m =
8421                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8422         void *misc_v =
8423                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8424         struct mlx5_txq_ctrl *txq;
8425         uint32_t queue;
8426
8427
8428         queue_m = (const void *)item->mask;
8429         if (!queue_m)
8430                 return;
8431         queue_v = (const void *)item->spec;
8432         if (!queue_v)
8433                 return;
8434         txq = mlx5_txq_get(dev, queue_v->queue);
8435         if (!txq)
8436                 return;
8437         queue = txq->obj->sq->id;
8438         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8439         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8440                  queue & queue_m->queue);
8441         mlx5_txq_release(dev, queue_v->queue);
8442 }
8443
8444 /**
8445  * Set the hash fields according to the @p flow information.
8446  *
8447  * @param[in] dev_flow
8448  *   Pointer to the mlx5_flow.
8449  * @param[in] rss_desc
8450  *   Pointer to the mlx5_flow_rss_desc.
8451  */
8452 static void
8453 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8454                        struct mlx5_flow_rss_desc *rss_desc)
8455 {
8456         uint64_t items = dev_flow->handle->layers;
8457         int rss_inner = 0;
8458         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8459
8460         dev_flow->hash_fields = 0;
8461 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8462         if (rss_desc->level >= 2) {
8463                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8464                 rss_inner = 1;
8465         }
8466 #endif
8467         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8468             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8469                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8470                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8471                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8472                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8473                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8474                         else
8475                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8476                 }
8477         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8478                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8479                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8480                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8481                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8482                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8483                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8484                         else
8485                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8486                 }
8487         }
8488         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8489             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8490                 if (rss_types & ETH_RSS_UDP) {
8491                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8492                                 dev_flow->hash_fields |=
8493                                                 IBV_RX_HASH_SRC_PORT_UDP;
8494                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8495                                 dev_flow->hash_fields |=
8496                                                 IBV_RX_HASH_DST_PORT_UDP;
8497                         else
8498                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8499                 }
8500         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8501                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8502                 if (rss_types & ETH_RSS_TCP) {
8503                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8504                                 dev_flow->hash_fields |=
8505                                                 IBV_RX_HASH_SRC_PORT_TCP;
8506                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8507                                 dev_flow->hash_fields |=
8508                                                 IBV_RX_HASH_DST_PORT_TCP;
8509                         else
8510                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8511                 }
8512         }
8513 }
8514
8515 /**
8516  * Create an Rx Hash queue.
8517  *
8518  * @param dev
8519  *   Pointer to Ethernet device.
8520  * @param[in] dev_flow
8521  *   Pointer to the mlx5_flow.
8522  * @param[in] rss_desc
8523  *   Pointer to the mlx5_flow_rss_desc.
8524  * @param[out] hrxq_idx
8525  *   Hash Rx queue index.
8526  *
8527  * @return
8528  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
8529  */
8530 static struct mlx5_hrxq *
8531 flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
8532                         struct mlx5_flow *dev_flow,
8533                         struct mlx5_flow_rss_desc *rss_desc,
8534                         uint32_t *hrxq_idx)
8535 {
8536         struct mlx5_priv *priv = dev->data->dev_private;
8537         struct mlx5_flow_handle *dh = dev_flow->handle;
8538         struct mlx5_hrxq *hrxq;
8539
8540         MLX5_ASSERT(rss_desc->queue_num);
8541         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
8542                                   dev_flow->hash_fields,
8543                                   rss_desc->queue, rss_desc->queue_num);
8544         if (!*hrxq_idx) {
8545                 *hrxq_idx = mlx5_hrxq_new
8546                                 (dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN,
8547                                  dev_flow->hash_fields,
8548                                  rss_desc->queue, rss_desc->queue_num,
8549                                  !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL),
8550                                  false);
8551                 if (!*hrxq_idx)
8552                         return NULL;
8553         }
8554         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8555                               *hrxq_idx);
8556         return hrxq;
8557 }
8558
8559 /**
8560  * Find existing sample resource or create and register a new one.
8561  *
8562  * @param[in, out] dev
8563  *   Pointer to rte_eth_dev structure.
8564  * @param[in] attr
8565  *   Attributes of flow that includes this item.
8566  * @param[in] resource
8567  *   Pointer to sample resource.
 * @param[in, out] dev_flow
8569  *   Pointer to the dev_flow.
8570  * @param[in, out] sample_dv_actions
8571  *   Pointer to sample actions list.
8572  * @param[out] error
8573  *   pointer to error structure.
8574  *
8575  * @return
8576  *   0 on success otherwise -errno and errno is set.
8577  */
static int
flow_dv_sample_resource_register(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 struct mlx5_flow_dv_sample_resource *resource,
			 struct mlx5_flow *dev_flow,
			 void **sample_dv_actions,
			 struct rte_flow_error *error)
{
	struct mlx5_flow_dv_sample_resource *cache_resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	/* Non-sampled traffic continues at the next flow table level. */
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = resource->ft_id + next_ft_step;

	/* Lookup a matching resource from cache. */
	ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list,
		      idx, cache_resource, next) {
		if (resource->ratio == cache_resource->ratio &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->ft_id == cache_resource->ft_id &&
		    resource->set_action == cache_resource->set_action &&
		    !memcmp((void *)&resource->sample_act,
			    (void *)&cache_resource->sample_act,
			    sizeof(struct mlx5_flow_sub_actions_list))) {
			DRV_LOG(DEBUG, "sample resource %p: refcnt %d++",
				(void *)cache_resource,
				__atomic_load_n(&cache_resource->refcnt,
						__ATOMIC_RELAXED));
			/* Cache hit: take one more reference and reuse it. */
			__atomic_fetch_add(&cache_resource->refcnt, 1,
					   __ATOMIC_RELAXED);
			dev_flow->handle->dvh.rix_sample = idx;
			dev_flow->dv.sample_res = cache_resource;
			return 0;
		}
	}
	/* Register new sample resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE],
				       &dev_flow->handle->dvh.rix_sample);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Create normal path table level */
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					attr->egress, attr->transfer,
					dev_flow->external, NULL, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	cache_resource->normal_path_tbl = tbl;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		/*
		 * FDB only: create a default-miss action and append it to
		 * the sample actions list passed back to the caller.
		 */
		cache_resource->default_miss =
				mlx5_glue->dr_create_flow_action_default_miss();
		if (!cache_resource->default_miss) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"cannot create default miss "
						"action");
			goto error;
		}
		sample_dv_actions[resource->sample_act.actions_num++] =
						cache_resource->default_miss;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = cache_resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = cache_resource->set_action;
	cache_resource->verbs_action =
		mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
	if (!cache_resource->verbs_action) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	/* Publish the new resource with an initial reference. */
	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
	ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list,
		     dev_flow->handle->dvh.rix_sample, cache_resource,
		     next);
	dev_flow->dv.sample_res = cache_resource;
	DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++",
		(void *)cache_resource,
		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
	return 0;
error:
	/* Roll back every sub-resource created so far, in reverse order. */
	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		if (cache_resource->default_miss)
			claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->default_miss));
	} else {
		/* Release the Rx path sub-resources copied from *resource. */
		if (cache_resource->sample_idx.rix_hrxq &&
		    !mlx5_hrxq_release(dev,
				cache_resource->sample_idx.rix_hrxq))
			cache_resource->sample_idx.rix_hrxq = 0;
		if (cache_resource->sample_idx.rix_tag &&
		    !flow_dv_tag_release(dev,
				cache_resource->sample_idx.rix_tag))
			cache_resource->sample_idx.rix_tag = 0;
		if (cache_resource->sample_idx.cnt) {
			flow_dv_counter_release(dev,
				cache_resource->sample_idx.cnt);
			cache_resource->sample_idx.cnt = 0;
		}
	}
	if (cache_resource->normal_path_tbl)
		flow_dv_tbl_resource_release(dev,
				cache_resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE],
				dev_flow->handle->dvh.rix_sample);
	dev_flow->handle->dvh.rix_sample = 0;
	return -rte_errno;
}
8704
8705 /**
8706  * Find existing destination array resource or create and register a new one.
8707  *
8708  * @param[in, out] dev
8709  *   Pointer to rte_eth_dev structure.
8710  * @param[in] attr
8711  *   Attributes of flow that includes this item.
8712  * @param[in] resource
8713  *   Pointer to destination array resource.
 * @param[in, out] dev_flow
8715  *   Pointer to the dev_flow.
8716  * @param[out] error
8717  *   pointer to error structure.
8718  *
8719  * @return
8720  *   0 on success otherwise -errno and errno is set.
8721  */
static int
flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 struct mlx5_flow_dv_dest_array_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_flow_dv_dest_array_resource *cache_resource;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0;

	/* Lookup a matching resource from cache. */
	ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
		      sh->dest_array_list,
		      idx, cache_resource, next) {
		if (resource->num_of_dest == cache_resource->num_of_dest &&
		    resource->ft_type == cache_resource->ft_type &&
		    !memcmp((void *)cache_resource->sample_act,
			    (void *)resource->sample_act,
			   (resource->num_of_dest *
			   sizeof(struct mlx5_flow_sub_actions_list)))) {
			DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
				(void *)cache_resource,
				__atomic_load_n(&cache_resource->refcnt,
						__ATOMIC_RELAXED));
			/* Cache hit: take one more reference and reuse it. */
			__atomic_fetch_add(&cache_resource->refcnt, 1,
					   __ATOMIC_RELAXED);
			dev_flow->handle->dvh.rix_dest_array = idx;
			dev_flow->dv.dest_array_res = cache_resource;
			return 0;
		}
	}
	/* Register new destination array resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
				       &dev_flow->handle->dvh.rix_dest_array);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Select the DR domain matching the flow attributes. */
	if (attr->transfer)
		domain = sh->fdb_domain;
	else if (attr->ingress)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Build one temporary destination attribute per sub-action. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &resource->sample_act[idx];
		if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
			dest_attr[idx]->dest = sample_act->dr_queue_action;
		} else if (sample_act->action_flags ==
			  (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
			/* Encap + port ID combination needs a reformat dest. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
		} else if (sample_act->action_flags ==
			   MLX5_FLOW_ACTION_PORT_ID) {
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
		}
	}
	/* create a dest array action */
	cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
						(domain,
						 cache_resource->num_of_dest,
						 dest_attr);
	if (!cache_resource->action) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	/* Publish the new resource with an initial reference. */
	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
	ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
		     &sh->dest_array_list,
		     dev_flow->handle->dvh.rix_dest_array, cache_resource,
		     next);
	dev_flow->dv.dest_array_res = cache_resource;
	DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
		(void *)cache_resource,
		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
	/* The attribute copies are only needed for creation; free them. */
	for (idx = 0; idx < resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return 0;
error:
	/* Release the sub-action resources copied from *resource. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		struct mlx5_flow_sub_actions_idx *act_res =
					&cache_resource->sample_idx[idx];
		if (act_res->rix_hrxq &&
		    !mlx5_hrxq_release(dev,
				act_res->rix_hrxq))
			act_res->rix_hrxq = 0;
		if (act_res->rix_encap_decap &&
			!flow_dv_encap_decap_resource_release(dev,
				act_res->rix_encap_decap))
			act_res->rix_encap_decap = 0;
		if (act_res->rix_port_id_action &&
			!flow_dv_port_id_action_resource_release(dev,
				act_res->rix_port_id_action))
			act_res->rix_port_id_action = 0;
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
				dev_flow->handle->dvh.rix_dest_array);
	dev_flow->handle->dvh.rix_dest_array = 0;
	return -rte_errno;
}
8852
8853 /**
8854  * Convert Sample action to DV specification.
8855  *
8856  * @param[in] dev
8857  *   Pointer to rte_eth_dev structure.
8858  * @param[in] action
8859  *   Pointer to action structure.
8860  * @param[in, out] dev_flow
8861  *   Pointer to the mlx5_flow.
8862  * @param[in] attr
8863  *   Pointer to the flow attributes.
8864  * @param[in, out] num_of_dest
8865  *   Pointer to the num of destination.
8866  * @param[in, out] sample_actions
8867  *   Pointer to sample actions list.
8868  * @param[in, out] res
8869  *   Pointer to sample resource.
8870  * @param[out] error
8871  *   Pointer to the error structure.
8872  *
8873  * @return
8874  *   0 on success, a negative errno value otherwise and rte_errno is set.
8875  */
8876 static int
8877 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
8878                                 const struct rte_flow_action *action,
8879                                 struct mlx5_flow *dev_flow,
8880                                 const struct rte_flow_attr *attr,
8881                                 uint32_t *num_of_dest,
8882                                 void **sample_actions,
8883                                 struct mlx5_flow_dv_sample_resource *res,
8884                                 struct rte_flow_error *error)
8885 {
8886         struct mlx5_priv *priv = dev->data->dev_private;
8887         const struct rte_flow_action_sample *sample_action;
8888         const struct rte_flow_action *sub_actions;
8889         const struct rte_flow_action_queue *queue;
8890         struct mlx5_flow_sub_actions_list *sample_act;
8891         struct mlx5_flow_sub_actions_idx *sample_idx;
8892         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8893         struct mlx5_flow_rss_desc *rss_desc;
8894         uint64_t action_flags = 0;
8895
8896         MLX5_ASSERT(wks);
             /* Select the RSS descriptor: slot 1 for nested flows, slot 0 otherwise. */
8897         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
8898         sample_act = &res->sample_act;
8899         sample_idx = &res->sample_idx;
8900         sample_action = (const struct rte_flow_action_sample *)action->conf;
8901         res->ratio = sample_action->ratio;
8902         sub_actions = sample_action->actions;
             /*
              * Translate each sub-action on the sample path into a DR action,
              * collecting them in sample_actions[] and recording the resource
              * indices in sample_idx so they can be released on error/destroy.
              */
8903         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
8904                 int type = sub_actions->type;
8905                 uint32_t pre_rix = 0;
8906                 void *pre_r;
8907                 switch (type) {
8908                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8909                 {
8910                         struct mlx5_hrxq *hrxq;
8911                         uint32_t hrxq_idx;
8912
8913                         queue = sub_actions->conf;
8914                         rss_desc->queue_num = 1;
8915                         rss_desc->queue[0] = queue->index;
8916                         hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
8917                                         rss_desc, &hrxq_idx);
8918                         if (!hrxq)
8919                                 return rte_flow_error_set
8920                                         (error, rte_errno,
8921                                          RTE_FLOW_ERROR_TYPE_ACTION,
8922                                          NULL,
8923                                          "cannot create fate queue");
8924                         sample_act->dr_queue_action = hrxq->action;
8925                         sample_idx->rix_hrxq = hrxq_idx;
8926                         sample_actions[sample_act->actions_num++] =
8927                                                 hrxq->action;
8928                         (*num_of_dest)++;
8929                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
                             /*
                              * NOTE(review): hrxq index is stored in the handle
                              * only when a MARK sub-action was translated
                              * earlier in this list — confirm the ordering
                              * assumption is intentional.
                              */
8930                         if (action_flags & MLX5_FLOW_ACTION_MARK)
8931                                 dev_flow->handle->rix_hrxq = hrxq_idx;
8932                         dev_flow->handle->fate_action =
8933                                         MLX5_FLOW_FATE_QUEUE;
8934                         break;
8935                 }
8936                 case RTE_FLOW_ACTION_TYPE_MARK:
8937                 {
8938                         uint32_t tag_be = mlx5_flow_mark_set
8939                                 (((const struct rte_flow_action_mark *)
8940                                 (sub_actions->conf))->id);
8941
8942                         dev_flow->handle->mark = 1;
8943                         pre_rix = dev_flow->handle->dvh.rix_tag;
8944                         /* Save the mark resource before sample */
8945                         pre_r = dev_flow->dv.tag_resource;
                             /*
                              * Registration stores the tag into dev_flow; the
                              * saved values are restored below so the normal
                              * path resources are not clobbered.
                              */
8946                         if (flow_dv_tag_resource_register(dev, tag_be,
8947                                                   dev_flow, error))
8948                                 return -rte_errno;
8949                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8950                         sample_act->dr_tag_action =
8951                                 dev_flow->dv.tag_resource->action;
8952                         sample_idx->rix_tag =
8953                                 dev_flow->handle->dvh.rix_tag;
8954                         sample_actions[sample_act->actions_num++] =
8955                                                 sample_act->dr_tag_action;
8956                         /* Recover the mark resource after sample */
8957                         dev_flow->dv.tag_resource = pre_r;
8958                         dev_flow->handle->dvh.rix_tag = pre_rix;
8959                         action_flags |= MLX5_FLOW_ACTION_MARK;
8960                         break;
8961                 }
8962                 case RTE_FLOW_ACTION_TYPE_COUNT:
8963                 {
8964                         uint32_t counter;
8965
8966                         counter = flow_dv_translate_create_counter(dev,
8967                                         dev_flow, sub_actions->conf, 0);
8968                         if (!counter)
8969                                 return rte_flow_error_set
8970                                                 (error, rte_errno,
8971                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8972                                                  NULL,
8973                                                  "cannot create counter"
8974                                                  " object.");
8975                         sample_idx->cnt = counter;
8976                         sample_act->dr_cnt_action =
8977                                   (flow_dv_counter_get_by_idx(dev,
8978                                   counter, NULL))->action;
8979                         sample_actions[sample_act->actions_num++] =
8980                                                 sample_act->dr_cnt_action;
8981                         action_flags |= MLX5_FLOW_ACTION_COUNT;
8982                         break;
8983                 }
8984                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
8985                 {
8986                         struct mlx5_flow_dv_port_id_action_resource
8987                                         port_id_resource;
8988                         uint32_t port_id = 0;
8989
8990                         memset(&port_id_resource, 0, sizeof(port_id_resource));
8991                         /* Save the port id resource before sample */
8992                         pre_rix = dev_flow->handle->rix_port_id_action;
8993                         pre_r = dev_flow->dv.port_id_action;
8994                         if (flow_dv_translate_action_port_id(dev, sub_actions,
8995                                                              &port_id, error))
8996                                 return -rte_errno;
8997                         port_id_resource.port_id = port_id;
8998                         if (flow_dv_port_id_action_resource_register
8999                             (dev, &port_id_resource, dev_flow, error))
9000                                 return -rte_errno;
9001                         sample_act->dr_port_id_action =
9002                                 dev_flow->dv.port_id_action->action;
9003                         sample_idx->rix_port_id_action =
9004                                 dev_flow->handle->rix_port_id_action;
9005                         sample_actions[sample_act->actions_num++] =
9006                                                 sample_act->dr_port_id_action;
9007                         /* Recover the port id resource after sample */
9008                         dev_flow->dv.port_id_action = pre_r;
9009                         dev_flow->handle->rix_port_id_action = pre_rix;
9010                         (*num_of_dest)++;
9011                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9012                         break;
9013                 }
9014                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9015                         /* Save the encap resource before sample */
9016                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9017                         pre_r = dev_flow->dv.encap_decap;
9018                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
9019                                                            dev_flow,
9020                                                            attr->transfer,
9021                                                            error))
9022                                 return -rte_errno;
9023                         sample_act->dr_encap_action =
9024                                 dev_flow->dv.encap_decap->action;
9025                         sample_idx->rix_encap_decap =
9026                                 dev_flow->handle->dvh.rix_encap_decap;
9027                         sample_actions[sample_act->actions_num++] =
9028                                                 sample_act->dr_encap_action;
9029                         /* Recover the encap resource after sample */
9030                         dev_flow->dv.encap_decap = pre_r;
9031                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9032                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9033                         break;
9034                 default:
                             /* Any other action type is rejected inside a sample list. */
9035                         return rte_flow_error_set(error, EINVAL,
9036                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9037                                 NULL,
9038                                 "Not support for sampler action");
9039                 }
9040         }
             /* Publish the detected sub-action flags and flow table id to res. */
9041         sample_act->action_flags = action_flags;
9042         res->ft_id = dev_flow->dv.group;
9043         if (attr->transfer) {
9044                 union {
9045                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9046                         uint64_t set_action;
9047                 } action_ctx = { .set_action = 0 };
9048
                     /*
                      * Transfer (FDB) flows: prepare a modify-header SET command
                      * that writes the vport metadata tag into metadata
                      * register C0.
                      */
9049                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9050                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9051                          MLX5_MODIFICATION_TYPE_SET);
9052                 MLX5_SET(set_action_in, action_ctx.action_in, field,
9053                          MLX5_MODI_META_REG_C_0);
9054                 MLX5_SET(set_action_in, action_ctx.action_in, data,
9055                          priv->vport_meta_tag);
9056                 res->set_action = action_ctx.set_action;
9057         } else if (attr->ingress) {
9058                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9059         }
9060         return 0;
9061 }
9062
9063 /**
9064  * Create Sample action resource, or a destination array resource for mirroring.
9065  *
9066  * @param[in] dev
9067  *   Pointer to rte_eth_dev structure.
9068  * @param[in, out] dev_flow
9069  *   Pointer to the mlx5_flow.
9070  * @param[in] attr
9071  *   Pointer to the flow attributes.
9072  * @param[in] num_of_dest
9073  *   The num of destination.
9074  * @param[in, out] res
9075  *   Pointer to sample resource.
9076  * @param[in, out] mdest_res
9077  *   Pointer to destination array resource.
9078  * @param[in] sample_actions
9079  *   Pointer to sample path actions list.
9080  * @param[in] action_flags
9081  *   Holds the actions detected until now.
9082  * @param[out] error
9083  *   Pointer to the error structure.
9084  *
9085  * @return
9086  *   0 on success, a negative errno value otherwise and rte_errno is set.
9087  */
9088 static int
9089 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9090                              struct mlx5_flow *dev_flow,
9091                              const struct rte_flow_attr *attr,
9092                              uint32_t num_of_dest,
9093                              struct mlx5_flow_dv_sample_resource *res,
9094                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
9095                              void **sample_actions,
9096                              uint64_t action_flags,
9097                              struct rte_flow_error *error)
9098 {
9099         /* update normal path action resource into last index of array */
9100         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9101         struct mlx5_flow_sub_actions_list *sample_act =
9102                                         &mdest_res->sample_act[dest_index];
9103         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9104         struct mlx5_flow_rss_desc *rss_desc;
9105         uint32_t normal_idx = 0;
9106         struct mlx5_hrxq *hrxq;
9107         uint32_t hrxq_idx;
9108
9109         MLX5_ASSERT(wks);
             /* Select the RSS descriptor: slot 1 for nested flows, slot 0 otherwise. */
9110         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
             /*
              * Multiple destinations (mirroring): collect the normal-path
              * actions into the last slot of the array, copy the sample-path
              * actions into slot 0 and register a destination array resource.
              */
9111         if (num_of_dest > 1) {
9112                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9113                         /* Handle QP action for mirroring */
9114                         hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
9115                                                        rss_desc, &hrxq_idx);
9116                         if (!hrxq)
9117                                 return rte_flow_error_set
9118                                      (error, rte_errno,
9119                                       RTE_FLOW_ERROR_TYPE_ACTION,
9120                                       NULL,
9121                                       "cannot create rx queue");
9122                         normal_idx++;
9123                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9124                         sample_act->dr_queue_action = hrxq->action;
                             /*
                              * NOTE(review): hrxq index is kept in the handle
                              * only when MARK is present — confirm this matches
                              * the release path expectations.
                              */
9125                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9126                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9127                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9128                 }
9129                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9130                         normal_idx++;
9131                         mdest_res->sample_idx[dest_index].rix_encap_decap =
9132                                 dev_flow->handle->dvh.rix_encap_decap;
9133                         sample_act->dr_encap_action =
9134                                 dev_flow->dv.encap_decap->action;
9135                 }
9136                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9137                         normal_idx++;
9138                         mdest_res->sample_idx[dest_index].rix_port_id_action =
9139                                 dev_flow->handle->rix_port_id_action;
9140                         sample_act->dr_port_id_action =
9141                                 dev_flow->dv.port_id_action->action;
9142                 }
9143                 sample_act->actions_num = normal_idx;
9144                 /* update sample action resource into first index of array */
9145                 mdest_res->ft_type = res->ft_type;
9146                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9147                                 sizeof(struct mlx5_flow_sub_actions_idx));
9148                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9149                                 sizeof(struct mlx5_flow_sub_actions_list));
9150                 mdest_res->num_of_dest = num_of_dest;
9151                 if (flow_dv_dest_array_resource_register(dev, attr, mdest_res,
9152                                                          dev_flow, error))
9153                         return rte_flow_error_set(error, EINVAL,
9154                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9155                                                   NULL, "can't create sample "
9156                                                   "action");
9157         } else {
                     /* Single destination: register a plain sample resource. */
9158                 if (flow_dv_sample_resource_register(dev, attr, res, dev_flow,
9159                                                      sample_actions, error))
9160                         return rte_flow_error_set(error, EINVAL,
9161                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9162                                                   NULL,
9163                                                   "can't create sample action");
9164         }
9165         return 0;
9166 }
9167
9168 /**
9169  * Fill the flow with DV spec, lock free
9170  * (mutex should be acquired by caller).
9171  *
9172  * @param[in] dev
9173  *   Pointer to rte_eth_dev structure.
9174  * @param[in, out] dev_flow
9175  *   Pointer to the sub flow.
9176  * @param[in] attr
9177  *   Pointer to the flow attributes.
9178  * @param[in] items
9179  *   Pointer to the list of items.
9180  * @param[in] actions
9181  *   Pointer to the list of actions.
9182  * @param[out] error
9183  *   Pointer to the error structure.
9184  *
9185  * @return
9186  *   0 on success, a negative errno value otherwise and rte_errno is set.
9187  */
9188 static int
9189 __flow_dv_translate(struct rte_eth_dev *dev,
9190                     struct mlx5_flow *dev_flow,
9191                     const struct rte_flow_attr *attr,
9192                     const struct rte_flow_item items[],
9193                     const struct rte_flow_action actions[],
9194                     struct rte_flow_error *error)
9195 {
9196         struct mlx5_priv *priv = dev->data->dev_private;
9197         struct mlx5_dev_config *dev_conf = &priv->config;
9198         struct rte_flow *flow = dev_flow->flow;
9199         struct mlx5_flow_handle *handle = dev_flow->handle;
9200         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9201         struct mlx5_flow_rss_desc *rss_desc;
9202         uint64_t item_flags = 0;
9203         uint64_t last_item = 0;
9204         uint64_t action_flags = 0;
9205         uint64_t priority = attr->priority;
9206         struct mlx5_flow_dv_matcher matcher = {
9207                 .mask = {
9208                         .size = sizeof(matcher.mask.buf) -
9209                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9210                 },
9211         };
9212         int actions_n = 0;
9213         bool actions_end = false;
9214         union {
9215                 struct mlx5_flow_dv_modify_hdr_resource res;
9216                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9217                             sizeof(struct mlx5_modification_cmd) *
9218                             (MLX5_MAX_MODIFY_NUM + 1)];
9219         } mhdr_dummy;
9220         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9221         const struct rte_flow_action_count *count = NULL;
9222         const struct rte_flow_action_age *age = NULL;
9223         union flow_dv_attr flow_attr = { .attr = 0 };
9224         uint32_t tag_be;
9225         union mlx5_flow_tbl_key tbl_key;
9226         uint32_t modify_action_position = UINT32_MAX;
9227         void *match_mask = matcher.mask.buf;
9228         void *match_value = dev_flow->dv.value.buf;
9229         uint8_t next_protocol = 0xff;
9230         struct rte_vlan_hdr vlan = { 0 };
9231         struct mlx5_flow_dv_dest_array_resource mdest_res;
9232         struct mlx5_flow_dv_sample_resource sample_res;
9233         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9234         struct mlx5_flow_sub_actions_list *sample_act;
9235         uint32_t sample_act_pos = UINT32_MAX;
9236         uint32_t num_of_dest = 0;
9237         int tmp_actions_n = 0;
9238         uint32_t table;
9239         int ret = 0;
9240         const struct mlx5_flow_tunnel *tunnel;
9241         struct flow_grp_info grp_info = {
9242                 .external = !!dev_flow->external,
9243                 .transfer = !!attr->transfer,
9244                 .fdb_def_rule = !!priv->fdb_def_rule,
9245         };
9246
9247         MLX5_ASSERT(wks);
9248         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9249         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9250         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9251         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9252                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9253         /* update normal path action resource into last index of array */
9254         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9255         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9256                  flow_items_to_tunnel(items) :
9257                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9258                  flow_actions_to_tunnel(actions) :
9259                  dev_flow->tunnel ? dev_flow->tunnel : NULL;
9260         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9261                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9262         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9263                                 (dev, tunnel, attr, items, actions);
9264         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9265                                        grp_info, error);
9266         if (ret)
9267                 return ret;
9268         dev_flow->dv.group = table;
9269         if (attr->transfer)
9270                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9271         if (priority == MLX5_FLOW_PRIO_RSVD)
9272                 priority = dev_conf->flow_prio - 1;
9273         /* number of actions must be set to 0 in case of dirty stack. */
9274         mhdr_res->actions_num = 0;
9275         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9276                 /*
9277                  * do not add decap action if match rule drops packet
9278                  * HW rejects rules with decap & drop
9279                  */
9280                 bool add_decap = true;
9281                 const struct rte_flow_action *ptr = actions;
9282                 struct mlx5_flow_tbl_resource *tbl;
9283
9284                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9285                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9286                                 add_decap = false;
9287                                 break;
9288                         }
9289                 }
9290                 if (add_decap) {
9291                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9292                                                            attr->transfer,
9293                                                            error))
9294                                 return -rte_errno;
9295                         dev_flow->dv.actions[actions_n++] =
9296                                         dev_flow->dv.encap_decap->action;
9297                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9298                 }
9299                 /*
9300                  * bind table_id with <group, table> for tunnel match rule.
9301                  * Tunnel set rule establishes that bind in JUMP action handler.
9302                  * Required for scenario when application creates tunnel match
9303                  * rule before tunnel set rule.
9304                  */
9305                 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9306                                                attr->transfer,
9307                                                !!dev_flow->external, tunnel,
9308                                                attr->group, 0, error);
9309                 if (!tbl)
9310                         return rte_flow_error_set
9311                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
9312                                actions, "cannot register tunnel group");
9313         }
9314         for (; !actions_end ; actions++) {
9315                 const struct rte_flow_action_queue *queue;
9316                 const struct rte_flow_action_rss *rss;
9317                 const struct rte_flow_action *action = actions;
9318                 const uint8_t *rss_key;
9319                 const struct rte_flow_action_meter *mtr;
9320                 struct mlx5_flow_tbl_resource *tbl;
9321                 uint32_t port_id = 0;
9322                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9323                 int action_type = actions->type;
9324                 const struct rte_flow_action *found_action = NULL;
9325                 struct mlx5_flow_meter *fm = NULL;
9326                 uint32_t jump_group = 0;
9327
9328                 if (!mlx5_flow_os_action_supported(action_type))
9329                         return rte_flow_error_set(error, ENOTSUP,
9330                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9331                                                   actions,
9332                                                   "action not supported");
9333                 switch (action_type) {
9334                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9335                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9336                         break;
9337                 case RTE_FLOW_ACTION_TYPE_VOID:
9338                         break;
9339                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9340                         if (flow_dv_translate_action_port_id(dev, action,
9341                                                              &port_id, error))
9342                                 return -rte_errno;
9343                         port_id_resource.port_id = port_id;
9344                         MLX5_ASSERT(!handle->rix_port_id_action);
9345                         if (flow_dv_port_id_action_resource_register
9346                             (dev, &port_id_resource, dev_flow, error))
9347                                 return -rte_errno;
9348                         dev_flow->dv.actions[actions_n++] =
9349                                         dev_flow->dv.port_id_action->action;
9350                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9351                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9352                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9353                         num_of_dest++;
9354                         break;
9355                 case RTE_FLOW_ACTION_TYPE_FLAG:
9356                         action_flags |= MLX5_FLOW_ACTION_FLAG;
9357                         dev_flow->handle->mark = 1;
9358                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9359                                 struct rte_flow_action_mark mark = {
9360                                         .id = MLX5_FLOW_MARK_DEFAULT,
9361                                 };
9362
9363                                 if (flow_dv_convert_action_mark(dev, &mark,
9364                                                                 mhdr_res,
9365                                                                 error))
9366                                         return -rte_errno;
9367                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9368                                 break;
9369                         }
9370                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9371                         /*
9372                          * Only one FLAG or MARK is supported per device flow
9373                          * right now. So the pointer to the tag resource must be
9374                          * zero before the register process.
9375                          */
9376                         MLX5_ASSERT(!handle->dvh.rix_tag);
9377                         if (flow_dv_tag_resource_register(dev, tag_be,
9378                                                           dev_flow, error))
9379                                 return -rte_errno;
9380                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9381                         dev_flow->dv.actions[actions_n++] =
9382                                         dev_flow->dv.tag_resource->action;
9383                         break;
9384                 case RTE_FLOW_ACTION_TYPE_MARK:
9385                         action_flags |= MLX5_FLOW_ACTION_MARK;
9386                         dev_flow->handle->mark = 1;
9387                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9388                                 const struct rte_flow_action_mark *mark =
9389                                         (const struct rte_flow_action_mark *)
9390                                                 actions->conf;
9391
9392                                 if (flow_dv_convert_action_mark(dev, mark,
9393                                                                 mhdr_res,
9394                                                                 error))
9395                                         return -rte_errno;
9396                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9397                                 break;
9398                         }
9399                         /* Fall-through */
9400                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9401                         /* Legacy (non-extensive) MARK action. */
9402                         tag_be = mlx5_flow_mark_set
9403                               (((const struct rte_flow_action_mark *)
9404                                (actions->conf))->id);
9405                         MLX5_ASSERT(!handle->dvh.rix_tag);
9406                         if (flow_dv_tag_resource_register(dev, tag_be,
9407                                                           dev_flow, error))
9408                                 return -rte_errno;
9409                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9410                         dev_flow->dv.actions[actions_n++] =
9411                                         dev_flow->dv.tag_resource->action;
9412                         break;
9413                 case RTE_FLOW_ACTION_TYPE_SET_META:
9414                         if (flow_dv_convert_action_set_meta
9415                                 (dev, mhdr_res, attr,
9416                                  (const struct rte_flow_action_set_meta *)
9417                                   actions->conf, error))
9418                                 return -rte_errno;
9419                         action_flags |= MLX5_FLOW_ACTION_SET_META;
9420                         break;
9421                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9422                         if (flow_dv_convert_action_set_tag
9423                                 (dev, mhdr_res,
9424                                  (const struct rte_flow_action_set_tag *)
9425                                   actions->conf, error))
9426                                 return -rte_errno;
9427                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9428                         break;
9429                 case RTE_FLOW_ACTION_TYPE_DROP:
9430                         action_flags |= MLX5_FLOW_ACTION_DROP;
9431                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9432                         break;
9433                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9434                         queue = actions->conf;
9435                         rss_desc->queue_num = 1;
9436                         rss_desc->queue[0] = queue->index;
9437                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9438                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9439                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9440                         num_of_dest++;
9441                         break;
9442                 case RTE_FLOW_ACTION_TYPE_RSS:
9443                         rss = actions->conf;
9444                         memcpy(rss_desc->queue, rss->queue,
9445                                rss->queue_num * sizeof(uint16_t));
9446                         rss_desc->queue_num = rss->queue_num;
9447                         /* NULL RSS key indicates default RSS key. */
9448                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9449                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9450                         /*
9451                          * rss->level and rss.types should be set in advance
9452                          * when expanding items for RSS.
9453                          */
9454                         action_flags |= MLX5_FLOW_ACTION_RSS;
9455                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9456                         break;
9457                 case RTE_FLOW_ACTION_TYPE_AGE:
9458                 case RTE_FLOW_ACTION_TYPE_COUNT:
9459                         if (!dev_conf->devx) {
9460                                 return rte_flow_error_set
9461                                               (error, ENOTSUP,
9462                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9463                                                NULL,
9464                                                "count action not supported");
9465                         }
9466                         /* Save information first, will apply later. */
9467                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9468                                 count = action->conf;
9469                         else
9470                                 age = action->conf;
9471                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9472                         break;
9473                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9474                         dev_flow->dv.actions[actions_n++] =
9475                                                 priv->sh->pop_vlan_action;
9476                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9477                         break;
9478                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9479                         if (!(action_flags &
9480                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9481                                 flow_dev_get_vlan_info_from_items(items, &vlan);
9482                         vlan.eth_proto = rte_be_to_cpu_16
9483                              ((((const struct rte_flow_action_of_push_vlan *)
9484                                                    actions->conf)->ethertype));
9485                         found_action = mlx5_flow_find_action
9486                                         (actions + 1,
9487                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9488                         if (found_action)
9489                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9490                         found_action = mlx5_flow_find_action
9491                                         (actions + 1,
9492                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9493                         if (found_action)
9494                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9495                         if (flow_dv_create_action_push_vlan
9496                                             (dev, attr, &vlan, dev_flow, error))
9497                                 return -rte_errno;
9498                         dev_flow->dv.actions[actions_n++] =
9499                                         dev_flow->dv.push_vlan_res->action;
9500                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9501                         break;
9502                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9503                         /* of_vlan_push action handled this action */
9504                         MLX5_ASSERT(action_flags &
9505                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9506                         break;
9507                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9508                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9509                                 break;
9510                         flow_dev_get_vlan_info_from_items(items, &vlan);
9511                         mlx5_update_vlan_vid_pcp(actions, &vlan);
9512                         /* If no VLAN push - this is a modify header action */
9513                         if (flow_dv_convert_action_modify_vlan_vid
9514                                                 (mhdr_res, actions, error))
9515                                 return -rte_errno;
9516                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9517                         break;
9518                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9519                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9520                         if (flow_dv_create_action_l2_encap(dev, actions,
9521                                                            dev_flow,
9522                                                            attr->transfer,
9523                                                            error))
9524                                 return -rte_errno;
9525                         dev_flow->dv.actions[actions_n++] =
9526                                         dev_flow->dv.encap_decap->action;
9527                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9528                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9529                                 sample_act->action_flags |=
9530                                                         MLX5_FLOW_ACTION_ENCAP;
9531                         break;
9532                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9533                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9534                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9535                                                            attr->transfer,
9536                                                            error))
9537                                 return -rte_errno;
9538                         dev_flow->dv.actions[actions_n++] =
9539                                         dev_flow->dv.encap_decap->action;
9540                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9541                         break;
9542                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9543                         /* Handle encap with preceding decap. */
9544                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9545                                 if (flow_dv_create_action_raw_encap
9546                                         (dev, actions, dev_flow, attr, error))
9547                                         return -rte_errno;
9548                                 dev_flow->dv.actions[actions_n++] =
9549                                         dev_flow->dv.encap_decap->action;
9550                         } else {
9551                                 /* Handle encap without preceding decap. */
9552                                 if (flow_dv_create_action_l2_encap
9553                                     (dev, actions, dev_flow, attr->transfer,
9554                                      error))
9555                                         return -rte_errno;
9556                                 dev_flow->dv.actions[actions_n++] =
9557                                         dev_flow->dv.encap_decap->action;
9558                         }
9559                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9560                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9561                                 sample_act->action_flags |=
9562                                                         MLX5_FLOW_ACTION_ENCAP;
9563                         break;
9564                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
9565                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9566                                 ;
9567                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9568                                 if (flow_dv_create_action_l2_decap
9569                                     (dev, dev_flow, attr->transfer, error))
9570                                         return -rte_errno;
9571                                 dev_flow->dv.actions[actions_n++] =
9572                                         dev_flow->dv.encap_decap->action;
9573                         }
9574                         /* If decap is followed by encap, handle it at encap. */
9575                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9576                         break;
9577                 case RTE_FLOW_ACTION_TYPE_JUMP:
9578                         jump_group = ((const struct rte_flow_action_jump *)
9579                                                         action->conf)->group;
9580                         grp_info.std_tbl_fix = 0;
9581                         ret = mlx5_flow_group_to_table(dev, tunnel,
9582                                                        jump_group,
9583                                                        &table,
9584                                                        grp_info, error);
9585                         if (ret)
9586                                 return ret;
9587                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9588                                                        attr->transfer,
9589                                                        !!dev_flow->external,
9590                                                        tunnel, jump_group, 0,
9591                                                        error);
9592                         if (!tbl)
9593                                 return rte_flow_error_set
9594                                                 (error, errno,
9595                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9596                                                  NULL,
9597                                                  "cannot create jump action.");
9598                         if (flow_dv_jump_tbl_resource_register
9599                             (dev, tbl, dev_flow, error)) {
9600                                 flow_dv_tbl_resource_release(dev, tbl);
9601                                 return rte_flow_error_set
9602                                                 (error, errno,
9603                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9604                                                  NULL,
9605                                                  "cannot create jump action.");
9606                         }
9607                         dev_flow->dv.actions[actions_n++] =
9608                                         dev_flow->dv.jump->action;
9609                         action_flags |= MLX5_FLOW_ACTION_JUMP;
9610                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9611                         break;
9612                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9613                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9614                         if (flow_dv_convert_action_modify_mac
9615                                         (mhdr_res, actions, error))
9616                                 return -rte_errno;
9617                         action_flags |= actions->type ==
9618                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
9619                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
9620                                         MLX5_FLOW_ACTION_SET_MAC_DST;
9621                         break;
9622                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
9623                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
9624                         if (flow_dv_convert_action_modify_ipv4
9625                                         (mhdr_res, actions, error))
9626                                 return -rte_errno;
9627                         action_flags |= actions->type ==
9628                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
9629                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
9630                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
9631                         break;
9632                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
9633                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
9634                         if (flow_dv_convert_action_modify_ipv6
9635                                         (mhdr_res, actions, error))
9636                                 return -rte_errno;
9637                         action_flags |= actions->type ==
9638                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
9639                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
9640                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
9641                         break;
9642                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
9643                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
9644                         if (flow_dv_convert_action_modify_tp
9645                                         (mhdr_res, actions, items,
9646                                          &flow_attr, dev_flow, !!(action_flags &
9647                                          MLX5_FLOW_ACTION_DECAP), error))
9648                                 return -rte_errno;
9649                         action_flags |= actions->type ==
9650                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
9651                                         MLX5_FLOW_ACTION_SET_TP_SRC :
9652                                         MLX5_FLOW_ACTION_SET_TP_DST;
9653                         break;
9654                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
9655                         if (flow_dv_convert_action_modify_dec_ttl
9656                                         (mhdr_res, items, &flow_attr, dev_flow,
9657                                          !!(action_flags &
9658                                          MLX5_FLOW_ACTION_DECAP), error))
9659                                 return -rte_errno;
9660                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
9661                         break;
9662                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
9663                         if (flow_dv_convert_action_modify_ttl
9664                                         (mhdr_res, actions, items, &flow_attr,
9665                                          dev_flow, !!(action_flags &
9666                                          MLX5_FLOW_ACTION_DECAP), error))
9667                                 return -rte_errno;
9668                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
9669                         break;
9670                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
9671                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
9672                         if (flow_dv_convert_action_modify_tcp_seq
9673                                         (mhdr_res, actions, error))
9674                                 return -rte_errno;
9675                         action_flags |= actions->type ==
9676                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
9677                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
9678                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
9679                         break;
9680
9681                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
9682                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
9683                         if (flow_dv_convert_action_modify_tcp_ack
9684                                         (mhdr_res, actions, error))
9685                                 return -rte_errno;
9686                         action_flags |= actions->type ==
9687                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
9688                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
9689                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
9690                         break;
9691                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
9692                         if (flow_dv_convert_action_set_reg
9693                                         (mhdr_res, actions, error))
9694                                 return -rte_errno;
9695                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9696                         break;
9697                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
9698                         if (flow_dv_convert_action_copy_mreg
9699                                         (dev, mhdr_res, actions, error))
9700                                 return -rte_errno;
9701                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9702                         break;
9703                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
9704                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
9705                         dev_flow->handle->fate_action =
9706                                         MLX5_FLOW_FATE_DEFAULT_MISS;
9707                         break;
9708                 case RTE_FLOW_ACTION_TYPE_METER:
9709                         mtr = actions->conf;
9710                         if (!flow->meter) {
9711                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
9712                                                             attr, error);
9713                                 if (!fm)
9714                                         return rte_flow_error_set(error,
9715                                                 rte_errno,
9716                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9717                                                 NULL,
9718                                                 "meter not found "
9719                                                 "or invalid parameters");
9720                                 flow->meter = fm->idx;
9721                         }
9722                         /* Set the meter action. */
9723                         if (!fm) {
9724                                 fm = mlx5_ipool_get(priv->sh->ipool
9725                                                 [MLX5_IPOOL_MTR], flow->meter);
9726                                 if (!fm)
9727                                         return rte_flow_error_set(error,
9728                                                 rte_errno,
9729                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9730                                                 NULL,
9731                                                 "meter not found "
9732                                                 "or invalid parameters");
9733                         }
9734                         dev_flow->dv.actions[actions_n++] =
9735                                 fm->mfts->meter_action;
9736                         action_flags |= MLX5_FLOW_ACTION_METER;
9737                         break;
9738                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
9739                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
9740                                                               actions, error))
9741                                 return -rte_errno;
9742                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
9743                         break;
9744                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
9745                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
9746                                                               actions, error))
9747                                 return -rte_errno;
9748                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
9749                         break;
9750                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
9751                         sample_act_pos = actions_n;
9752                         ret = flow_dv_translate_action_sample(dev,
9753                                                               actions,
9754                                                               dev_flow, attr,
9755                                                               &num_of_dest,
9756                                                               sample_actions,
9757                                                               &sample_res,
9758                                                               error);
9759                         if (ret < 0)
9760                                 return ret;
9761                         actions_n++;
9762                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
9763                         /* put encap action into group if work with port id */
9764                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
9765                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
9766                                 sample_act->action_flags |=
9767                                                         MLX5_FLOW_ACTION_ENCAP;
9768                         break;
9769                 case RTE_FLOW_ACTION_TYPE_END:
9770                         actions_end = true;
9771                         if (mhdr_res->actions_num) {
9772                                 /* create modify action if needed. */
9773                                 if (flow_dv_modify_hdr_resource_register
9774                                         (dev, mhdr_res, dev_flow, error))
9775                                         return -rte_errno;
9776                                 dev_flow->dv.actions[modify_action_position] =
9777                                         handle->dvh.modify_hdr->action;
9778                         }
9779                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
9780                                 flow->counter =
9781                                         flow_dv_translate_create_counter(dev,
9782                                                 dev_flow, count, age);
9783
9784                                 if (!flow->counter)
9785                                         return rte_flow_error_set
9786                                                 (error, rte_errno,
9787                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9788                                                 NULL,
9789                                                 "cannot create counter"
9790                                                 " object.");
9791                                 dev_flow->dv.actions[actions_n] =
9792                                           (flow_dv_counter_get_by_idx(dev,
9793                                           flow->counter, NULL))->action;
9794                                 actions_n++;
9795                         }
9796                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
9797                                 ret = flow_dv_create_action_sample(dev,
9798                                                           dev_flow, attr,
9799                                                           num_of_dest,
9800                                                           &sample_res,
9801                                                           &mdest_res,
9802                                                           sample_actions,
9803                                                           action_flags,
9804                                                           error);
9805                                 if (ret < 0)
9806                                         return rte_flow_error_set
9807                                                 (error, rte_errno,
9808                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9809                                                 NULL,
9810                                                 "cannot create sample action");
9811                                 if (num_of_dest > 1) {
9812                                         dev_flow->dv.actions[sample_act_pos] =
9813                                         dev_flow->dv.dest_array_res->action;
9814                                 } else {
9815                                         dev_flow->dv.actions[sample_act_pos] =
9816                                         dev_flow->dv.sample_res->verbs_action;
9817                                 }
9818                         }
9819                         break;
9820                 default:
9821                         break;
9822                 }
9823                 if (mhdr_res->actions_num &&
9824                     modify_action_position == UINT32_MAX)
9825                         modify_action_position = actions_n++;
9826         }
9827         /*
9828          * For multiple destination (sample action with ratio=1), the encap
9829          * action and port id action will be combined into group action.
9830          * So need remove the original these actions in the flow and only
9831          * use the sample action instead of.
9832          */
9833         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
9834                 int i;
9835                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9836
9837                 for (i = 0; i < actions_n; i++) {
9838                         if ((sample_act->dr_encap_action &&
9839                                 sample_act->dr_encap_action ==
9840                                 dev_flow->dv.actions[i]) ||
9841                                 (sample_act->dr_port_id_action &&
9842                                 sample_act->dr_port_id_action ==
9843                                 dev_flow->dv.actions[i]))
9844                                 continue;
9845                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
9846                 }
9847                 memcpy((void *)dev_flow->dv.actions,
9848                                 (void *)temp_actions,
9849                                 tmp_actions_n * sizeof(void *));
9850                 actions_n = tmp_actions_n;
9851         }
9852         dev_flow->dv.actions_n = actions_n;
9853         dev_flow->act_flags = action_flags;
9854         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
9855                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
9856                 int item_type = items->type;
9857
9858                 if (!mlx5_flow_os_item_supported(item_type))
9859                         return rte_flow_error_set(error, ENOTSUP,
9860                                                   RTE_FLOW_ERROR_TYPE_ITEM,
9861                                                   NULL, "item not supported");
9862                 switch (item_type) {
9863                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
9864                         flow_dv_translate_item_port_id(dev, match_mask,
9865                                                        match_value, items);
9866                         last_item = MLX5_FLOW_ITEM_PORT_ID;
9867                         break;
9868                 case RTE_FLOW_ITEM_TYPE_ETH:
9869                         flow_dv_translate_item_eth(match_mask, match_value,
9870                                                    items, tunnel,
9871                                                    dev_flow->dv.group);
9872                         matcher.priority = action_flags &
9873                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
9874                                         !dev_flow->external ?
9875                                         MLX5_PRIORITY_MAP_L3 :
9876                                         MLX5_PRIORITY_MAP_L2;
9877                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
9878                                              MLX5_FLOW_LAYER_OUTER_L2;
9879                         break;
9880                 case RTE_FLOW_ITEM_TYPE_VLAN:
9881                         flow_dv_translate_item_vlan(dev_flow,
9882                                                     match_mask, match_value,
9883                                                     items, tunnel,
9884                                                     dev_flow->dv.group);
9885                         matcher.priority = MLX5_PRIORITY_MAP_L2;
9886                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
9887                                               MLX5_FLOW_LAYER_INNER_VLAN) :
9888                                              (MLX5_FLOW_LAYER_OUTER_L2 |
9889                                               MLX5_FLOW_LAYER_OUTER_VLAN);
9890                         break;
9891                 case RTE_FLOW_ITEM_TYPE_IPV4:
9892                         mlx5_flow_tunnel_ip_check(items, next_protocol,
9893                                                   &item_flags, &tunnel);
9894                         flow_dv_translate_item_ipv4(match_mask, match_value,
9895                                                     items, tunnel,
9896                                                     dev_flow->dv.group);
9897                         matcher.priority = MLX5_PRIORITY_MAP_L3;
9898                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
9899                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
9900                         if (items->mask != NULL &&
9901                             ((const struct rte_flow_item_ipv4 *)
9902                              items->mask)->hdr.next_proto_id) {
9903                                 next_protocol =
9904                                         ((const struct rte_flow_item_ipv4 *)
9905                                          (items->spec))->hdr.next_proto_id;
9906                                 next_protocol &=
9907                                         ((const struct rte_flow_item_ipv4 *)
9908                                          (items->mask))->hdr.next_proto_id;
9909                         } else {
9910                                 /* Reset for inner layer. */
9911                                 next_protocol = 0xff;
9912                         }
9913                         break;
9914                 case RTE_FLOW_ITEM_TYPE_IPV6:
9915                         mlx5_flow_tunnel_ip_check(items, next_protocol,
9916                                                   &item_flags, &tunnel);
9917                         flow_dv_translate_item_ipv6(match_mask, match_value,
9918                                                     items, tunnel,
9919                                                     dev_flow->dv.group);
9920                         matcher.priority = MLX5_PRIORITY_MAP_L3;
9921                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
9922                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
9923                         if (items->mask != NULL &&
9924                             ((const struct rte_flow_item_ipv6 *)
9925                              items->mask)->hdr.proto) {
9926                                 next_protocol =
9927                                         ((const struct rte_flow_item_ipv6 *)
9928                                          items->spec)->hdr.proto;
9929                                 next_protocol &=
9930                                         ((const struct rte_flow_item_ipv6 *)
9931                                          items->mask)->hdr.proto;
9932                         } else {
9933                                 /* Reset for inner layer. */
9934                                 next_protocol = 0xff;
9935                         }
9936                         break;
9937                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
9938                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
9939                                                              match_value,
9940                                                              items, tunnel);
9941                         last_item = tunnel ?
9942                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
9943                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
9944                         if (items->mask != NULL &&
9945                             ((const struct rte_flow_item_ipv6_frag_ext *)
9946                              items->mask)->hdr.next_header) {
9947                                 next_protocol =
9948                                 ((const struct rte_flow_item_ipv6_frag_ext *)
9949                                  items->spec)->hdr.next_header;
9950                                 next_protocol &=
9951                                 ((const struct rte_flow_item_ipv6_frag_ext *)
9952                                  items->mask)->hdr.next_header;
9953                         } else {
9954                                 /* Reset for inner layer. */
9955                                 next_protocol = 0xff;
9956                         }
9957                         break;
9958                 case RTE_FLOW_ITEM_TYPE_TCP:
9959                         flow_dv_translate_item_tcp(match_mask, match_value,
9960                                                    items, tunnel);
9961                         matcher.priority = MLX5_PRIORITY_MAP_L4;
9962                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
9963                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
9964                         break;
9965                 case RTE_FLOW_ITEM_TYPE_UDP:
9966                         flow_dv_translate_item_udp(match_mask, match_value,
9967                                                    items, tunnel);
9968                         matcher.priority = MLX5_PRIORITY_MAP_L4;
9969                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
9970                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
9971                         break;
9972                 case RTE_FLOW_ITEM_TYPE_GRE:
9973                         flow_dv_translate_item_gre(match_mask, match_value,
9974                                                    items, tunnel);
9975                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
9976                         last_item = MLX5_FLOW_LAYER_GRE;
9977                         break;
9978                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
9979                         flow_dv_translate_item_gre_key(match_mask,
9980                                                        match_value, items);
9981                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
9982                         break;
9983                 case RTE_FLOW_ITEM_TYPE_NVGRE:
9984                         flow_dv_translate_item_nvgre(match_mask, match_value,
9985                                                      items, tunnel);
9986                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
9987                         last_item = MLX5_FLOW_LAYER_GRE;
9988                         break;
9989                 case RTE_FLOW_ITEM_TYPE_VXLAN:
9990                         flow_dv_translate_item_vxlan(match_mask, match_value,
9991                                                      items, tunnel);
9992                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
9993                         last_item = MLX5_FLOW_LAYER_VXLAN;
9994                         break;
9995                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
9996                         flow_dv_translate_item_vxlan_gpe(match_mask,
9997                                                          match_value, items,
9998                                                          tunnel);
9999                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10000                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10001                         break;
10002                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10003                         flow_dv_translate_item_geneve(match_mask, match_value,
10004                                                       items, tunnel);
10005                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10006                         last_item = MLX5_FLOW_LAYER_GENEVE;
10007                         break;
10008                 case RTE_FLOW_ITEM_TYPE_MPLS:
10009                         flow_dv_translate_item_mpls(match_mask, match_value,
10010                                                     items, last_item, tunnel);
10011                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10012                         last_item = MLX5_FLOW_LAYER_MPLS;
10013                         break;
10014                 case RTE_FLOW_ITEM_TYPE_MARK:
10015                         flow_dv_translate_item_mark(dev, match_mask,
10016                                                     match_value, items);
10017                         last_item = MLX5_FLOW_ITEM_MARK;
10018                         break;
10019                 case RTE_FLOW_ITEM_TYPE_META:
10020                         flow_dv_translate_item_meta(dev, match_mask,
10021                                                     match_value, attr, items);
10022                         last_item = MLX5_FLOW_ITEM_METADATA;
10023                         break;
10024                 case RTE_FLOW_ITEM_TYPE_ICMP:
10025                         flow_dv_translate_item_icmp(match_mask, match_value,
10026                                                     items, tunnel);
10027                         last_item = MLX5_FLOW_LAYER_ICMP;
10028                         break;
10029                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10030                         flow_dv_translate_item_icmp6(match_mask, match_value,
10031                                                       items, tunnel);
10032                         last_item = MLX5_FLOW_LAYER_ICMP6;
10033                         break;
10034                 case RTE_FLOW_ITEM_TYPE_TAG:
10035                         flow_dv_translate_item_tag(dev, match_mask,
10036                                                    match_value, items);
10037                         last_item = MLX5_FLOW_ITEM_TAG;
10038                         break;
10039                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10040                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10041                                                         match_value, items);
10042                         last_item = MLX5_FLOW_ITEM_TAG;
10043                         break;
10044                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10045                         flow_dv_translate_item_tx_queue(dev, match_mask,
10046                                                         match_value,
10047                                                         items);
10048                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10049                         break;
10050                 case RTE_FLOW_ITEM_TYPE_GTP:
10051                         flow_dv_translate_item_gtp(match_mask, match_value,
10052                                                    items, tunnel);
10053                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10054                         last_item = MLX5_FLOW_LAYER_GTP;
10055                         break;
10056                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10057                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10058                                 /* Create it only the first time to be used. */
10059                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10060                                 if (ret)
10061                                         return rte_flow_error_set
10062                                                 (error, -ret,
10063                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10064                                                 NULL,
10065                                                 "cannot create eCPRI parser");
10066                         }
10067                         /* Adjust the length matcher and device flow value. */
10068                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10069                         dev_flow->dv.value.size =
10070                                         MLX5_ST_SZ_BYTES(fte_match_param);
10071                         flow_dv_translate_item_ecpri(dev, match_mask,
10072                                                      match_value, items);
10073                         /* No other protocol should follow eCPRI layer. */
10074                         last_item = MLX5_FLOW_LAYER_ECPRI;
10075                         break;
10076                 default:
10077                         break;
10078                 }
10079                 item_flags |= last_item;
10080         }
10081         /*
10082          * When E-Switch mode is enabled, we have two cases where we need to
10083          * set the source port manually.
10084          * The first one, is in case of Nic steering rule, and the second is
10085          * E-Switch rule where no port_id item was found. In both cases
10086          * the source port is set according the current port in use.
10087          */
10088         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10089             (priv->representor || priv->master)) {
10090                 if (flow_dv_translate_item_port_id(dev, match_mask,
10091                                                    match_value, NULL))
10092                         return -rte_errno;
10093         }
10094 #ifdef RTE_LIBRTE_MLX5_DEBUG
10095         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10096                                               dev_flow->dv.value.buf));
10097 #endif
10098         /*
10099          * Layers may be already initialized from prefix flow if this dev_flow
10100          * is the suffix flow.
10101          */
10102         handle->layers |= item_flags;
10103         if (action_flags & MLX5_FLOW_ACTION_RSS)
10104                 flow_dv_hashfields_set(dev_flow, rss_desc);
10105         /* Register matcher. */
10106         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10107                                     matcher.mask.size);
10108         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10109                                                      matcher.priority);
10110         /* reserved field no needs to be set to 0 here. */
10111         tbl_key.domain = attr->transfer;
10112         tbl_key.direction = attr->egress;
10113         tbl_key.table_id = dev_flow->dv.group;
10114         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
10115                 return -rte_errno;
10116         return 0;
10117 }
10118
10119 /**
10120  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10121  * and tunnel.
10122  *
10123  * @param[in, out] action
10124  *   Shared RSS action holding hash RX queue objects.
10125  * @param[in] hash_fields
10126  *   Defines combination of packet fields to participate in RX hash.
10127  * @param[in] tunnel
10128  *   Tunnel type
10129  * @param[in] hrxq_idx
10130  *   Hash RX queue index to set.
10131  *
10132  * @return
10133  *   0 on success, otherwise negative errno value.
10134  */
10135 static int
10136 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10137                               const uint64_t hash_fields,
10138                               const int tunnel,
10139                               uint32_t hrxq_idx)
10140 {
10141         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10142
10143         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10144         case MLX5_RSS_HASH_IPV4:
10145                 hrxqs[0] = hrxq_idx;
10146                 return 0;
10147         case MLX5_RSS_HASH_IPV4_TCP:
10148                 hrxqs[1] = hrxq_idx;
10149                 return 0;
10150         case MLX5_RSS_HASH_IPV4_UDP:
10151                 hrxqs[2] = hrxq_idx;
10152                 return 0;
10153         case MLX5_RSS_HASH_IPV6:
10154                 hrxqs[3] = hrxq_idx;
10155                 return 0;
10156         case MLX5_RSS_HASH_IPV6_TCP:
10157                 hrxqs[4] = hrxq_idx;
10158                 return 0;
10159         case MLX5_RSS_HASH_IPV6_UDP:
10160                 hrxqs[5] = hrxq_idx;
10161                 return 0;
10162         case MLX5_RSS_HASH_NONE:
10163                 hrxqs[6] = hrxq_idx;
10164                 return 0;
10165         default:
10166                 return -1;
10167         }
10168 }
10169
10170 /**
10171  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10172  * and tunnel.
10173  *
10174  * @param[in] action
10175  *   Shared RSS action holding hash RX queue objects.
10176  * @param[in] hash_fields
10177  *   Defines combination of packet fields to participate in RX hash.
10178  * @param[in] tunnel
10179  *   Tunnel type
10180  *
10181  * @return
10182  *   Valid hash RX queue index, otherwise 0.
10183  */
10184 static uint32_t
10185 __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
10186                                  const uint64_t hash_fields,
10187                                  const int tunnel)
10188 {
10189         const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10190
10191         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10192         case MLX5_RSS_HASH_IPV4:
10193                 return hrxqs[0];
10194         case MLX5_RSS_HASH_IPV4_TCP:
10195                 return hrxqs[1];
10196         case MLX5_RSS_HASH_IPV4_UDP:
10197                 return hrxqs[2];
10198         case MLX5_RSS_HASH_IPV6:
10199                 return hrxqs[3];
10200         case MLX5_RSS_HASH_IPV6_TCP:
10201                 return hrxqs[4];
10202         case MLX5_RSS_HASH_IPV6_UDP:
10203                 return hrxqs[5];
10204         case MLX5_RSS_HASH_NONE:
10205                 return hrxqs[6];
10206         default:
10207                 return 0;
10208         }
10209 }
10210
10211 /**
10212  * Retrieves hash RX queue suitable for the *flow*.
10213  * If shared action configured for *flow* suitable hash RX queue will be
10214  * retrieved from attached shared action.
10215  *
10216  * @param[in] flow
10217  *   Pointer to the flow structure (may reference a shared RSS action).
10218  * @param[in] dev_flow
10219  *   Pointer to the sub flow.
10220  * @param[out] hrxq
10221  *   Pointer to retrieved hash RX queue object.
10222  *
10223  * @return
10224  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10225  */
10226 static uint32_t
10227 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
10228                            struct mlx5_flow *dev_flow,
10229                            struct mlx5_hrxq **hrxq)
10230 {
10231         struct mlx5_priv *priv = dev->data->dev_private;
10232         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10233         uint32_t hrxq_idx;
10234
10235         if (flow->shared_rss) {
10236                 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10237                                 (flow->shared_rss, dev_flow->hash_fields,
10238                                  !!(dev_flow->handle->layers &
10239                                     MLX5_FLOW_LAYER_TUNNEL));
10240                 if (hrxq_idx) {
10241                         *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10242                                                hrxq_idx);
10243                         __atomic_fetch_add(&(*hrxq)->refcnt, 1,
10244                                            __ATOMIC_RELAXED);
10245                 }
10246         } else {
10247                 struct mlx5_flow_rss_desc *rss_desc =
10248                                 &wks->rss_desc[!!wks->flow_nested_idx];
10249
10250                 MLX5_ASSERT(rss_desc->queue_num);
10251                 hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
10252                                          MLX5_RSS_HASH_KEY_LEN,
10253                                          dev_flow->hash_fields,
10254                                          rss_desc->queue, rss_desc->queue_num);
10255                 if (!hrxq_idx) {
10256                         hrxq_idx = mlx5_hrxq_new(dev,
10257                                                  rss_desc->key,
10258                                                  MLX5_RSS_HASH_KEY_LEN,
10259                                                  dev_flow->hash_fields,
10260                                                  rss_desc->queue,
10261                                                  rss_desc->queue_num,
10262                                                  !!(dev_flow->handle->layers &
10263                                                  MLX5_FLOW_LAYER_TUNNEL),
10264                                                  false);
10265                 }
10266                 *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10267                                        hrxq_idx);
10268         }
10269         return hrxq_idx;
10270 }
10271
10272 /**
10273  * Apply the flow to the NIC, lock free,
10274  * (mutex should be acquired by caller).
10275  *
10276  * @param[in] dev
10277  *   Pointer to the Ethernet device structure.
10278  * @param[in, out] flow
10279  *   Pointer to flow structure.
10280  * @param[out] error
10281  *   Pointer to error structure.
10282  *
10283  * @return
10284  *   0 on success, a negative errno value otherwise and rte_errno is set.
10285  */
static int
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct mlx5_flow_dv_workspace *dv;
	struct mlx5_flow_handle *dh;
	struct mlx5_flow_handle_dv *dv_h;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	int n;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	/*
	 * Walk the per-thread workspace sub-flows of the current flow in
	 * reverse creation order and instantiate each one in hardware.
	 */
	for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
		dev_flow = &wks->flows[idx];
		dv = &dev_flow->dv;
		dh = dev_flow->handle;
		dv_h = &dh->dvh;
		n = dv->actions_n;
		/* Append the fate action (drop/queue/default-miss) last. */
		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
			if (dv->transfer) {
				/* E-Switch rules use the shared drop action. */
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				MLX5_ASSERT(priv->drop_queue.hrxq);
				dv->actions[n++] =
						priv->drop_queue.hrxq->action;
			}
		} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
			   !dv_h->rix_sample && !dv_h->rix_dest_array) {
			struct mlx5_hrxq *hrxq = NULL;
			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
						(dev, flow, dev_flow, &hrxq);

			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			/* Remember the hrxq index for release on destroy. */
			dh->rix_hrxq = hrxq_idx;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
			if (!priv->sh->default_miss_action) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "default miss action not be created.");
				goto error;
			}
			dv->actions[n++] = priv->sh->default_miss_action;
		}
		/* Create the HW flow rule from matcher, value and actions. */
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,
					       dv->actions, &dh->drv_flow);
		if (err) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Roll back resources acquired for every handle of this flow. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dh, next) {
		/* hrxq is union, don't clear it if the flag is not set. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
			mlx5_hrxq_release(dev, dh->rix_hrxq);
			dh->rix_hrxq = 0;
		}
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
10378
10379 /**
10380  * Release the flow matcher.
10381  *
10382  * @param dev
10383  *   Pointer to Ethernet device.
10384  * @param handle
10385  *   Pointer to mlx5_flow_handle.
10386  *
10387  * @return
10388  *   1 while a reference on it exists, 0 when freed.
10389  */
10390 static int
10391 flow_dv_matcher_release(struct rte_eth_dev *dev,
10392                         struct mlx5_flow_handle *handle)
10393 {
10394         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10395
10396         MLX5_ASSERT(matcher->matcher_object);
10397         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
10398                 dev->data->port_id, (void *)matcher,
10399                 __atomic_load_n(&matcher->refcnt, __ATOMIC_RELAXED));
10400         if (__atomic_sub_fetch(&matcher->refcnt, 1, __ATOMIC_RELAXED) == 0) {
10401                 claim_zero(mlx5_flow_os_destroy_flow_matcher
10402                            (matcher->matcher_object));
10403                 LIST_REMOVE(matcher, next);
10404                 /* table ref-- in release interface. */
10405                 flow_dv_tbl_resource_release(dev, matcher->tbl);
10406                 mlx5_free(matcher);
10407                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
10408                         dev->data->port_id, (void *)matcher);
10409                 return 0;
10410         }
10411         return 1;
10412 }
10413
10414 /**
10415  * Release an encap/decap resource.
10416  *
10417  * @param dev
10418  *   Pointer to Ethernet device.
10419  * @param encap_decap_idx
10420  *   Index of encap decap resource.
10421  *
10422  * @return
10423  *   1 while a reference on it exists, 0 when freed.
10424  */
10425 static int
10426 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10427                                      uint32_t encap_decap_idx)
10428 {
10429         struct mlx5_priv *priv = dev->data->dev_private;
10430         uint32_t idx = encap_decap_idx;
10431         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10432
10433         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10434                          idx);
10435         if (!cache_resource)
10436                 return 0;
10437         MLX5_ASSERT(cache_resource->action);
10438         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
10439                 (void *)cache_resource,
10440                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10441         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10442                                __ATOMIC_RELAXED) == 0) {
10443                 claim_zero(mlx5_flow_os_destroy_flow_action
10444                                                 (cache_resource->action));
10445                 mlx5_hlist_remove(priv->sh->encaps_decaps,
10446                                   &cache_resource->entry);
10447                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
10448                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
10449                         (void *)cache_resource);
10450                 return 0;
10451         }
10452         return 1;
10453 }
10454
10455 /**
10456  * Release a jump to table action resource.
10457  *
10458  * @param dev
10459  *   Pointer to Ethernet device.
10460  * @param handle
10461  *   Pointer to mlx5_flow_handle.
10462  *
10463  * @return
10464  *   1 while a reference on it exists, 0 when freed.
10465  */
10466 static int
10467 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10468                                   struct mlx5_flow_handle *handle)
10469 {
10470         struct mlx5_priv *priv = dev->data->dev_private;
10471         struct mlx5_flow_tbl_data_entry *tbl_data;
10472
10473         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10474                              handle->rix_jump);
10475         if (!tbl_data)
10476                 return 0;
10477         return flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
10478 }
10479
/**
 * Remove callback for the modify-header hash list: destroy the
 * DR/DV modify-header action and free the resource memory.
 *
 * @param list
 *   Hash list the entry belongs to (unused).
 * @param entry
 *   Hash list entry embedded in the modify-header resource.
 */
void
flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
			 struct mlx5_hlist_entry *entry)
{
	struct mlx5_flow_dv_modify_hdr_resource *res =
		container_of(entry, typeof(*res), entry);

	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
	/*
	 * NOTE(review): frees 'entry' rather than the container 'res' —
	 * correct only if 'entry' is the first member of the resource
	 * struct; confirm the layout before relying on this.
	 */
	mlx5_free(entry);
}
10490
10491 /**
10492  * Release a modify-header resource.
10493  *
10494  * @param dev
10495  *   Pointer to Ethernet device.
10496  * @param handle
10497  *   Pointer to mlx5_flow_handle.
10498  *
10499  * @return
10500  *   1 while a reference on it exists, 0 when freed.
10501  */
10502 static int
10503 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10504                                     struct mlx5_flow_handle *handle)
10505 {
10506         struct mlx5_priv *priv = dev->data->dev_private;
10507         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10508
10509         MLX5_ASSERT(entry->action);
10510         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10511 }
10512
10513 /**
10514  * Release port ID action resource.
10515  *
10516  * @param dev
10517  *   Pointer to Ethernet device.
10518  * @param handle
10519  *   Pointer to mlx5_flow_handle.
10520  *
10521  * @return
10522  *   1 while a reference on it exists, 0 when freed.
10523  */
10524 static int
10525 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10526                                         uint32_t port_id)
10527 {
10528         struct mlx5_priv *priv = dev->data->dev_private;
10529         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
10530         uint32_t idx = port_id;
10531
10532         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
10533                                         idx);
10534         if (!cache_resource)
10535                 return 0;
10536         MLX5_ASSERT(cache_resource->action);
10537         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
10538                 (void *)cache_resource,
10539                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10540         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10541                                __ATOMIC_RELAXED) == 0) {
10542                 claim_zero(mlx5_flow_os_destroy_flow_action
10543                                                 (cache_resource->action));
10544                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
10545                              &priv->sh->port_id_action_list, idx,
10546                              cache_resource, next);
10547                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
10548                 DRV_LOG(DEBUG, "port id action resource %p: removed",
10549                         (void *)cache_resource);
10550                 return 0;
10551         }
10552         return 1;
10553 }
10554
10555 /**
10556  * Release push vlan action resource.
10557  *
10558  * @param dev
10559  *   Pointer to Ethernet device.
10560  * @param handle
10561  *   Pointer to mlx5_flow_handle.
10562  *
10563  * @return
10564  *   1 while a reference on it exists, 0 when freed.
10565  */
10566 static int
10567 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10568                                           struct mlx5_flow_handle *handle)
10569 {
10570         struct mlx5_priv *priv = dev->data->dev_private;
10571         uint32_t idx = handle->dvh.rix_push_vlan;
10572         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
10573
10574         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
10575                                         idx);
10576         if (!cache_resource)
10577                 return 0;
10578         MLX5_ASSERT(cache_resource->action);
10579         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
10580                 (void *)cache_resource,
10581                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10582         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10583                                __ATOMIC_RELAXED) == 0) {
10584                 claim_zero(mlx5_flow_os_destroy_flow_action
10585                                                 (cache_resource->action));
10586                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
10587                              &priv->sh->push_vlan_action_list, idx,
10588                              cache_resource, next);
10589                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10590                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
10591                         (void *)cache_resource);
10592                 return 0;
10593         }
10594         return 1;
10595 }
10596
10597 /**
10598  * Release the fate resource.
10599  *
10600  * @param dev
10601  *   Pointer to Ethernet device.
10602  * @param handle
10603  *   Pointer to mlx5_flow_handle.
10604  */
10605 static void
10606 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
10607                                struct mlx5_flow_handle *handle)
10608 {
10609         if (!handle->rix_fate)
10610                 return;
10611         switch (handle->fate_action) {
10612         case MLX5_FLOW_FATE_QUEUE:
10613                 mlx5_hrxq_release(dev, handle->rix_hrxq);
10614                 break;
10615         case MLX5_FLOW_FATE_JUMP:
10616                 flow_dv_jump_tbl_resource_release(dev, handle);
10617                 break;
10618         case MLX5_FLOW_FATE_PORT_ID:
10619                 flow_dv_port_id_action_resource_release(dev,
10620                                 handle->rix_port_id_action);
10621                 break;
10622         default:
10623                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
10624                 break;
10625         }
10626         handle->rix_fate = 0;
10627 }
10628
10629 /**
 * Release a sample resource.
10631  *
10632  * @param dev
10633  *   Pointer to Ethernet device.
10634  * @param handle
10635  *   Pointer to mlx5_flow_handle.
10636  *
10637  * @return
10638  *   1 while a reference on it exists, 0 when freed.
10639  */
static int
flow_dv_sample_resource_release(struct rte_eth_dev *dev,
				     struct mlx5_flow_handle *handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t idx = handle->dvh.rix_sample;
	struct mlx5_flow_dv_sample_resource *cache_resource;

	/* Look up the shared sample resource referenced by this handle. */
	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
			 idx);
	if (!cache_resource)
		return 0;
	MLX5_ASSERT(cache_resource->verbs_action);
	DRV_LOG(DEBUG, "sample resource %p: refcnt %d--",
		(void *)cache_resource,
		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
	/* Drop one reference; the last owner destroys the HW objects. */
	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
			       __ATOMIC_RELAXED) == 0) {
		if (cache_resource->verbs_action)
			claim_zero(mlx5_glue->destroy_flow_action
					(cache_resource->verbs_action));
		/* The default-miss action only exists on the FDB domain. */
		if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
			if (cache_resource->default_miss)
				claim_zero(mlx5_glue->destroy_flow_action
				  (cache_resource->default_miss));
		}
		if (cache_resource->normal_path_tbl)
			flow_dv_tbl_resource_release(dev,
				cache_resource->normal_path_tbl);
	}
	/*
	 * NOTE(review): the sample_idx sub-resources below (hrxq, tag,
	 * counter) are released on every call, even when the refcnt drop
	 * above left other references alive - confirm this per-call
	 * ownership model is intended and not a premature release.
	 */
	if (cache_resource->sample_idx.rix_hrxq &&
		!mlx5_hrxq_release(dev,
			cache_resource->sample_idx.rix_hrxq))
		cache_resource->sample_idx.rix_hrxq = 0;
	if (cache_resource->sample_idx.rix_tag &&
		!flow_dv_tag_release(dev,
			cache_resource->sample_idx.rix_tag))
		cache_resource->sample_idx.rix_tag = 0;
	if (cache_resource->sample_idx.cnt) {
		flow_dv_counter_release(dev,
			cache_resource->sample_idx.cnt);
		cache_resource->sample_idx.cnt = 0;
	}
	/* Only the last owner (refcnt reached 0 above) unlinks and frees. */
	if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
			     &priv->sh->sample_action_list, idx,
			     cache_resource, next);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx);
		DRV_LOG(DEBUG, "sample resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}
10694
10695 /**
 * Release a destination array resource.
10697  *
10698  * @param dev
10699  *   Pointer to Ethernet device.
10700  * @param handle
10701  *   Pointer to mlx5_flow_handle.
10702  *
10703  * @return
10704  *   1 while a reference on it exists, 0 when freed.
10705  */
static int
flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
				     struct mlx5_flow_handle *handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_dest_array_resource *cache_resource;
	struct mlx5_flow_sub_actions_idx *mdest_act_res;
	uint32_t idx = handle->dvh.rix_dest_array;
	uint32_t i = 0;

	/* Look up the shared destination array referenced by this handle. */
	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
			 idx);
	if (!cache_resource)
		return 0;
	MLX5_ASSERT(cache_resource->action);
	DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
		(void *)cache_resource,
		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
	/* Drop one reference; only the last owner tears everything down. */
	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
			       __ATOMIC_RELAXED) == 0) {
		/* Destroy the HW action before releasing its destinations. */
		if (cache_resource->action)
			claim_zero(mlx5_glue->destroy_flow_action
						(cache_resource->action));
		/* Release each destination's sub-resources in turn. */
		for (; i < cache_resource->num_of_dest; i++) {
			mdest_act_res = &cache_resource->sample_idx[i];
			if (mdest_act_res->rix_hrxq) {
				mlx5_hrxq_release(dev,
					mdest_act_res->rix_hrxq);
				mdest_act_res->rix_hrxq = 0;
			}
			if (mdest_act_res->rix_encap_decap) {
				flow_dv_encap_decap_resource_release(dev,
					mdest_act_res->rix_encap_decap);
				mdest_act_res->rix_encap_decap = 0;
			}
			if (mdest_act_res->rix_port_id_action) {
				flow_dv_port_id_action_resource_release(dev,
					mdest_act_res->rix_port_id_action);
				mdest_act_res->rix_port_id_action = 0;
			}
			if (mdest_act_res->rix_tag) {
				flow_dv_tag_release(dev,
					mdest_act_res->rix_tag);
				mdest_act_res->rix_tag = 0;
			}
		}
		/* Unlink from the shared list and return the ipool entry. */
		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
			     &priv->sh->dest_array_list, idx,
			     cache_resource, next);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
		DRV_LOG(DEBUG, "destination array resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}
10762
10763 /**
10764  * Remove the flow from the NIC but keeps it in memory.
10765  * Lock free, (mutex should be acquired by caller).
10766  *
10767  * @param[in] dev
10768  *   Pointer to Ethernet device.
10769  * @param[in, out] flow
10770  *   Pointer to flow structure.
10771  */
10772 static void
10773 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
10774 {
10775         struct mlx5_flow_handle *dh;
10776         uint32_t handle_idx;
10777         struct mlx5_priv *priv = dev->data->dev_private;
10778
10779         if (!flow)
10780                 return;
10781         handle_idx = flow->dev_handles;
10782         while (handle_idx) {
10783                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10784                                     handle_idx);
10785                 if (!dh)
10786                         return;
10787                 if (dh->drv_flow) {
10788                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
10789                         dh->drv_flow = NULL;
10790                 }
10791                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
10792                         flow_dv_fate_resource_release(dev, dh);
10793                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10794                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10795                 handle_idx = dh->next.next;
10796         }
10797 }
10798
10799 /**
10800  * Remove the flow from the NIC and the memory.
10801  * Lock free, (mutex should be acquired by caller).
10802  *
10803  * @param[in] dev
10804  *   Pointer to the Ethernet device structure.
10805  * @param[in, out] flow
10806  *   Pointer to flow structure.
10807  */
static void
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct rte_flow_shared_action *shared;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!flow)
		return;
	/* Remove the flow from HW first, then release SW resources. */
	__flow_dv_remove(dev, flow);
	/* Drop the reference taken on the shared RSS action, if any. */
	shared = mlx5_flow_get_shared_rss(flow);
	if (shared)
		__atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
	if (flow->counter) {
		flow_dv_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
	if (flow->meter) {
		struct mlx5_flow_meter *fm;

		fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
				    flow->meter);
		if (fm)
			mlx5_flow_meter_detach(fm);
		flow->meter = 0;
	}
	/* Free each device handle and all resources it references. */
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		dev_handle = mlx5_ipool_get(priv->sh->ipool
					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
		if (!dev_handle)
			return;
		/* Advance before freeing - the handle holds the next link. */
		flow->dev_handles = dev_handle->next.next;
		if (dev_handle->dvh.matcher)
			flow_dv_matcher_release(dev, dev_handle);
		if (dev_handle->dvh.rix_sample)
			flow_dv_sample_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_dest_array)
			flow_dv_dest_array_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_encap_decap)
			flow_dv_encap_decap_resource_release(dev,
				dev_handle->dvh.rix_encap_decap);
		if (dev_handle->dvh.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_push_vlan)
			flow_dv_push_vlan_action_resource_release(dev,
								  dev_handle);
		if (dev_handle->dvh.rix_tag)
			flow_dv_tag_release(dev,
					    dev_handle->dvh.rix_tag);
		flow_dv_fate_resource_release(dev, dev_handle);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
}
10864
10865 /**
10866  * Release array of hash RX queue objects.
10867  * Helper function.
10868  *
10869  * @param[in] dev
10870  *   Pointer to the Ethernet device structure.
10871  * @param[in, out] hrxqs
10872  *   Array of hash RX queue objects.
10873  *
10874  * @return
10875  *   Total number of references to hash RX queue objects in *hrxqs* array
10876  *   after this operation.
10877  */
10878 static int
10879 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
10880                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
10881 {
10882         size_t i;
10883         int remaining = 0;
10884
10885         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
10886                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
10887
10888                 if (!ret)
10889                         (*hrxqs)[i] = 0;
10890                 remaining += ret;
10891         }
10892         return remaining;
10893 }
10894
10895 /**
10896  * Release all hash RX queue objects representing shared RSS action.
10897  *
10898  * @param[in] dev
10899  *   Pointer to the Ethernet device structure.
10900  * @param[in, out] action
10901  *   Shared RSS action to remove hash RX queue objects from.
10902  *
10903  * @return
10904  *   Total number of references to hash RX queue objects stored in *action*
10905  *   after this operation.
10906  *   Expected to be 0 if no external references held.
10907  */
10908 static int
10909 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
10910                                  struct mlx5_shared_action_rss *action)
10911 {
10912         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
10913                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
10914 }
10915
10916 /**
10917  * Setup shared RSS action.
10918  * Prepare set of hash RX queue objects sufficient to handle all valid
10919  * hash_fields combinations (see enum ibv_rx_hash_fields).
10920  *
10921  * @param[in] dev
10922  *   Pointer to the Ethernet device structure.
10923  * @param[in, out] action
10924  *   Partially initialized shared RSS action.
10925  * @param[out] error
10926  *   Perform verbose error reporting if not NULL. Initialized in case of
10927  *   error only.
10928  *
10929  * @return
10930  *   0 on success, otherwise negative errno value.
10931  */
10932 static int
10933 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
10934                         struct mlx5_shared_action_rss *action,
10935                         struct rte_flow_error *error)
10936 {
10937         size_t i;
10938         int err;
10939
10940         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
10941                 uint32_t hrxq_idx;
10942                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
10943                 int tunnel;
10944
10945                 for (tunnel = 0; tunnel < 2; tunnel++) {
10946                         hrxq_idx = mlx5_hrxq_new(dev, action->origin.key,
10947                                         MLX5_RSS_HASH_KEY_LEN,
10948                                         hash_fields,
10949                                         action->origin.queue,
10950                                         action->origin.queue_num,
10951                                         tunnel, true);
10952                         if (!hrxq_idx) {
10953                                 rte_flow_error_set
10954                                         (error, rte_errno,
10955                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10956                                          "cannot get hash queue");
10957                                 goto error_hrxq_new;
10958                         }
10959                         err = __flow_dv_action_rss_hrxq_set
10960                                 (action, hash_fields, tunnel, hrxq_idx);
10961                         MLX5_ASSERT(!err);
10962                 }
10963         }
10964         return 0;
10965 error_hrxq_new:
10966         err = rte_errno;
10967         __flow_dv_action_rss_hrxqs_release(dev, action);
10968         rte_errno = err;
10969         return -rte_errno;
10970 }
10971
10972 /**
10973  * Create shared RSS action.
10974  *
10975  * @param[in] dev
10976  *   Pointer to the Ethernet device structure.
10977  * @param[in] conf
10978  *   Shared action configuration.
10979  * @param[in] rss
10980  *   RSS action specification used to create shared action.
10981  * @param[out] error
10982  *   Perform verbose error reporting if not NULL. Initialized in case of
10983  *   error only.
10984  *
10985  * @return
10986  *   A valid shared action handle in case of success, NULL otherwise and
10987  *   rte_errno is set.
10988  */
10989 static struct rte_flow_shared_action *
10990 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
10991                             const struct rte_flow_shared_action_conf *conf,
10992                             const struct rte_flow_action_rss *rss,
10993                             struct rte_flow_error *error)
10994 {
10995         struct rte_flow_shared_action *shared_action = NULL;
10996         void *queue = NULL;
10997         struct mlx5_shared_action_rss *shared_rss;
10998         struct rte_flow_action_rss *origin;
10999         const uint8_t *rss_key;
11000         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11001
11002         RTE_SET_USED(conf);
11003         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11004                             0, SOCKET_ID_ANY);
11005         shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
11006                                     SOCKET_ID_ANY);
11007         if (!shared_action || !queue) {
11008                 rte_flow_error_set(error, ENOMEM,
11009                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11010                                    "cannot allocate resource memory");
11011                 goto error_rss_init;
11012         }
11013         shared_rss = &shared_action->rss;
11014         shared_rss->queue = queue;
11015         origin = &shared_rss->origin;
11016         origin->func = rss->func;
11017         origin->level = rss->level;
11018         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11019         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11020         /* NULL RSS key indicates default RSS key. */
11021         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11022         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11023         origin->key = &shared_rss->key[0];
11024         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11025         memcpy(shared_rss->queue, rss->queue, queue_size);
11026         origin->queue = shared_rss->queue;
11027         origin->queue_num = rss->queue_num;
11028         if (__flow_dv_action_rss_setup(dev, shared_rss, error))
11029                 goto error_rss_init;
11030         shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
11031         return shared_action;
11032 error_rss_init:
11033         mlx5_free(shared_action);
11034         mlx5_free(queue);
11035         return NULL;
11036 }
11037
11038 /**
11039  * Destroy the shared RSS action.
11040  * Release related hash RX queue objects.
11041  *
11042  * @param[in] dev
11043  *   Pointer to the Ethernet device structure.
11044  * @param[in] shared_rss
11045  *   The shared RSS action object to be removed.
11046  * @param[out] error
11047  *   Perform verbose error reporting if not NULL. Initialized in case of
11048  *   error only.
11049  *
11050  * @return
11051  *   0 on success, otherwise negative errno value.
11052  */
11053 static int
11054 __flow_dv_action_rss_release(struct rte_eth_dev *dev,
11055                          struct mlx5_shared_action_rss *shared_rss,
11056                          struct rte_flow_error *error)
11057 {
11058         struct rte_flow_shared_action *shared_action = NULL;
11059         uint32_t old_refcnt = 1;
11060         int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11061
11062         if (remaining) {
11063                 return rte_flow_error_set(error, ETOOMANYREFS,
11064                                           RTE_FLOW_ERROR_TYPE_ACTION,
11065                                           NULL,
11066                                           "shared rss hrxq has references");
11067         }
11068         shared_action = container_of(shared_rss,
11069                                      struct rte_flow_shared_action, rss);
11070         if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
11071                                          0, 0,
11072                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
11073                 return rte_flow_error_set(error, ETOOMANYREFS,
11074                                           RTE_FLOW_ERROR_TYPE_ACTION,
11075                                           NULL,
11076                                           "shared rss has references");
11077         }
11078         rte_free(shared_rss->queue);
11079         return 0;
11080 }
11081
11082 /**
11083  * Create shared action, lock free,
11084  * (mutex should be acquired by caller).
11085  * Dispatcher for action type specific call.
11086  *
11087  * @param[in] dev
11088  *   Pointer to the Ethernet device structure.
11089  * @param[in] conf
11090  *   Shared action configuration.
11091  * @param[in] action
11092  *   Action specification used to create shared action.
11093  * @param[out] error
11094  *   Perform verbose error reporting if not NULL. Initialized in case of
11095  *   error only.
11096  *
11097  * @return
11098  *   A valid shared action handle in case of success, NULL otherwise and
11099  *   rte_errno is set.
11100  */
11101 static struct rte_flow_shared_action *
11102 __flow_dv_action_create(struct rte_eth_dev *dev,
11103                         const struct rte_flow_shared_action_conf *conf,
11104                         const struct rte_flow_action *action,
11105                         struct rte_flow_error *error)
11106 {
11107         struct rte_flow_shared_action *shared_action = NULL;
11108         struct mlx5_priv *priv = dev->data->dev_private;
11109
11110         switch (action->type) {
11111         case RTE_FLOW_ACTION_TYPE_RSS:
11112                 shared_action = __flow_dv_action_rss_create(dev, conf,
11113                                                             action->conf,
11114                                                             error);
11115                 break;
11116         default:
11117                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11118                                    NULL, "action type not supported");
11119                 break;
11120         }
11121         if (shared_action) {
11122                 __atomic_add_fetch(&shared_action->refcnt, 1,
11123                                    __ATOMIC_RELAXED);
11124                 LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
11125         }
11126         return shared_action;
11127 }
11128
11129 /**
11130  * Destroy the shared action.
11131  * Release action related resources on the NIC and the memory.
11132  * Lock free, (mutex should be acquired by caller).
11133  * Dispatcher for action type specific call.
11134  *
11135  * @param[in] dev
11136  *   Pointer to the Ethernet device structure.
11137  * @param[in] action
11138  *   The shared action object to be removed.
11139  * @param[out] error
11140  *   Perform verbose error reporting if not NULL. Initialized in case of
11141  *   error only.
11142  *
11143  * @return
11144  *   0 on success, otherwise negative errno value.
11145  */
11146 static int
11147 __flow_dv_action_destroy(struct rte_eth_dev *dev,
11148                          struct rte_flow_shared_action *action,
11149                          struct rte_flow_error *error)
11150 {
11151         int ret;
11152
11153         switch (action->type) {
11154         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11155                 ret = __flow_dv_action_rss_release(dev, &action->rss, error);
11156                 break;
11157         default:
11158                 return rte_flow_error_set(error, ENOTSUP,
11159                                           RTE_FLOW_ERROR_TYPE_ACTION,
11160                                           NULL,
11161                                           "action type not supported");
11162         }
11163         if (ret)
11164                 return ret;
11165         LIST_REMOVE(action, next);
11166         rte_free(action);
11167         return 0;
11168 }
11169
11170 /**
11171  * Updates in place shared RSS action configuration.
11172  *
11173  * @param[in] dev
11174  *   Pointer to the Ethernet device structure.
11175  * @param[in] shared_rss
11176  *   The shared RSS action object to be updated.
11177  * @param[in] action_conf
11178  *   RSS action specification used to modify *shared_rss*.
11179  * @param[out] error
11180  *   Perform verbose error reporting if not NULL. Initialized in case of
11181  *   error only.
11182  *
11183  * @return
11184  *   0 on success, otherwise negative errno value.
11185  * @note: currently only support update of RSS queues.
11186  */
static int
__flow_dv_action_rss_update(struct rte_eth_dev *dev,
			    struct mlx5_shared_action_rss *shared_rss,
			    const struct rte_flow_action_rss *action_conf,
			    struct rte_flow_error *error)
{
	size_t i;
	int ret;
	void *queue = NULL;
	const uint8_t *rss_key;
	uint32_t rss_key_len;
	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);

	/* Allocate the replacement queue array before touching any hrxq. */
	queue = mlx5_malloc(MLX5_MEM_ZERO,
			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	if (!queue)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	/* NULL key means the default RSS key. */
	if (action_conf->key) {
		rss_key = action_conf->key;
		rss_key_len = action_conf->key_len;
	} else {
		rss_key = rss_hash_default_key;
		rss_key_len = MLX5_RSS_HASH_KEY_LEN;
	}
	/* Modify every pre-created (hash fields, tunnel) hash Rx queue. */
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel;

		for (tunnel = 0; tunnel < 2; tunnel++) {
			hrxq_idx = __flow_dv_action_rss_hrxq_lookup
					(shared_rss, hash_fields, tunnel);
			MLX5_ASSERT(hrxq_idx);
			ret = mlx5_hrxq_modify
				(dev, hrxq_idx,
				 rss_key, rss_key_len,
				 hash_fields,
				 action_conf->queue, action_conf->queue_num);
			/*
			 * On failure only the new array is freed; queues
			 * modified so far keep the new configuration.
			 */
			if (ret) {
				mlx5_free(queue);
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "cannot update hash queue");
			}
		}
	}
	/* All queues updated - swap in the new queue array and free the old. */
	mlx5_free(shared_rss->queue);
	shared_rss->queue = queue;
	memcpy(shared_rss->queue, action_conf->queue, queue_size);
	shared_rss->origin.queue = shared_rss->queue;
	shared_rss->origin.queue_num = action_conf->queue_num;
	return 0;
}
11245
11246 /**
11247  * Updates in place shared action configuration, lock free,
11248  * (mutex should be acquired by caller).
11249  *
11250  * @param[in] dev
11251  *   Pointer to the Ethernet device structure.
11252  * @param[in] action
11253  *   The shared action object to be updated.
11254  * @param[in] action_conf
11255  *   Action specification used to modify *action*.
11256  *   *action_conf* should be of type correlating with type of the *action*,
11257  *   otherwise considered as invalid.
11258  * @param[out] error
11259  *   Perform verbose error reporting if not NULL. Initialized in case of
11260  *   error only.
11261  *
11262  * @return
11263  *   0 on success, otherwise negative errno value.
11264  */
11265 static int
11266 __flow_dv_action_update(struct rte_eth_dev *dev,
11267                         struct rte_flow_shared_action *action,
11268                         const void *action_conf,
11269                         struct rte_flow_error *error)
11270 {
11271         switch (action->type) {
11272         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11273                 return __flow_dv_action_rss_update(dev, &action->rss,
11274                                                    action_conf, error);
11275         default:
11276                 return rte_flow_error_set(error, ENOTSUP,
11277                                           RTE_FLOW_ERROR_TYPE_ACTION,
11278                                           NULL,
11279                                           "action type not supported");
11280         }
11281 }
11282 /**
 * Query a DV flow rule for its statistics via DevX.
11284  *
11285  * @param[in] dev
11286  *   Pointer to Ethernet device.
11287  * @param[in] flow
11288  *   Pointer to the sub flow.
11289  * @param[out] data
11290  *   data retrieved by the query.
11291  * @param[out] error
11292  *   Perform verbose error reporting if not NULL.
11293  *
11294  * @return
11295  *   0 on success, a negative errno value otherwise and rte_errno is set.
11296  */
11297 static int
11298 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11299                     void *data, struct rte_flow_error *error)
11300 {
11301         struct mlx5_priv *priv = dev->data->dev_private;
11302         struct rte_flow_query_count *qc = data;
11303
11304         if (!priv->config.devx)
11305                 return rte_flow_error_set(error, ENOTSUP,
11306                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11307                                           NULL,
11308                                           "counters are not supported");
11309         if (flow->counter) {
11310                 uint64_t pkts, bytes;
11311                 struct mlx5_flow_counter *cnt;
11312
11313                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
11314                                                  NULL);
11315                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
11316                                                &bytes);
11317
11318                 if (err)
11319                         return rte_flow_error_set(error, -err,
11320                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11321                                         NULL, "cannot read counters");
11322                 qc->hits_set = 1;
11323                 qc->bytes_set = 1;
11324                 qc->hits = pkts - cnt->hits;
11325                 qc->bytes = bytes - cnt->bytes;
11326                 if (qc->reset) {
11327                         cnt->hits = pkts;
11328                         cnt->bytes = bytes;
11329                 }
11330                 return 0;
11331         }
11332         return rte_flow_error_set(error, EINVAL,
11333                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11334                                   NULL,
11335                                   "counters are not available");
11336 }
11337
11338 /**
11339  * Query a flow rule AGE action for aging information.
11340  *
11341  * @param[in] dev
11342  *   Pointer to Ethernet device.
11343  * @param[in] flow
11344  *   Pointer to the sub flow.
11345  * @param[out] data
11346  *   data retrieved by the query.
11347  * @param[out] error
11348  *   Perform verbose error reporting if not NULL.
11349  *
11350  * @return
11351  *   0 on success, a negative errno value otherwise and rte_errno is set.
11352  */
11353 static int
11354 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11355                   void *data, struct rte_flow_error *error)
11356 {
11357         struct rte_flow_query_age *resp = data;
11358
11359         if (flow->counter) {
11360                 struct mlx5_age_param *age_param =
11361                                 flow_dv_counter_idx_get_age(dev, flow->counter);
11362
11363                 if (!age_param || !age_param->timeout)
11364                         return rte_flow_error_set
11365                                         (error, EINVAL,
11366                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11367                                          NULL, "cannot read age data");
11368                 resp->aged = __atomic_load_n(&age_param->state,
11369                                              __ATOMIC_RELAXED) ==
11370                                                         AGE_TMOUT ? 1 : 0;
11371                 resp->sec_since_last_hit_valid = !resp->aged;
11372                 if (resp->sec_since_last_hit_valid)
11373                         resp->sec_since_last_hit =
11374                                 __atomic_load_n(&age_param->sec_since_last_hit,
11375                                                 __ATOMIC_RELAXED);
11376                 return 0;
11377         }
11378         return rte_flow_error_set(error, EINVAL,
11379                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11380                                   NULL,
11381                                   "age data not available");
11382 }
11383
11384 /**
11385  * Query a flow.
11386  *
11387  * @see rte_flow_query()
11388  * @see rte_flow_ops
11389  */
11390 static int
11391 flow_dv_query(struct rte_eth_dev *dev,
11392               struct rte_flow *flow __rte_unused,
11393               const struct rte_flow_action *actions __rte_unused,
11394               void *data __rte_unused,
11395               struct rte_flow_error *error __rte_unused)
11396 {
11397         int ret = -EINVAL;
11398
11399         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11400                 switch (actions->type) {
11401                 case RTE_FLOW_ACTION_TYPE_VOID:
11402                         break;
11403                 case RTE_FLOW_ACTION_TYPE_COUNT:
11404                         ret = flow_dv_query_count(dev, flow, data, error);
11405                         break;
11406                 case RTE_FLOW_ACTION_TYPE_AGE:
11407                         ret = flow_dv_query_age(dev, flow, data, error);
11408                         break;
11409                 default:
11410                         return rte_flow_error_set(error, ENOTSUP,
11411                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11412                                                   actions,
11413                                                   "action not supported");
11414                 }
11415         }
11416         return ret;
11417 }
11418
11419 /**
11420  * Destroy the meter table set.
11421  * Lock free, (mutex should be acquired by caller).
11422  *
11423  * @param[in] dev
11424  *   Pointer to Ethernet device.
11425  * @param[in] tbl
11426  *   Pointer to the meter table set.
11427  *
11428  * @return
11429  *   Always 0.
11430  */
11431 static int
11432 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11433                         struct mlx5_meter_domains_infos *tbl)
11434 {
11435         struct mlx5_priv *priv = dev->data->dev_private;
11436         struct mlx5_meter_domains_infos *mtd =
11437                                 (struct mlx5_meter_domains_infos *)tbl;
11438
11439         if (!mtd || !priv->config.dv_flow_en)
11440                 return 0;
11441         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11442                 claim_zero(mlx5_flow_os_destroy_flow
11443                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11444         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11445                 claim_zero(mlx5_flow_os_destroy_flow
11446                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11447         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11448                 claim_zero(mlx5_flow_os_destroy_flow
11449                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11450         if (mtd->egress.color_matcher)
11451                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11452                            (mtd->egress.color_matcher));
11453         if (mtd->egress.any_matcher)
11454                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11455                            (mtd->egress.any_matcher));
11456         if (mtd->egress.tbl)
11457                 flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
11458         if (mtd->egress.sfx_tbl)
11459                 flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
11460         if (mtd->ingress.color_matcher)
11461                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11462                            (mtd->ingress.color_matcher));
11463         if (mtd->ingress.any_matcher)
11464                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11465                            (mtd->ingress.any_matcher));
11466         if (mtd->ingress.tbl)
11467                 flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
11468         if (mtd->ingress.sfx_tbl)
11469                 flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
11470         if (mtd->transfer.color_matcher)
11471                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11472                            (mtd->transfer.color_matcher));
11473         if (mtd->transfer.any_matcher)
11474                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11475                            (mtd->transfer.any_matcher));
11476         if (mtd->transfer.tbl)
11477                 flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
11478         if (mtd->transfer.sfx_tbl)
11479                 flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
11480         if (mtd->drop_actn)
11481                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
11482         mlx5_free(mtd);
11483         return 0;
11484 }
11485
11486 /* Number of meter flow actions, count and jump or count and drop. */
11487 #define METER_ACTIONS 2
11488
11489 /**
11490  * Create specify domain meter table and suffix table.
11491  *
11492  * @param[in] dev
11493  *   Pointer to Ethernet device.
11494  * @param[in,out] mtb
11495  *   Pointer to DV meter table set.
11496  * @param[in] egress
11497  *   Table attribute.
11498  * @param[in] transfer
11499  *   Table attribute.
11500  * @param[in] color_reg_c_idx
11501  *   Reg C index for color match.
11502  *
11503  * @return
11504  *   0 on success, -1 otherwise and rte_errno is set.
11505  */
static int
flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
			   struct mlx5_meter_domains_infos *mtb,
			   uint8_t egress, uint8_t transfer,
			   uint32_t color_reg_c_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_dv_match_params mask = {
		.size = sizeof(mask.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 0,
		.match_criteria_enable = 0,
		.match_mask = (void *)&mask,
	};
	void *actions[METER_ACTIONS];
	struct mlx5_meter_domain_info *dtb;
	struct rte_flow_error error;
	int i = 0; /* Number of actions attached to the default drop rule. */
	int ret;

	/* Select the domain info by the table attributes (FDB wins). */
	if (transfer)
		dtb = &mtb->transfer;
	else if (egress)
		dtb = &mtb->egress;
	else
		dtb = &mtb->ingress;
	/* Create the meter table with METER level. */
	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
					    egress, transfer, false, NULL, 0,
					    0, &error);
	if (!dtb->tbl) {
		DRV_LOG(ERR, "Failed to create meter policer table.");
		return -1;
	}
	/* Create the meter suffix table with SUFFIX level. */
	dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
					    MLX5_FLOW_TABLE_LEVEL_SUFFIX,
					    egress, transfer, false, NULL, 0,
					    0, &error);
	if (!dtb->sfx_tbl) {
		DRV_LOG(ERR, "Failed to create meter suffix table.");
		return -1;
	}
	/* Create matchers, Any and Color. */
	/* "Any" matcher: empty criteria, lowest priority (3). */
	dv_attr.priority = 3;
	dv_attr.match_criteria_enable = 0;
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
					       &dtb->any_matcher);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter"
			     " policer default matcher.");
		goto error_exit;
	}
	/* Color matcher: match the color register (misc2), priority 0. */
	dv_attr.priority = 0;
	dv_attr.match_criteria_enable =
				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
			       rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
					       &dtb->color_matcher);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
		goto error_exit;
	}
	/* Optional drop counter plus the mandatory drop action. */
	if (mtb->count_actns[RTE_MTR_DROPPED])
		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
	actions[i++] = mtb->drop_actn;
	/* Default rule: lowest priority, match any, actions: drop. */
	ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
				       actions,
				       &dtb->policer_rules[RTE_MTR_DROPPED]);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
		goto error_exit;
	}
	return 0;
error_exit:
	/* Partially created resources are released by the caller's cleanup
	 * path (flow_dv_destroy_mtr_tbl), so no rollback is done here. */
	return -1;
}
11591
11592 /**
11593  * Create the needed meter and suffix tables.
11594  * Lock free, (mutex should be acquired by caller).
11595  *
11596  * @param[in] dev
11597  *   Pointer to Ethernet device.
11598  * @param[in] fm
11599  *   Pointer to the flow meter.
11600  *
11601  * @return
11602  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
11603  */
11604 static struct mlx5_meter_domains_infos *
11605 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
11606                        const struct mlx5_flow_meter *fm)
11607 {
11608         struct mlx5_priv *priv = dev->data->dev_private;
11609         struct mlx5_meter_domains_infos *mtb;
11610         int ret;
11611         int i;
11612
11613         if (!priv->mtr_en) {
11614                 rte_errno = ENOTSUP;
11615                 return NULL;
11616         }
11617         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
11618         if (!mtb) {
11619                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
11620                 return NULL;
11621         }
11622         /* Create meter count actions */
11623         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
11624                 struct mlx5_flow_counter *cnt;
11625                 if (!fm->policer_stats.cnt[i])
11626                         continue;
11627                 cnt = flow_dv_counter_get_by_idx(dev,
11628                       fm->policer_stats.cnt[i], NULL);
11629                 mtb->count_actns[i] = cnt->action;
11630         }
11631         /* Create drop action. */
11632         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
11633         if (ret) {
11634                 DRV_LOG(ERR, "Failed to create drop action.");
11635                 goto error_exit;
11636         }
11637         /* Egress meter table. */
11638         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
11639         if (ret) {
11640                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
11641                 goto error_exit;
11642         }
11643         /* Ingress meter table. */
11644         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
11645         if (ret) {
11646                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
11647                 goto error_exit;
11648         }
11649         /* FDB meter table. */
11650         if (priv->config.dv_esw_en) {
11651                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
11652                                                  priv->mtr_color_reg);
11653                 if (ret) {
11654                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
11655                         goto error_exit;
11656                 }
11657         }
11658         return mtb;
11659 error_exit:
11660         flow_dv_destroy_mtr_tbl(dev, mtb);
11661         return NULL;
11662 }
11663
11664 /**
11665  * Destroy domain policer rule.
11666  *
11667  * @param[in] dt
11668  *   Pointer to domain table.
11669  */
11670 static void
11671 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
11672 {
11673         int i;
11674
11675         for (i = 0; i < RTE_MTR_DROPPED; i++) {
11676                 if (dt->policer_rules[i]) {
11677                         claim_zero(mlx5_flow_os_destroy_flow
11678                                    (dt->policer_rules[i]));
11679                         dt->policer_rules[i] = NULL;
11680                 }
11681         }
11682         if (dt->jump_actn) {
11683                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
11684                 dt->jump_actn = NULL;
11685         }
11686 }
11687
11688 /**
11689  * Destroy policer rules.
11690  *
11691  * @param[in] dev
11692  *   Pointer to Ethernet device.
11693  * @param[in] fm
11694  *   Pointer to flow meter structure.
11695  * @param[in] attr
11696  *   Pointer to flow attributes.
11697  *
11698  * @return
11699  *   Always 0.
11700  */
11701 static int
11702 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
11703                               const struct mlx5_flow_meter *fm,
11704                               const struct rte_flow_attr *attr)
11705 {
11706         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
11707
11708         if (!mtb)
11709                 return 0;
11710         if (attr->egress)
11711                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
11712         if (attr->ingress)
11713                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
11714         if (attr->transfer)
11715                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
11716         return 0;
11717 }
11718
11719 /**
11720  * Create specify domain meter policer rule.
11721  *
11722  * @param[in] fm
11723  *   Pointer to flow meter structure.
11724  * @param[in] mtb
11725  *   Pointer to DV meter table set.
11726  * @param[in] mtr_reg_c
11727  *   Color match REG_C.
11728  *
11729  * @return
11730  *   0 on success, -1 otherwise.
11731  */
static int
flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
				    struct mlx5_meter_domain_info *dtb,
				    uint8_t mtr_reg_c)
{
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	void *actions[METER_ACTIONS];
	int i;
	int ret = 0;

	/* Create jump action. */
	/* Created lazily; reused if a previous call already set it up. */
	if (!dtb->jump_actn)
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
				(dtb->sfx_tbl->obj, &dtb->jump_actn);
	if (ret) {
		DRV_LOG(ERR, "Failed to create policer jump action.");
		goto error;
	}
	/* One rule per color; RTE_MTR_DROPPED is covered by the default
	 * drop rule installed in flow_dv_prepare_mtr_tables().
	 */
	for (i = 0; i < RTE_MTR_DROPPED; i++) {
		int j = 0; /* Number of actions for this color's rule. */

		/* Match on the meter color register value for color i. */
		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
				       rte_col_2_mlx5_col(i), UINT8_MAX);
		if (mtb->count_actns[i])
			actions[j++] = mtb->count_actns[i];
		/* Either drop the packet or jump to the suffix table. */
		if (fm->action[i] == MTR_POLICER_ACTION_DROP)
			actions[j++] = mtb->drop_actn;
		else
			actions[j++] = dtb->jump_actn;
		ret = mlx5_flow_os_create_flow(dtb->color_matcher,
					       (void *)&value, j, actions,
					       &dtb->policer_rules[i]);
		if (ret) {
			DRV_LOG(ERR, "Failed to create policer rule.");
			goto error;
		}
	}
	return 0;
error:
	/* NOTE(review): assumes the failing mlx5_flow_os_* call left the
	 * cause in errno — confirm against the OS abstraction layer. */
	rte_errno = errno;
	return -1;
}
11780
11781 /**
11782  * Create policer rules.
11783  *
11784  * @param[in] dev
11785  *   Pointer to Ethernet device.
11786  * @param[in] fm
11787  *   Pointer to flow meter structure.
11788  * @param[in] attr
11789  *   Pointer to flow attributes.
11790  *
11791  * @return
11792  *   0 on success, -1 otherwise.
11793  */
11794 static int
11795 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
11796                              struct mlx5_flow_meter *fm,
11797                              const struct rte_flow_attr *attr)
11798 {
11799         struct mlx5_priv *priv = dev->data->dev_private;
11800         struct mlx5_meter_domains_infos *mtb = fm->mfts;
11801         int ret;
11802
11803         if (attr->egress) {
11804                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
11805                                                 priv->mtr_color_reg);
11806                 if (ret) {
11807                         DRV_LOG(ERR, "Failed to create egress policer.");
11808                         goto error;
11809                 }
11810         }
11811         if (attr->ingress) {
11812                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
11813                                                 priv->mtr_color_reg);
11814                 if (ret) {
11815                         DRV_LOG(ERR, "Failed to create ingress policer.");
11816                         goto error;
11817                 }
11818         }
11819         if (attr->transfer) {
11820                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
11821                                                 priv->mtr_color_reg);
11822                 if (ret) {
11823                         DRV_LOG(ERR, "Failed to create transfer policer.");
11824                         goto error;
11825                 }
11826         }
11827         return 0;
11828 error:
11829         flow_dv_destroy_policer_rules(dev, fm, attr);
11830         return -1;
11831 }
11832
11833 /**
11834  * Validate the batch counter support in root table.
11835  *
11836  * Create a simple flow with invalid counter and drop action on root table to
11837  * validate if batch counter with offset on root table is supported or not.
11838  *
11839  * @param[in] dev
11840  *   Pointer to rte_eth_dev structure.
11841  *
11842  * @return
11843  *   0 on success, a negative errno value otherwise and rte_errno is set.
11844  */
11845 int
11846 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
11847 {
11848         struct mlx5_priv *priv = dev->data->dev_private;
11849         struct mlx5_dev_ctx_shared *sh = priv->sh;
11850         struct mlx5_flow_dv_match_params mask = {
11851                 .size = sizeof(mask.buf),
11852         };
11853         struct mlx5_flow_dv_match_params value = {
11854                 .size = sizeof(value.buf),
11855         };
11856         struct mlx5dv_flow_matcher_attr dv_attr = {
11857                 .type = IBV_FLOW_ATTR_NORMAL,
11858                 .priority = 0,
11859                 .match_criteria_enable = 0,
11860                 .match_mask = (void *)&mask,
11861         };
11862         void *actions[2] = { 0 };
11863         struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
11864         struct mlx5_devx_obj *dcs = NULL;
11865         void *matcher = NULL;
11866         void *flow = NULL;
11867         int i, ret = -1;
11868
11869         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
11870         if (!tbl)
11871                 goto err;
11872         dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
11873                                             NULL, 0, 0, NULL);
11874         if (!dest_tbl)
11875                 goto err;
11876         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
11877         if (!dcs)
11878                 goto err;
11879         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
11880                                                     &actions[0]);
11881         if (ret)
11882                 goto err;
11883         ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11884                                 (dest_tbl->obj, &actions[1]);
11885         if (ret)
11886                 goto err;
11887         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
11888         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
11889                                                &matcher);
11890         if (ret)
11891                 goto err;
11892         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
11893                                        actions, &flow);
11894 err:
11895         /*
11896          * If batch counter with offset is not supported, the driver will not
11897          * validate the invalid offset value, flow create should success.
11898          * In this case, it means batch counter is not supported in root table.
11899          *
11900          * Otherwise, if flow create is failed, counter offset is supported.
11901          */
11902         if (flow) {
11903                 DRV_LOG(INFO, "Batch counter is not supported in root "
11904                               "table. Switch to fallback mode.");
11905                 rte_errno = ENOTSUP;
11906                 ret = -rte_errno;
11907                 claim_zero(mlx5_flow_os_destroy_flow(flow));
11908         } else {
11909                 /* Check matcher to make sure validate fail at flow create. */
11910                 if (!matcher || (matcher && errno != EINVAL))
11911                         DRV_LOG(ERR, "Unexpected error in counter offset "
11912                                      "support detection");
11913                 ret = 0;
11914         }
11915         for (i = 0; i < 2; i++) {
11916                 if (actions[i])
11917                         claim_zero(mlx5_flow_os_destroy_flow_action
11918                                    (actions[i]));
11919         }
11920         if (matcher)
11921                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
11922         if (tbl)
11923                 flow_dv_tbl_resource_release(dev, tbl);
11924         if (dest_tbl)
11925                 flow_dv_tbl_resource_release(dev, dest_tbl);
11926         if (dcs)
11927                 claim_zero(mlx5_devx_cmd_destroy(dcs));
11928         return ret;
11929 }
11930
11931 /**
11932  * Query a devx counter.
11933  *
11934  * @param[in] dev
11935  *   Pointer to the Ethernet device structure.
 * @param[in] counter
11937  *   Index to the flow counter.
11938  * @param[in] clear
11939  *   Set to clear the counter statistics.
11940  * @param[out] pkts
11941  *   The statistics value of packets.
11942  * @param[out] bytes
11943  *   The statistics value of bytes.
11944  *
11945  * @return
11946  *   0 on success, otherwise return -1.
11947  */
11948 static int
11949 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
11950                       uint64_t *pkts, uint64_t *bytes)
11951 {
11952         struct mlx5_priv *priv = dev->data->dev_private;
11953         struct mlx5_flow_counter *cnt;
11954         uint64_t inn_pkts, inn_bytes;
11955         int ret;
11956
11957         if (!priv->config.devx)
11958                 return -1;
11959
11960         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
11961         if (ret)
11962                 return -1;
11963         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
11964         *pkts = inn_pkts - cnt->hits;
11965         *bytes = inn_bytes - cnt->bytes;
11966         if (clear) {
11967                 cnt->hits = inn_pkts;
11968                 cnt->bytes = inn_bytes;
11969         }
11970         return 0;
11971 }
11972
11973 /**
11974  * Get aged-out flows.
11975  *
11976  * @param[in] dev
11977  *   Pointer to the Ethernet device structure.
11978  * @param[in] context
11979  *   The address of an array of pointers to the aged-out flows contexts.
11980  * @param[in] nb_contexts
11981  *   The length of context array pointers.
11982  * @param[out] error
11983  *   Perform verbose error reporting if not NULL. Initialized in case of
11984  *   error only.
11985  *
11986  * @return
11987  *   how many contexts get in success, otherwise negative errno value.
11988  *   if nb_contexts is 0, return the amount of all aged contexts.
11989  *   if nb_contexts is not 0 , return the amount of aged flows reported
11990  *   in the context array.
 * @note: sets MLX5_AGE_TRIGGER on the port age info after the query.
11992  */
11993 static int
11994 flow_get_aged_flows(struct rte_eth_dev *dev,
11995                     void **context,
11996                     uint32_t nb_contexts,
11997                     struct rte_flow_error *error)
11998 {
11999         struct mlx5_priv *priv = dev->data->dev_private;
12000         struct mlx5_age_info *age_info;
12001         struct mlx5_age_param *age_param;
12002         struct mlx5_flow_counter *counter;
12003         int nb_flows = 0;
12004
12005         if (nb_contexts && !context)
12006                 return rte_flow_error_set(error, EINVAL,
12007                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12008                                           NULL,
12009                                           "Should assign at least one flow or"
12010                                           " context to get if nb_contexts != 0");
12011         age_info = GET_PORT_AGE_INFO(priv);
12012         rte_spinlock_lock(&age_info->aged_sl);
12013         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
12014                 nb_flows++;
12015                 if (nb_contexts) {
12016                         age_param = MLX5_CNT_TO_AGE(counter);
12017                         context[nb_flows - 1] = age_param->context;
12018                         if (!(--nb_contexts))
12019                                 break;
12020                 }
12021         }
12022         rte_spinlock_unlock(&age_info->aged_sl);
12023         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
12024         return nb_flows;
12025 }
12026
12027 /*
12028  * Mutex-protected thunk to lock-free  __flow_dv_translate().
12029  */
12030 static int
12031 flow_dv_translate(struct rte_eth_dev *dev,
12032                   struct mlx5_flow *dev_flow,
12033                   const struct rte_flow_attr *attr,
12034                   const struct rte_flow_item items[],
12035                   const struct rte_flow_action actions[],
12036                   struct rte_flow_error *error)
12037 {
12038         int ret;
12039
12040         flow_dv_shared_lock(dev);
12041         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
12042         flow_dv_shared_unlock(dev);
12043         return ret;
12044 }
12045
12046 /*
12047  * Mutex-protected thunk to lock-free  __flow_dv_apply().
12048  */
static int
flow_dv_apply(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	int rc;

	/* Serialize access to the shared DV resources. */
	flow_dv_shared_lock(dev);
	rc = __flow_dv_apply(dev, flow, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
12061
12062 /*
12063  * Mutex-protected thunk to lock-free __flow_dv_remove().
12064  */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize with other modifiers of the shared DV resources. */
	flow_dv_shared_lock(dev);
	__flow_dv_remove(dev, flow);
	flow_dv_shared_unlock(dev);
}
12072
12073 /*
12074  * Mutex-protected thunk to lock-free __flow_dv_destroy().
12075  */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize with other modifiers of the shared DV resources. */
	flow_dv_shared_lock(dev);
	__flow_dv_destroy(dev, flow);
	flow_dv_shared_unlock(dev);
}
12083
12084 /*
12085  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12086  */
12087 static uint32_t
12088 flow_dv_counter_allocate(struct rte_eth_dev *dev)
12089 {
12090         uint32_t cnt;
12091
12092         flow_dv_shared_lock(dev);
12093         cnt = flow_dv_counter_alloc(dev, 0);
12094         flow_dv_shared_unlock(dev);
12095         return cnt;
12096 }
12097
12098 /*
12099  * Mutex-protected thunk to lock-free flow_dv_counter_release().
12100  */
12101 static void
12102 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
12103 {
12104         flow_dv_shared_lock(dev);
12105         flow_dv_counter_release(dev, cnt);
12106         flow_dv_shared_unlock(dev);
12107 }
12108
12109 /**
12110  * Validate shared action.
12111  * Dispatcher for action type specific validation.
12112  *
12113  * @param[in] dev
12114  *   Pointer to the Ethernet device structure.
12115  * @param[in] conf
12116  *   Shared action configuration.
12117  * @param[in] action
12118  *   The shared action object to validate.
12119  * @param[out] error
12120  *   Perform verbose error reporting if not NULL. Initialized in case of
12121  *   error only.
12122  *
12123  * @return
12124  *   0 on success, otherwise negative errno value.
12125  */
12126 static int
12127 flow_dv_action_validate(struct rte_eth_dev *dev,
12128                         const struct rte_flow_shared_action_conf *conf,
12129                         const struct rte_flow_action *action,
12130                         struct rte_flow_error *error)
12131 {
12132         RTE_SET_USED(conf);
12133         switch (action->type) {
12134         case RTE_FLOW_ACTION_TYPE_RSS:
12135                 return mlx5_validate_action_rss(dev, action, error);
12136         default:
12137                 return rte_flow_error_set(error, ENOTSUP,
12138                                           RTE_FLOW_ERROR_TYPE_ACTION,
12139                                           NULL,
12140                                           "action type not supported");
12141         }
12142 }
12143
12144 /*
12145  * Mutex-protected thunk to lock-free  __flow_dv_action_create().
12146  */
12147 static struct rte_flow_shared_action *
12148 flow_dv_action_create(struct rte_eth_dev *dev,
12149                       const struct rte_flow_shared_action_conf *conf,
12150                       const struct rte_flow_action *action,
12151                       struct rte_flow_error *error)
12152 {
12153         struct rte_flow_shared_action *shared_action = NULL;
12154
12155         flow_dv_shared_lock(dev);
12156         shared_action = __flow_dv_action_create(dev, conf, action, error);
12157         flow_dv_shared_unlock(dev);
12158         return shared_action;
12159 }
12160
/*
 * Serialized shared-action destruction: takes the per-device
 * shared-resource lock around the lock-free __flow_dv_action_destroy().
 */
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
		       struct rte_flow_shared_action *action,
		       struct rte_flow_error *error)
{
	int rc;

	flow_dv_shared_lock(dev);
	rc = __flow_dv_action_destroy(dev, action, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
12176
/*
 * Serialized shared-action update: takes the per-device shared-resource
 * lock around the lock-free __flow_dv_action_update().
 */
static int
flow_dv_action_update(struct rte_eth_dev *dev,
		      struct rte_flow_shared_action *action,
		      const void *action_conf,
		      struct rte_flow_error *error)
{
	int rc;

	flow_dv_shared_lock(dev);
	rc = __flow_dv_action_update(dev, action, action_conf, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
12194
12195 static int
12196 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12197 {
12198         struct mlx5_priv *priv = dev->data->dev_private;
12199         int ret = 0;
12200
12201         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12202                 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12203                                                 flags);
12204                 if (ret != 0)
12205                         return ret;
12206         }
12207         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12208                 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12209                 if (ret != 0)
12210                         return ret;
12211         }
12212         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12213                 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
12214                 if (ret != 0)
12215                         return ret;
12216         }
12217         return 0;
12218 }
12219
/* DV (Direct Verbs/Rules) driver callbacks registered with the generic
 * mlx5 flow layer. Entries that must serialize access to shared flow
 * resources point at the mutex-protected thunks defined above.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	/* Flow rule life cycle. */
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	/* Metering tables and policer rules. */
	.create_mtr_tbls = flow_dv_create_mtr_tbl,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
	.create_policer_rules = flow_dv_create_policer_rules,
	.destroy_policer_rules = flow_dv_destroy_policer_rules,
	/* Flow counters and aging. */
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	/* Shared (indirect) actions. */
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	/* Steering domain synchronization. */
	.sync_domain = flow_dv_sync_domain,
};
12242
12243 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
12244