79d13175c4c5e5a2d5de4c4203f50c94df6fef9d
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24
25 #include <mlx5_glue.h>
26 #include <mlx5_devx_cmds.h>
27 #include <mlx5_prm.h>
28 #include <mlx5_malloc.h>
29
30 #include "mlx5_defs.h"
31 #include "mlx5.h"
32 #include "mlx5_common_os.h"
33 #include "mlx5_flow.h"
34 #include "mlx5_flow_os.h"
35 #include "mlx5_rxtx.h"
36 #include "rte_pmd_mlx5.h"
37
38 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39
40 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
41 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #endif
43
44 #ifndef HAVE_MLX5DV_DR_ESWITCH
45 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
46 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
47 #endif
48 #endif
49
50 #ifndef HAVE_MLX5DV_DR
51 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
52 #endif
53
54 /* VLAN header definitions */
55 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
56 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
57 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
58 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
59 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
60
/*
 * Compact summary of a flow's L3/L4 composition, filled by
 * flow_dv_attr_init(). The anonymous bit-field view and the raw
 * 'attr' word alias the same storage so the whole set of flags can
 * be cleared at once (attr = 0) when a tunnel boundary is crossed.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1;    /* Set once the union is initialized. */
		uint32_t ipv4:1;     /* Outer L3 is IPv4. */
		uint32_t ipv6:1;     /* Outer L3 is IPv6. */
		uint32_t tcp:1;      /* Outer L4 is TCP. */
		uint32_t udp:1;      /* Outer L4 is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr;               /* All flags as one word. */
};
72
73 static int
74 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
75                              struct mlx5_flow_tbl_resource *tbl);
76
77 static int
78 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
79                                       uint32_t encap_decap_idx);
80
81 static int
82 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
83                                         uint32_t port_id);
84
85 /**
86  * Initialize flow attributes structure according to flow items' types.
87  *
88  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
89  * mode. For tunnel mode, the items to be modified are the outermost ones.
90  *
91  * @param[in] item
92  *   Pointer to item specification.
93  * @param[out] attr
94  *   Pointer to flow attributes structure.
95  * @param[in] dev_flow
96  *   Pointer to the sub flow.
97  * @param[in] tunnel_decap
98  *   Whether action is after tunnel decapsulation.
99  */
100 static void
101 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
102                   struct mlx5_flow *dev_flow, bool tunnel_decap)
103 {
104         uint64_t layers = dev_flow->handle->layers;
105
106         /*
107          * If layers is already initialized, it means this dev_flow is the
108          * suffix flow, the layers flags is set by the prefix flow. Need to
109          * use the layer flags from prefix flow as the suffix flow may not
110          * have the user defined items as the flow is split.
111          */
112         if (layers) {
113                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
114                         attr->ipv4 = 1;
115                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
116                         attr->ipv6 = 1;
117                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
118                         attr->tcp = 1;
119                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
120                         attr->udp = 1;
121                 attr->valid = 1;
122                 return;
123         }
124         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
125                 uint8_t next_protocol = 0xff;
126                 switch (item->type) {
127                 case RTE_FLOW_ITEM_TYPE_GRE:
128                 case RTE_FLOW_ITEM_TYPE_NVGRE:
129                 case RTE_FLOW_ITEM_TYPE_VXLAN:
130                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
131                 case RTE_FLOW_ITEM_TYPE_GENEVE:
132                 case RTE_FLOW_ITEM_TYPE_MPLS:
133                         if (tunnel_decap)
134                                 attr->attr = 0;
135                         break;
136                 case RTE_FLOW_ITEM_TYPE_IPV4:
137                         if (!attr->ipv6)
138                                 attr->ipv4 = 1;
139                         if (item->mask != NULL &&
140                             ((const struct rte_flow_item_ipv4 *)
141                             item->mask)->hdr.next_proto_id)
142                                 next_protocol =
143                                     ((const struct rte_flow_item_ipv4 *)
144                                       (item->spec))->hdr.next_proto_id &
145                                     ((const struct rte_flow_item_ipv4 *)
146                                       (item->mask))->hdr.next_proto_id;
147                         if ((next_protocol == IPPROTO_IPIP ||
148                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
149                                 attr->attr = 0;
150                         break;
151                 case RTE_FLOW_ITEM_TYPE_IPV6:
152                         if (!attr->ipv4)
153                                 attr->ipv6 = 1;
154                         if (item->mask != NULL &&
155                             ((const struct rte_flow_item_ipv6 *)
156                             item->mask)->hdr.proto)
157                                 next_protocol =
158                                     ((const struct rte_flow_item_ipv6 *)
159                                       (item->spec))->hdr.proto &
160                                     ((const struct rte_flow_item_ipv6 *)
161                                       (item->mask))->hdr.proto;
162                         if ((next_protocol == IPPROTO_IPIP ||
163                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
164                                 attr->attr = 0;
165                         break;
166                 case RTE_FLOW_ITEM_TYPE_UDP:
167                         if (!attr->tcp)
168                                 attr->udp = 1;
169                         break;
170                 case RTE_FLOW_ITEM_TYPE_TCP:
171                         if (!attr->udp)
172                                 attr->tcp = 1;
173                         break;
174                 default:
175                         break;
176                 }
177         }
178         attr->valid = 1;
179 }
180
181 /**
182  * Convert rte_mtr_color to mlx5 color.
183  *
184  * @param[in] rcol
185  *   rte_mtr_color.
186  *
187  * @return
188  *   mlx5 color.
189  */
190 static int
191 rte_col_2_mlx5_col(enum rte_color rcol)
192 {
193         switch (rcol) {
194         case RTE_COLOR_GREEN:
195                 return MLX5_FLOW_COLOR_GREEN;
196         case RTE_COLOR_YELLOW:
197                 return MLX5_FLOW_COLOR_YELLOW;
198         case RTE_COLOR_RED:
199                 return MLX5_FLOW_COLOR_RED;
200         default:
201                 break;
202         }
203         return MLX5_FLOW_COLOR_UNDEFINED;
204 }
205
/*
 * Describes one header field targeted by a modify-header command.
 * Tables of these (terminated by an all-zero entry) drive
 * flow_dv_convert_modify_action().
 */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* HW field identifier. */
};

/* Ethernet header fields; MACs are split 32/16 to match the HW fields. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

/* IPv4 header fields reachable by modify-header actions. */
struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

/* IPv6 header fields; 128-bit addresses are split into four words. */
struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

/* UDP port fields. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

/* TCP ports plus sequence/ack numbers. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
261
262 static void
263 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
264                           uint8_t next_protocol, uint64_t *item_flags,
265                           int *tunnel)
266 {
267         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
268                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
269         if (next_protocol == IPPROTO_IPIP) {
270                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
271                 *tunnel = 1;
272         }
273         if (next_protocol == IPPROTO_IPV6) {
274                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
275                 *tunnel = 1;
276         }
277 }
278
279 /**
280  * Acquire the synchronizing object to protect multithreaded access
281  * to shared dv context. Lock occurs only if context is actually
282  * shared, i.e. we have multiport IB device and representors are
283  * created.
284  *
285  * @param[in] dev
286  *   Pointer to the rte_eth_dev structure.
287  */
288 static void
289 flow_dv_shared_lock(struct rte_eth_dev *dev)
290 {
291         struct mlx5_priv *priv = dev->data->dev_private;
292         struct mlx5_dev_ctx_shared *sh = priv->sh;
293
294         if (sh->refcnt > 1) {
295                 int ret;
296
297                 ret = pthread_mutex_lock(&sh->dv_mutex);
298                 MLX5_ASSERT(!ret);
299                 (void)ret;
300         }
301 }
302
303 static void
304 flow_dv_shared_unlock(struct rte_eth_dev *dev)
305 {
306         struct mlx5_priv *priv = dev->data->dev_private;
307         struct mlx5_dev_ctx_shared *sh = priv->sh;
308
309         if (sh->refcnt > 1) {
310                 int ret;
311
312                 ret = pthread_mutex_unlock(&sh->dv_mutex);
313                 MLX5_ASSERT(!ret);
314                 (void)ret;
315         }
316 }
317
318 /* Update VLAN's VID/PCP based on input rte_flow_action.
319  *
320  * @param[in] action
321  *   Pointer to struct rte_flow_action.
322  * @param[out] vlan
323  *   Pointer to struct rte_vlan_hdr.
324  */
325 static void
326 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
327                          struct rte_vlan_hdr *vlan)
328 {
329         uint16_t vlan_tci;
330         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
331                 vlan_tci =
332                     ((const struct rte_flow_action_of_set_vlan_pcp *)
333                                                action->conf)->vlan_pcp;
334                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
335                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
336                 vlan->vlan_tci |= vlan_tci;
337         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
338                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
339                 vlan->vlan_tci |= rte_be_to_cpu_16
340                     (((const struct rte_flow_action_of_set_vlan_vid *)
341                                              action->conf)->vlan_vid);
342         }
343 }
344
345 /**
346  * Fetch 1, 2, 3 or 4 byte field from the byte array
347  * and return as unsigned integer in host-endian format.
348  *
349  * @param[in] data
350  *   Pointer to data array.
351  * @param[in] size
352  *   Size of field to extract.
353  *
354  * @return
355  *   converted field in host endian format.
356  */
357 static inline uint32_t
358 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
359 {
360         uint32_t ret;
361
362         switch (size) {
363         case 1:
364                 ret = *data;
365                 break;
366         case 2:
367                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
368                 break;
369         case 3:
370                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
371                 ret = (ret << 8) | *(data + sizeof(uint16_t));
372                 break;
373         case 4:
374                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
375                 break;
376         default:
377                 MLX5_ASSERT(false);
378                 ret = 0;
379                 break;
380         }
381         return ret;
382 }
383
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented as in big-endian format either.
	 * Mask must be always present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	/* Walk the zero-terminated field table, one command per field. */
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			/* Field not requested by the mask: skip it. */
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		/* HW encodes a full 32-bit width as length 0. */
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			/* Negative dcopy offset means "mirror src offset". */
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			/* SET/ADD carry immediate data taken from the spec. */
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	/* No field matched the mask: the modification is empty. */
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
485
486 /**
487  * Convert modify-header set IPv4 address action to DV specification.
488  *
489  * @param[in,out] resource
490  *   Pointer to the modify-header resource.
491  * @param[in] action
492  *   Pointer to action specification.
493  * @param[out] error
494  *   Pointer to the error structure.
495  *
496  * @return
497  *   0 on success, a negative errno value otherwise and rte_errno is set.
498  */
499 static int
500 flow_dv_convert_action_modify_ipv4
501                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
502                          const struct rte_flow_action *action,
503                          struct rte_flow_error *error)
504 {
505         const struct rte_flow_action_set_ipv4 *conf =
506                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
507         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
508         struct rte_flow_item_ipv4 ipv4;
509         struct rte_flow_item_ipv4 ipv4_mask;
510
511         memset(&ipv4, 0, sizeof(ipv4));
512         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
513         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
514                 ipv4.hdr.src_addr = conf->ipv4_addr;
515                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
516         } else {
517                 ipv4.hdr.dst_addr = conf->ipv4_addr;
518                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
519         }
520         item.spec = &ipv4;
521         item.mask = &ipv4_mask;
522         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
523                                              MLX5_MODIFICATION_TYPE_SET, error);
524 }
525
526 /**
527  * Convert modify-header set IPv6 address action to DV specification.
528  *
529  * @param[in,out] resource
530  *   Pointer to the modify-header resource.
531  * @param[in] action
532  *   Pointer to action specification.
533  * @param[out] error
534  *   Pointer to the error structure.
535  *
536  * @return
537  *   0 on success, a negative errno value otherwise and rte_errno is set.
538  */
539 static int
540 flow_dv_convert_action_modify_ipv6
541                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
542                          const struct rte_flow_action *action,
543                          struct rte_flow_error *error)
544 {
545         const struct rte_flow_action_set_ipv6 *conf =
546                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
547         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
548         struct rte_flow_item_ipv6 ipv6;
549         struct rte_flow_item_ipv6 ipv6_mask;
550
551         memset(&ipv6, 0, sizeof(ipv6));
552         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
553         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
554                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
555                        sizeof(ipv6.hdr.src_addr));
556                 memcpy(&ipv6_mask.hdr.src_addr,
557                        &rte_flow_item_ipv6_mask.hdr.src_addr,
558                        sizeof(ipv6.hdr.src_addr));
559         } else {
560                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
561                        sizeof(ipv6.hdr.dst_addr));
562                 memcpy(&ipv6_mask.hdr.dst_addr,
563                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
564                        sizeof(ipv6.hdr.dst_addr));
565         }
566         item.spec = &ipv6;
567         item.mask = &ipv6_mask;
568         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
569                                              MLX5_MODIFICATION_TYPE_SET, error);
570 }
571
572 /**
573  * Convert modify-header set MAC address action to DV specification.
574  *
575  * @param[in,out] resource
576  *   Pointer to the modify-header resource.
577  * @param[in] action
578  *   Pointer to action specification.
579  * @param[out] error
580  *   Pointer to the error structure.
581  *
582  * @return
583  *   0 on success, a negative errno value otherwise and rte_errno is set.
584  */
585 static int
586 flow_dv_convert_action_modify_mac
587                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
588                          const struct rte_flow_action *action,
589                          struct rte_flow_error *error)
590 {
591         const struct rte_flow_action_set_mac *conf =
592                 (const struct rte_flow_action_set_mac *)(action->conf);
593         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
594         struct rte_flow_item_eth eth;
595         struct rte_flow_item_eth eth_mask;
596
597         memset(&eth, 0, sizeof(eth));
598         memset(&eth_mask, 0, sizeof(eth_mask));
599         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
600                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
601                        sizeof(eth.src.addr_bytes));
602                 memcpy(&eth_mask.src.addr_bytes,
603                        &rte_flow_item_eth_mask.src.addr_bytes,
604                        sizeof(eth_mask.src.addr_bytes));
605         } else {
606                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
607                        sizeof(eth.dst.addr_bytes));
608                 memcpy(&eth_mask.dst.addr_bytes,
609                        &rte_flow_item_eth_mask.dst.addr_bytes,
610                        sizeof(eth_mask.dst.addr_bytes));
611         }
612         item.spec = &eth;
613         item.mask = &eth_mask;
614         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
615                                              MLX5_MODIFICATION_TYPE_SET, error);
616 }
617
/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	/* Hand-built command: this field's size/offset are in BITS. */
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	/*
	 * vlan_vid is already big-endian (rte_be16_t); shifting by 16
	 * moves it into the first two bytes of data1. NOTE(review):
	 * unlike data0 (and the generic path in
	 * flow_dv_convert_modify_action), data1 is deliberately NOT run
	 * through rte_cpu_to_be_32 here — confirm against PRM if touched.
	 */
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}
659
/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	/* Lazily classify L3/L4 layers from the items on first use. */
	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		/* Synthetic UDP item carrying only the port to set. */
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		/* Not UDP: the flow must carry TCP for a TP action. */
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
737
/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	/* Lazily classify L3/L4 layers from the items on first use. */
	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		/* Synthetic IPv4 item carrying only the TTL to set. */
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		/* Not IPv4: the flow must carry IPv6; set hop limit. */
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
801
802 /**
803  * Convert modify-header decrement TTL action to DV specification.
804  *
805  * @param[in,out] resource
806  *   Pointer to the modify-header resource.
807  * @param[in] action
808  *   Pointer to action specification.
809  * @param[in] items
810  *   Pointer to rte_flow_item objects list.
811  * @param[in] attr
812  *   Pointer to flow attributes structure.
813  * @param[in] dev_flow
814  *   Pointer to the sub flow.
815  * @param[in] tunnel_decap
816  *   Whether action is after tunnel decapsulation.
817  * @param[out] error
818  *   Pointer to the error structure.
819  *
820  * @return
821  *   0 on success, a negative errno value otherwise and rte_errno is set.
822  */
823 static int
824 flow_dv_convert_action_modify_dec_ttl
825                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
826                          const struct rte_flow_item *items,
827                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
828                          bool tunnel_decap, struct rte_flow_error *error)
829 {
830         struct rte_flow_item item;
831         struct rte_flow_item_ipv4 ipv4;
832         struct rte_flow_item_ipv4 ipv4_mask;
833         struct rte_flow_item_ipv6 ipv6;
834         struct rte_flow_item_ipv6 ipv6_mask;
835         struct field_modify_info *field;
836
837         if (!attr->valid)
838                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
839         if (attr->ipv4) {
840                 memset(&ipv4, 0, sizeof(ipv4));
841                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
842                 ipv4.hdr.time_to_live = 0xFF;
843                 ipv4_mask.hdr.time_to_live = 0xFF;
844                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
845                 item.spec = &ipv4;
846                 item.mask = &ipv4_mask;
847                 field = modify_ipv4;
848         } else {
849                 MLX5_ASSERT(attr->ipv6);
850                 memset(&ipv6, 0, sizeof(ipv6));
851                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
852                 ipv6.hdr.hop_limits = 0xFF;
853                 ipv6_mask.hdr.hop_limits = 0xFF;
854                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
855                 item.spec = &ipv6;
856                 item.mask = &ipv6_mask;
857                 field = modify_ipv6;
858         }
859         return flow_dv_convert_modify_action(&item, field, NULL, resource,
860                                              MLX5_MODIFICATION_TYPE_ADD, error);
861 }
862
863 /**
864  * Convert modify-header increment/decrement TCP Sequence number
865  * to DV specification.
866  *
867  * @param[in,out] resource
868  *   Pointer to the modify-header resource.
869  * @param[in] action
870  *   Pointer to action specification.
871  * @param[out] error
872  *   Pointer to the error structure.
873  *
874  * @return
875  *   0 on success, a negative errno value otherwise and rte_errno is set.
876  */
877 static int
878 flow_dv_convert_action_modify_tcp_seq
879                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
880                          const struct rte_flow_action *action,
881                          struct rte_flow_error *error)
882 {
883         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
884         uint64_t value = rte_be_to_cpu_32(*conf);
885         struct rte_flow_item item;
886         struct rte_flow_item_tcp tcp;
887         struct rte_flow_item_tcp tcp_mask;
888
889         memset(&tcp, 0, sizeof(tcp));
890         memset(&tcp_mask, 0, sizeof(tcp_mask));
891         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
892                 /*
893                  * The HW has no decrement operation, only increment operation.
894                  * To simulate decrement X from Y using increment operation
895                  * we need to add UINT32_MAX X times to Y.
896                  * Each adding of UINT32_MAX decrements Y by 1.
897                  */
898                 value *= UINT32_MAX;
899         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
900         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
901         item.type = RTE_FLOW_ITEM_TYPE_TCP;
902         item.spec = &tcp;
903         item.mask = &tcp_mask;
904         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
905                                              MLX5_MODIFICATION_TYPE_ADD, error);
906 }
907
908 /**
909  * Convert modify-header increment/decrement TCP Acknowledgment number
910  * to DV specification.
911  *
912  * @param[in,out] resource
913  *   Pointer to the modify-header resource.
914  * @param[in] action
915  *   Pointer to action specification.
916  * @param[out] error
917  *   Pointer to the error structure.
918  *
919  * @return
920  *   0 on success, a negative errno value otherwise and rte_errno is set.
921  */
922 static int
923 flow_dv_convert_action_modify_tcp_ack
924                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
925                          const struct rte_flow_action *action,
926                          struct rte_flow_error *error)
927 {
928         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
929         uint64_t value = rte_be_to_cpu_32(*conf);
930         struct rte_flow_item item;
931         struct rte_flow_item_tcp tcp;
932         struct rte_flow_item_tcp tcp_mask;
933
934         memset(&tcp, 0, sizeof(tcp));
935         memset(&tcp_mask, 0, sizeof(tcp_mask));
936         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
937                 /*
938                  * The HW has no decrement operation, only increment operation.
939                  * To simulate decrement X from Y using increment operation
940                  * we need to add UINT32_MAX X times to Y.
941                  * Each adding of UINT32_MAX decrements Y by 1.
942                  */
943                 value *= UINT32_MAX;
944         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
945         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
946         item.type = RTE_FLOW_ITEM_TYPE_TCP;
947         item.spec = &tcp;
948         item.mask = &tcp_mask;
949         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
950                                              MLX5_MODIFICATION_TYPE_ADD, error);
951 }
952
/*
 * Translation table from the driver's metadata register enumeration
 * (enum modify_reg) to the firmware modify-header field identifiers
 * consumed by the MLX5_MODIFICATION_TYPE_* commands.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
966
967 /**
968  * Convert register set to DV specification.
969  *
970  * @param[in,out] resource
971  *   Pointer to the modify-header resource.
972  * @param[in] action
973  *   Pointer to action specification.
974  * @param[out] error
975  *   Pointer to the error structure.
976  *
977  * @return
978  *   0 on success, a negative errno value otherwise and rte_errno is set.
979  */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	/* The modify-header command array has a fixed capacity. */
	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NON);
	MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
	};
	/*
	 * data0 holds the packed action_type/field bitfields set above;
	 * convert the whole command word to big-endian as the device
	 * expects.
	 */
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	resource->actions_num = i;
	return 0;
}
1006
1007 /**
1008  * Convert SET_TAG action to DV specification.
1009  *
1010  * @param[in] dev
1011  *   Pointer to the rte_eth_dev structure.
1012  * @param[in,out] resource
1013  *   Pointer to the modify-header resource.
1014  * @param[in] conf
1015  *   Pointer to action specification.
1016  * @param[out] error
1017  *   Pointer to the error structure.
1018  *
1019  * @return
1020  *   0 on success, a negative errno value otherwise and rte_errno is set.
1021  */
1022 static int
1023 flow_dv_convert_action_set_tag
1024                         (struct rte_eth_dev *dev,
1025                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1026                          const struct rte_flow_action_set_tag *conf,
1027                          struct rte_flow_error *error)
1028 {
1029         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1030         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1031         struct rte_flow_item item = {
1032                 .spec = &data,
1033                 .mask = &mask,
1034         };
1035         struct field_modify_info reg_c_x[] = {
1036                 [1] = {0, 0, 0},
1037         };
1038         enum mlx5_modification_field reg_type;
1039         int ret;
1040
1041         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1042         if (ret < 0)
1043                 return ret;
1044         MLX5_ASSERT(ret != REG_NON);
1045         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1046         reg_type = reg_to_field[ret];
1047         MLX5_ASSERT(reg_type > 0);
1048         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1049         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1050                                              MLX5_MODIFICATION_TYPE_SET, error);
1051 }
1052
1053 /**
1054  * Convert internal COPY_REG action to DV specification.
1055  *
1056  * @param[in] dev
1057  *   Pointer to the rte_eth_dev structure.
1058  * @param[in,out] res
1059  *   Pointer to the modify-header resource.
1060  * @param[in] action
1061  *   Pointer to action specification.
1062  * @param[out] error
1063  *   Pointer to the error structure.
1064  *
1065  * @return
1066  *   0 on success, a negative errno value otherwise and rte_errno is set.
1067  */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		/* reg_c[0] sharing is only set up in extended meta modes. */
		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask ignores the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			/* Source is reg_c[0]: mask in big-endian layout. */
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1128
1129 /**
1130  * Convert MARK action to DV specification. This routine is used
1131  * in extensive metadata only and requires metadata register to be
1132  * handled. In legacy mode hardware tag resource is engaged.
1133  *
1134  * @param[in] dev
1135  *   Pointer to the rte_eth_dev structure.
1136  * @param[in] conf
1137  *   Pointer to MARK action specification.
1138  * @param[in,out] resource
1139  *   Pointer to the modify-header resource.
1140  * @param[out] error
1141  *   Pointer to the error structure.
1142  *
1143  * @return
1144  *   0 on success, a negative errno value otherwise and rte_errno is set.
1145  */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Restrict the mark to what the device reports as usable bits. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	/* Resolve which metadata register carries the MARK value. */
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		/*
		 * reg_c[0] is shared; move data/mask into the sub-field
		 * reported by dv_regc0_mask. Values are kept big-endian,
		 * so swap to CPU order, shift, then swap back.
		 */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1185
1186 /**
1187  * Get metadata register index for specified steering domain.
1188  *
1189  * @param[in] dev
1190  *   Pointer to the rte_eth_dev structure.
1191  * @param[in] attr
1192  *   Attributes of flow to determine steering domain.
1193  * @param[out] error
1194  *   Pointer to the error structure.
1195  *
1196  * @return
1197  *   positive index on success, a negative errno value otherwise
1198  *   and rte_errno is set.
1199  */
1200 static enum modify_reg
1201 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1202                          const struct rte_flow_attr *attr,
1203                          struct rte_flow_error *error)
1204 {
1205         int reg =
1206                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1207                                           MLX5_METADATA_FDB :
1208                                             attr->egress ?
1209                                             MLX5_METADATA_TX :
1210                                             MLX5_METADATA_RX, 0, error);
1211         if (reg < 0)
1212                 return rte_flow_error_set(error,
1213                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1214                                           NULL, "unavailable "
1215                                           "metadata register");
1216         return reg;
1217 }
1218
1219 /**
1220  * Convert SET_META action to DV specification.
1221  *
1222  * @param[in] dev
1223  *   Pointer to the rte_eth_dev structure.
1224  * @param[in,out] resource
1225  *   Pointer to the modify-header resource.
1226  * @param[in] attr
1227  *   Attributes of flow that includes this item.
1228  * @param[in] conf
1229  *   Pointer to action specification.
1230  * @param[out] error
1231  *   Pointer to the error structure.
1232  *
1233  * @return
1234  *   0 on success, a negative errno value otherwise and rte_errno is set.
1235  */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	/*
	 * In datapath code there are no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
		/* Shift direction depends on where the sub-field lands. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1282
1283 /**
1284  * Convert modify-header set IPv4 DSCP action to DV specification.
1285  *
1286  * @param[in,out] resource
1287  *   Pointer to the modify-header resource.
1288  * @param[in] action
1289  *   Pointer to action specification.
1290  * @param[out] error
1291  *   Pointer to the error structure.
1292  *
1293  * @return
1294  *   0 on success, a negative errno value otherwise and rte_errno is set.
1295  */
1296 static int
1297 flow_dv_convert_action_modify_ipv4_dscp
1298                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1299                          const struct rte_flow_action *action,
1300                          struct rte_flow_error *error)
1301 {
1302         const struct rte_flow_action_set_dscp *conf =
1303                 (const struct rte_flow_action_set_dscp *)(action->conf);
1304         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1305         struct rte_flow_item_ipv4 ipv4;
1306         struct rte_flow_item_ipv4 ipv4_mask;
1307
1308         memset(&ipv4, 0, sizeof(ipv4));
1309         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1310         ipv4.hdr.type_of_service = conf->dscp;
1311         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1312         item.spec = &ipv4;
1313         item.mask = &ipv4_mask;
1314         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1315                                              MLX5_MODIFICATION_TYPE_SET, error);
1316 }
1317
1318 /**
1319  * Convert modify-header set IPv6 DSCP action to DV specification.
1320  *
1321  * @param[in,out] resource
1322  *   Pointer to the modify-header resource.
1323  * @param[in] action
1324  *   Pointer to action specification.
1325  * @param[out] error
1326  *   Pointer to the error structure.
1327  *
1328  * @return
1329  *   0 on success, a negative errno value otherwise and rte_errno is set.
1330  */
1331 static int
1332 flow_dv_convert_action_modify_ipv6_dscp
1333                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1334                          const struct rte_flow_action *action,
1335                          struct rte_flow_error *error)
1336 {
1337         const struct rte_flow_action_set_dscp *conf =
1338                 (const struct rte_flow_action_set_dscp *)(action->conf);
1339         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1340         struct rte_flow_item_ipv6 ipv6;
1341         struct rte_flow_item_ipv6 ipv6_mask;
1342
1343         memset(&ipv6, 0, sizeof(ipv6));
1344         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1345         /*
1346          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1347          * rdma-core only accept the DSCP bits byte aligned start from
1348          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1349          * bits in IPv6 case as rdma-core requires byte aligned value.
1350          */
1351         ipv6.hdr.vtc_flow = conf->dscp;
1352         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1353         item.spec = &ipv6;
1354         item.mask = &ipv6_mask;
1355         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1356                                              MLX5_MODIFICATION_TYPE_SET, error);
1357 }
1358
1359 /**
1360  * Validate MARK item.
1361  *
1362  * @param[in] dev
1363  *   Pointer to the rte_eth_dev structure.
1364  * @param[in] item
1365  *   Item specification.
1366  * @param[in] attr
1367  *   Attributes of flow that includes this item.
1368  * @param[out] error
1369  *   Pointer to error structure.
1370  *
1371  * @return
1372  *   0 on success, a negative errno value otherwise and rte_errno is set.
1373  */
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr __rte_unused,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_mark *spec = item->spec;
	const struct rte_flow_item_mark *mask = item->mask;
	/* The device-reported mark mask bounds what can be matched. */
	const struct rte_flow_item_mark nic_mask = {
		.id = priv->sh->dv_mark_mask,
	};
	int ret;

	/* MARK matching needs the extended metadata feature and register. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata feature"
					  " isn't enabled");
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
	if (!nic_mask.id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't available");
	/* Verify a register can actually be allocated for MARK. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &spec->id,
					  "mark id exceeds the limit");
	if (!mask)
		mask = &nic_mask;
	if (!mask->id)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					"mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_mark),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}
1432
1433 /**
1434  * Validate META item.
1435  *
1436  * @param[in] dev
1437  *   Pointer to the rte_eth_dev structure.
1438  * @param[in] item
1439  *   Item specification.
1440  * @param[in] attr
1441  *   Attributes of flow that includes this item.
1442  * @param[out] error
1443  *   Pointer to error structure.
1444  *
1445  * @return
1446  *   0 on success, a negative errno value otherwise and rte_errno is set.
1447  */
1448 static int
1449 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1450                            const struct rte_flow_item *item,
1451                            const struct rte_flow_attr *attr,
1452                            struct rte_flow_error *error)
1453 {
1454         struct mlx5_priv *priv = dev->data->dev_private;
1455         struct mlx5_dev_config *config = &priv->config;
1456         const struct rte_flow_item_meta *spec = item->spec;
1457         const struct rte_flow_item_meta *mask = item->mask;
1458         struct rte_flow_item_meta nic_mask = {
1459                 .data = UINT32_MAX
1460         };
1461         int reg;
1462         int ret;
1463
1464         if (!spec)
1465                 return rte_flow_error_set(error, EINVAL,
1466                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1467                                           item->spec,
1468                                           "data cannot be empty");
1469         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1470                 if (!mlx5_flow_ext_mreg_supported(dev))
1471                         return rte_flow_error_set(error, ENOTSUP,
1472                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1473                                           "extended metadata register"
1474                                           " isn't supported");
1475                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1476                 if (reg < 0)
1477                         return reg;
1478                 if (reg == REG_B)
1479                         return rte_flow_error_set(error, ENOTSUP,
1480                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1481                                           "match on reg_b "
1482                                           "isn't supported");
1483                 if (reg != REG_A)
1484                         nic_mask.data = priv->sh->dv_meta_mask;
1485         } else if (attr->transfer) {
1486                 return rte_flow_error_set(error, ENOTSUP,
1487                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1488                                         "extended metadata feature "
1489                                         "should be enabled when "
1490                                         "meta item is requested "
1491                                         "with e-switch mode ");
1492         }
1493         if (!mask)
1494                 mask = &rte_flow_item_meta_mask;
1495         if (!mask->data)
1496                 return rte_flow_error_set(error, EINVAL,
1497                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1498                                         "mask cannot be zero");
1499
1500         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1501                                         (const uint8_t *)&nic_mask,
1502                                         sizeof(struct rte_flow_item_meta),
1503                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1504         return ret;
1505 }
1506
1507 /**
1508  * Validate TAG item.
1509  *
1510  * @param[in] dev
1511  *   Pointer to the rte_eth_dev structure.
1512  * @param[in] item
1513  *   Item specification.
1514  * @param[in] attr
1515  *   Attributes of flow that includes this item.
1516  * @param[out] error
1517  *   Pointer to error structure.
1518  *
1519  * @return
1520  *   0 on success, a negative errno value otherwise and rte_errno is set.
1521  */
static int
flow_dv_validate_item_tag(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  const struct rte_flow_attr *attr __rte_unused,
			  struct rte_flow_error *error)
{
	const struct rte_flow_item_tag *spec = item->spec;
	const struct rte_flow_item_tag *mask = item->mask;
	/* Full 32-bit data match; index must always be fully masked. */
	const struct rte_flow_item_tag nic_mask = {
		.data = RTE_BE32(UINT32_MAX),
		.index = 0xff,
	};
	int ret;

	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extensive metadata register"
					  " isn't supported");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!mask)
		mask = &rte_flow_item_tag_mask;
	if (!mask->data)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					"mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_tag),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	/* The tag index selects a register and cannot be partially masked. */
	if (mask->index != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					  "partial mask for tag index"
					  " is not supported");
	/* Verify the requested tag index maps to an available register. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	return 0;
}
1570
1571 /**
1572  * Validate vport item.
1573  *
1574  * @param[in] dev
1575  *   Pointer to the rte_eth_dev structure.
1576  * @param[in] item
1577  *   Item specification.
1578  * @param[in] attr
1579  *   Attributes of flow that includes this item.
1580  * @param[in] item_flags
1581  *   Bit-fields that holds the items detected until now.
1582  * @param[out] error
1583  *   Pointer to error structure.
1584  *
1585  * @return
1586  *   0 on success, a negative errno value otherwise and rte_errno is set.
1587  */
1588 static int
1589 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1590                               const struct rte_flow_item *item,
1591                               const struct rte_flow_attr *attr,
1592                               uint64_t item_flags,
1593                               struct rte_flow_error *error)
1594 {
1595         const struct rte_flow_item_port_id *spec = item->spec;
1596         const struct rte_flow_item_port_id *mask = item->mask;
1597         const struct rte_flow_item_port_id switch_mask = {
1598                         .id = 0xffffffff,
1599         };
1600         struct mlx5_priv *esw_priv;
1601         struct mlx5_priv *dev_priv;
1602         int ret;
1603
1604         if (!attr->transfer)
1605                 return rte_flow_error_set(error, EINVAL,
1606                                           RTE_FLOW_ERROR_TYPE_ITEM,
1607                                           NULL,
1608                                           "match on port id is valid only"
1609                                           " when transfer flag is enabled");
1610         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1611                 return rte_flow_error_set(error, ENOTSUP,
1612                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1613                                           "multiple source ports are not"
1614                                           " supported");
1615         if (!mask)
1616                 mask = &switch_mask;
1617         if (mask->id != 0xffffffff)
1618                 return rte_flow_error_set(error, ENOTSUP,
1619                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1620                                            mask,
1621                                            "no support for partial mask on"
1622                                            " \"id\" field");
1623         ret = mlx5_flow_item_acceptable
1624                                 (item, (const uint8_t *)mask,
1625                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1626                                  sizeof(struct rte_flow_item_port_id),
1627                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1628         if (ret)
1629                 return ret;
1630         if (!spec)
1631                 return 0;
1632         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1633         if (!esw_priv)
1634                 return rte_flow_error_set(error, rte_errno,
1635                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1636                                           "failed to obtain E-Switch info for"
1637                                           " port");
1638         dev_priv = mlx5_dev_to_eswitch_info(dev);
1639         if (!dev_priv)
1640                 return rte_flow_error_set(error, rte_errno,
1641                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1642                                           NULL,
1643                                           "failed to obtain E-Switch info");
1644         if (esw_priv->domain_id != dev_priv->domain_id)
1645                 return rte_flow_error_set(error, EINVAL,
1646                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1647                                           "cannot match on a port from a"
1648                                           " different E-Switch");
1649         return 0;
1650 }
1651
/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_vlan(const struct rte_flow_item *item,
			   uint64_t item_flags,
			   struct rte_eth_dev *dev,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *mask = item->mask;
	/* Fields the device is able to match on for the VLAN item. */
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
		.has_more_vlan = 1,
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	/* L3/L4 layer flags for the current encapsulation level. */
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	/* VLAN layer flag for the current encapsulation level. */
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	/* Only a single VLAN layer is allowed per encapsulation level. */
	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret)
		return ret;
	/*
	 * An outer-VLAN TCI mask other than plain VID-only (0x0fff) needs
	 * the VM workaround check below.
	 */
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * Non-NULL context means we have a virtual machine
			 * and SR-IOV enabled, we have to create VLAN interface
			 * to make hypervisor to setup E-Switch vport
			 * context correctly. We avoid creating the multiple
			 * VLAN interfaces, so we cannot support VLAN tag mask.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	return 0;
}
1725
1726 /*
1727  * GTP flags are contained in 1 byte of the format:
1728  * -------------------------------------------
1729  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1730  * |-----------------------------------------|
1731  * | value | Version | PT | Res | E | S | PN |
1732  * -------------------------------------------
1733  *
1734  * Matching is supported only for GTP flags E, S, PN.
1735  */
1736 #define MLX5_GTP_FLAGS_MASK     0x07
1737
1738 /**
1739  * Validate GTP item.
1740  *
1741  * @param[in] dev
1742  *   Pointer to the rte_eth_dev structure.
1743  * @param[in] item
1744  *   Item specification.
1745  * @param[in] item_flags
1746  *   Bit-fields that holds the items detected until now.
1747  * @param[out] error
1748  *   Pointer to error structure.
1749  *
1750  * @return
1751  *   0 on success, a negative errno value otherwise and rte_errno is set.
1752  */
1753 static int
1754 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1755                           const struct rte_flow_item *item,
1756                           uint64_t item_flags,
1757                           struct rte_flow_error *error)
1758 {
1759         struct mlx5_priv *priv = dev->data->dev_private;
1760         const struct rte_flow_item_gtp *spec = item->spec;
1761         const struct rte_flow_item_gtp *mask = item->mask;
1762         const struct rte_flow_item_gtp nic_mask = {
1763                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1764                 .msg_type = 0xff,
1765                 .teid = RTE_BE32(0xffffffff),
1766         };
1767
1768         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1769                 return rte_flow_error_set(error, ENOTSUP,
1770                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1771                                           "GTP support is not enabled");
1772         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1773                 return rte_flow_error_set(error, ENOTSUP,
1774                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1775                                           "multiple tunnel layers not"
1776                                           " supported");
1777         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1778                 return rte_flow_error_set(error, EINVAL,
1779                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1780                                           "no outer UDP layer found");
1781         if (!mask)
1782                 mask = &rte_flow_item_gtp_mask;
1783         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1784                 return rte_flow_error_set(error, ENOTSUP,
1785                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1786                                           "Match is supported for GTP"
1787                                           " flags only");
1788         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1789                                          (const uint8_t *)&nic_mask,
1790                                          sizeof(struct rte_flow_item_gtp),
1791                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1792 }
1793
/**
 * Validate IPV4 item.
 * Use existing validation function mlx5_flow_validate_item_ipv4(), and
 * add specific validation of fragment_offset field,
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
			   uint64_t item_flags,
			   uint64_t last_item,
			   uint16_t ether_type,
			   struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *last = item->last;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	rte_be16_t fragment_offset_spec = 0;
	rte_be16_t fragment_offset_last = 0;
	/* Fields the device supports matching on, incl. fragment_offset. */
	const struct rte_flow_item_ipv4 nic_ipv4_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.fragment_offset = RTE_BE16(0xffff),
			.next_proto_id = 0xff,
			.time_to_live = 0xff,
		},
	};

	/* Generic IPv4 item validation first. */
	ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
					   ether_type, &nic_ipv4_mask,
					   MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret < 0)
		return ret;
	if (spec && mask)
		fragment_offset_spec = spec->hdr.fragment_offset &
				       mask->hdr.fragment_offset;
	/* No effective match on fragment_offset - nothing more to check. */
	if (!fragment_offset_spec)
		return 0;
	/*
	 * spec and mask are valid, enforce using full mask to make sure the
	 * complete value is used correctly.
	 */
	if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
			!= RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " fragment_offset");
	/*
	 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
	 * indicating this is 1st fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate error message.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* A fragment_offset spec other than MF-only requires a range. */
	if (fragment_offset_spec && !last)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	/* spec and last are valid, validate the specified range. */
	fragment_offset_last = last->hdr.fragment_offset &
			       mask->hdr.fragment_offset;
	/*
	 * Match on fragment_offset spec 0x2001 and last 0x3fff
	 * means MF is 1 and frag-offset is > 0.
	 * This packet is fragment 2nd and onward, excluding last.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
	    fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Match on fragment_offset spec 0x0001 and last 0x1fff
	 * means MF is 0 and frag-offset is > 0.
	 * This packet is last fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (fragment_offset_spec == RTE_BE16(1) &&
	    fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/*
	 * Match on fragment_offset spec 0x0001 and last 0x3fff
	 * means MF and/or frag-offset is not 0.
	 * This is a fragmented packet.
	 * Other range values are invalid and rejected.
	 */
	if (!(fragment_offset_spec == RTE_BE16(1) &&
	      fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
					  "specified range not supported");
	return 0;
}
1909
/**
 * Validate IPV6 fragment extension item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
				    uint64_t item_flags,
				    struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
	const struct rte_flow_item_ipv6_frag_ext *last = item->last;
	const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
	rte_be16_t frag_data_spec = 0;
	rte_be16_t frag_data_last = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	/* L4 layer flag for the current encapsulation level. */
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret = 0;
	/* Fields the device supports matching on for this item. */
	struct rte_flow_item_ipv6_frag_ext nic_mask = {
		.hdr = {
			.next_header = 0xff,
			.frag_data = RTE_BE16(0xffff),
		},
	};

	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item cannot "
					  "follow L4 item.");
	/* The extension header must directly follow an IPv6 header. */
	if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
	    (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item must "
					  "follow ipv6 item");
	if (spec && mask)
		frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
	/* No effective match on frag_data - nothing more to check. */
	if (!frag_data_spec)
		return 0;
	/*
	 * spec and mask are valid, enforce using full mask to make sure the
	 * complete value is used correctly.
	 */
	if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
				RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " frag_data");
	/*
	 * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
	 * This is 1st fragment of fragmented packet.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* A frag_data spec other than MF-only requires a range. */
	if (frag_data_spec && !last)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&nic_mask,
				 sizeof(struct rte_flow_item_ipv6_frag_ext),
				 MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret)
		return ret;
	/* spec and last are valid, validate the specified range. */
	frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
	/*
	 * Match on frag_data spec 0x0009 and last 0xfff9
	 * means M is 1 and frag-offset is > 0.
	 * This packet is fragment 2nd and onward, excluding last.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
				       RTE_IPV6_EHDR_MF_MASK) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Match on frag_data spec 0x0008 and last 0xfff8
	 * means M is 0 and frag-offset is > 0.
	 * This packet is last fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/* Other range values are invalid and rejected. */
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
				  "specified range not supported");
}
2023
2024 /**
2025  * Validate the pop VLAN action.
2026  *
2027  * @param[in] dev
2028  *   Pointer to the rte_eth_dev structure.
2029  * @param[in] action_flags
2030  *   Holds the actions detected until now.
2031  * @param[in] action
2032  *   Pointer to the pop vlan action.
2033  * @param[in] item_flags
2034  *   The items found in this flow rule.
2035  * @param[in] attr
2036  *   Pointer to flow attributes.
2037  * @param[out] error
2038  *   Pointer to error structure.
2039  *
2040  * @return
2041  *   0 on success, a negative errno value otherwise and rte_errno is set.
2042  */
2043 static int
2044 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2045                                  uint64_t action_flags,
2046                                  const struct rte_flow_action *action,
2047                                  uint64_t item_flags,
2048                                  const struct rte_flow_attr *attr,
2049                                  struct rte_flow_error *error)
2050 {
2051         const struct mlx5_priv *priv = dev->data->dev_private;
2052
2053         (void)action;
2054         (void)attr;
2055         if (!priv->sh->pop_vlan_action)
2056                 return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2058                                           NULL,
2059                                           "pop vlan action is not supported");
2060         if (attr->egress)
2061                 return rte_flow_error_set(error, ENOTSUP,
2062                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2063                                           NULL,
2064                                           "pop vlan action not supported for "
2065                                           "egress");
2066         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2067                 return rte_flow_error_set(error, ENOTSUP,
2068                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2069                                           "no support for multiple VLAN "
2070                                           "actions");
2071         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2072         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2073             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2074                 return rte_flow_error_set(error, ENOTSUP,
2075                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2076                                           NULL,
2077                                           "cannot pop vlan after decap without "
2078                                           "match on inner vlan in the flow");
2079         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2080         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2081             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2082                 return rte_flow_error_set(error, ENOTSUP,
2083                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2084                                           NULL,
2085                                           "cannot pop vlan without a "
2086                                           "match on (outer) vlan in the flow");
2087         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2088                 return rte_flow_error_set(error, EINVAL,
2089                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2090                                           "wrong action order, port_id should "
2091                                           "be after pop VLAN action");
2092         if (!attr->transfer && priv->representor)
2093                 return rte_flow_error_set(error, ENOTSUP,
2094                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2095                                           "pop vlan action for VF representor "
2096                                           "not supported on NIC table");
2097         return 0;
2098 }
2099
/**
 * Get VLAN default info from vlan match info.
 *
 * @param[in] items
 *   the list of item specifications.
 * @param[out] vlan
 *   pointer VLAN info to fill to.
 */
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
				  struct rte_vlan_hdr *vlan)
{
	/* Default mask: full PCP + VID and full inner ethertype. */
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
				MLX5DV_FLOW_VLAN_VID_MASK),
		.inner_type = RTE_BE16(0xffff),
	};

	if (items == NULL)
		return;
	/* Seek the first VLAN item (public or PMD-internal) in the pattern. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int type = items->type;

		if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
		    type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
			break;
	}
	if (items->type != RTE_FLOW_ITEM_TYPE_END) {
		const struct rte_flow_item_vlan *vlan_m = items->mask;
		const struct rte_flow_item_vlan *vlan_v = items->spec;

		/* If VLAN item in pattern doesn't contain data, return here. */
		if (!vlan_v)
			return;
		if (!vlan_m)
			vlan_m = &nic_mask;
		/* Only full match values are accepted */
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
		     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
		}
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
		     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_VID_MASK_BE);
		}
		if (vlan_m->inner_type == nic_mask.inner_type)
			vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
							   vlan_m->inner_type);
	}
}
2159
2160 /**
2161  * Validate the push VLAN action.
2162  *
2163  * @param[in] dev
2164  *   Pointer to the rte_eth_dev structure.
2165  * @param[in] action_flags
2166  *   Holds the actions detected until now.
2167  * @param[in] item_flags
2168  *   The items found in this flow rule.
2169  * @param[in] action
2170  *   Pointer to the action structure.
2171  * @param[in] attr
2172  *   Pointer to flow attributes
2173  * @param[out] error
2174  *   Pointer to error structure.
2175  *
2176  * @return
2177  *   0 on success, a negative errno value otherwise and rte_errno is set.
2178  */
2179 static int
2180 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2181                                   uint64_t action_flags,
2182                                   const struct rte_flow_item_vlan *vlan_m,
2183                                   const struct rte_flow_action *action,
2184                                   const struct rte_flow_attr *attr,
2185                                   struct rte_flow_error *error)
2186 {
2187         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2188         const struct mlx5_priv *priv = dev->data->dev_private;
2189
2190         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2191             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2192                 return rte_flow_error_set(error, EINVAL,
2193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2194                                           "invalid vlan ethertype");
2195         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2196                 return rte_flow_error_set(error, EINVAL,
2197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2198                                           "wrong action order, port_id should "
2199                                           "be after push VLAN");
2200         if (!attr->transfer && priv->representor)
2201                 return rte_flow_error_set(error, ENOTSUP,
2202                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2203                                           "push vlan action for VF representor "
2204                                           "not supported on NIC table");
2205         if (vlan_m &&
2206             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2207             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2208                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2209             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2210             !(mlx5_flow_find_action
2211                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2212                 return rte_flow_error_set(error, EINVAL,
2213                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2214                                           "not full match mask on VLAN PCP and "
2215                                           "there is no of_set_vlan_pcp action, "
2216                                           "push VLAN action cannot figure out "
2217                                           "PCP value");
2218         if (vlan_m &&
2219             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2220             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2221                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2222             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2223             !(mlx5_flow_find_action
2224                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2225                 return rte_flow_error_set(error, EINVAL,
2226                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2227                                           "not full match mask on VLAN VID and "
2228                                           "there is no of_set_vlan_vid action, "
2229                                           "push VLAN action cannot figure out "
2230                                           "VID value");
2231         (void)attr;
2232         return 0;
2233 }
2234
2235 /**
2236  * Validate the set VLAN PCP.
2237  *
2238  * @param[in] action_flags
2239  *   Holds the actions detected until now.
2240  * @param[in] actions
2241  *   Pointer to the list of actions remaining in the flow rule.
2242  * @param[out] error
2243  *   Pointer to error structure.
2244  *
2245  * @return
2246  *   0 on success, a negative errno value otherwise and rte_errno is set.
2247  */
2248 static int
2249 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2250                                      const struct rte_flow_action actions[],
2251                                      struct rte_flow_error *error)
2252 {
2253         const struct rte_flow_action *action = actions;
2254         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2255
2256         if (conf->vlan_pcp > 7)
2257                 return rte_flow_error_set(error, EINVAL,
2258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2259                                           "VLAN PCP value is too big");
2260         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2261                 return rte_flow_error_set(error, ENOTSUP,
2262                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2263                                           "set VLAN PCP action must follow "
2264                                           "the push VLAN action");
2265         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2266                 return rte_flow_error_set(error, ENOTSUP,
2267                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2268                                           "Multiple VLAN PCP modification are "
2269                                           "not supported");
2270         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2271                 return rte_flow_error_set(error, EINVAL,
2272                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2273                                           "wrong action order, port_id should "
2274                                           "be after set VLAN PCP");
2275         return 0;
2276 }
2277
2278 /**
2279  * Validate the set VLAN VID.
2280  *
2281  * @param[in] item_flags
2282  *   Holds the items detected in this rule.
2283  * @param[in] action_flags
2284  *   Holds the actions detected until now.
2285  * @param[in] actions
2286  *   Pointer to the list of actions remaining in the flow rule.
2287  * @param[out] error
2288  *   Pointer to error structure.
2289  *
2290  * @return
2291  *   0 on success, a negative errno value otherwise and rte_errno is set.
2292  */
2293 static int
2294 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2295                                      uint64_t action_flags,
2296                                      const struct rte_flow_action actions[],
2297                                      struct rte_flow_error *error)
2298 {
2299         const struct rte_flow_action *action = actions;
2300         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2301
2302         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2303                 return rte_flow_error_set(error, EINVAL,
2304                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2305                                           "VLAN VID value is too big");
2306         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2307             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2308                 return rte_flow_error_set(error, ENOTSUP,
2309                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2310                                           "set VLAN VID action must follow push"
2311                                           " VLAN action or match on VLAN item");
2312         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2313                 return rte_flow_error_set(error, ENOTSUP,
2314                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2315                                           "Multiple VLAN VID modifications are "
2316                                           "not supported");
2317         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2318                 return rte_flow_error_set(error, EINVAL,
2319                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2320                                           "wrong action order, port_id should "
2321                                           "be after set VLAN VID");
2322         return 0;
2323 }
2324
2325 /*
2326  * Validate the FLAG action.
2327  *
2328  * @param[in] dev
2329  *   Pointer to the rte_eth_dev structure.
2330  * @param[in] action_flags
2331  *   Holds the actions detected until now.
2332  * @param[in] attr
2333  *   Pointer to flow attributes
2334  * @param[out] error
2335  *   Pointer to error structure.
2336  *
2337  * @return
2338  *   0 on success, a negative errno value otherwise and rte_errno is set.
2339  */
2340 static int
2341 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2342                              uint64_t action_flags,
2343                              const struct rte_flow_attr *attr,
2344                              struct rte_flow_error *error)
2345 {
2346         struct mlx5_priv *priv = dev->data->dev_private;
2347         struct mlx5_dev_config *config = &priv->config;
2348         int ret;
2349
2350         /* Fall back if no extended metadata register support. */
2351         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2352                 return mlx5_flow_validate_action_flag(action_flags, attr,
2353                                                       error);
2354         /* Extensive metadata mode requires registers. */
2355         if (!mlx5_flow_ext_mreg_supported(dev))
2356                 return rte_flow_error_set(error, ENOTSUP,
2357                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2358                                           "no metadata registers "
2359                                           "to support flag action");
2360         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2361                 return rte_flow_error_set(error, ENOTSUP,
2362                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2363                                           "extended metadata register"
2364                                           " isn't available");
2365         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2366         if (ret < 0)
2367                 return ret;
2368         MLX5_ASSERT(ret > 0);
2369         if (action_flags & MLX5_FLOW_ACTION_MARK)
2370                 return rte_flow_error_set(error, EINVAL,
2371                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2372                                           "can't mark and flag in same flow");
2373         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2374                 return rte_flow_error_set(error, EINVAL,
2375                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2376                                           "can't have 2 flag"
2377                                           " actions in same flow");
2378         return 0;
2379 }
2380
2381 /**
2382  * Validate MARK action.
2383  *
2384  * @param[in] dev
2385  *   Pointer to the rte_eth_dev structure.
2386  * @param[in] action
2387  *   Pointer to action.
2388  * @param[in] action_flags
2389  *   Holds the actions detected until now.
2390  * @param[in] attr
2391  *   Pointer to flow attributes
2392  * @param[out] error
2393  *   Pointer to error structure.
2394  *
2395  * @return
2396  *   0 on success, a negative errno value otherwise and rte_errno is set.
2397  */
2398 static int
2399 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2400                              const struct rte_flow_action *action,
2401                              uint64_t action_flags,
2402                              const struct rte_flow_attr *attr,
2403                              struct rte_flow_error *error)
2404 {
2405         struct mlx5_priv *priv = dev->data->dev_private;
2406         struct mlx5_dev_config *config = &priv->config;
2407         const struct rte_flow_action_mark *mark = action->conf;
2408         int ret;
2409
2410         /* Fall back if no extended metadata register support. */
2411         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2412                 return mlx5_flow_validate_action_mark(action, action_flags,
2413                                                       attr, error);
2414         /* Extensive metadata mode requires registers. */
2415         if (!mlx5_flow_ext_mreg_supported(dev))
2416                 return rte_flow_error_set(error, ENOTSUP,
2417                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2418                                           "no metadata registers "
2419                                           "to support mark action");
2420         if (!priv->sh->dv_mark_mask)
2421                 return rte_flow_error_set(error, ENOTSUP,
2422                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2423                                           "extended metadata register"
2424                                           " isn't available");
2425         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2426         if (ret < 0)
2427                 return ret;
2428         MLX5_ASSERT(ret > 0);
2429         if (!mark)
2430                 return rte_flow_error_set(error, EINVAL,
2431                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2432                                           "configuration cannot be null");
2433         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2434                 return rte_flow_error_set(error, EINVAL,
2435                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2436                                           &mark->id,
2437                                           "mark id exceeds the limit");
2438         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2439                 return rte_flow_error_set(error, EINVAL,
2440                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2441                                           "can't flag and mark in same flow");
2442         if (action_flags & MLX5_FLOW_ACTION_MARK)
2443                 return rte_flow_error_set(error, EINVAL,
2444                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2445                                           "can't have 2 mark actions in same"
2446                                           " flow");
2447         return 0;
2448 }
2449
2450 /**
2451  * Validate SET_META action.
2452  *
2453  * @param[in] dev
2454  *   Pointer to the rte_eth_dev structure.
2455  * @param[in] action
2456  *   Pointer to the action structure.
2457  * @param[in] action_flags
2458  *   Holds the actions detected until now.
2459  * @param[in] attr
2460  *   Pointer to flow attributes
2461  * @param[out] error
2462  *   Pointer to error structure.
2463  *
2464  * @return
2465  *   0 on success, a negative errno value otherwise and rte_errno is set.
2466  */
2467 static int
2468 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2469                                  const struct rte_flow_action *action,
2470                                  uint64_t action_flags __rte_unused,
2471                                  const struct rte_flow_attr *attr,
2472                                  struct rte_flow_error *error)
2473 {
2474         const struct rte_flow_action_set_meta *conf;
2475         uint32_t nic_mask = UINT32_MAX;
2476         int reg;
2477
2478         if (!mlx5_flow_ext_mreg_supported(dev))
2479                 return rte_flow_error_set(error, ENOTSUP,
2480                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2481                                           "extended metadata register"
2482                                           " isn't supported");
2483         reg = flow_dv_get_metadata_reg(dev, attr, error);
2484         if (reg < 0)
2485                 return reg;
2486         if (reg != REG_A && reg != REG_B) {
2487                 struct mlx5_priv *priv = dev->data->dev_private;
2488
2489                 nic_mask = priv->sh->dv_meta_mask;
2490         }
2491         if (!(action->conf))
2492                 return rte_flow_error_set(error, EINVAL,
2493                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2494                                           "configuration cannot be null");
2495         conf = (const struct rte_flow_action_set_meta *)action->conf;
2496         if (!conf->mask)
2497                 return rte_flow_error_set(error, EINVAL,
2498                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2499                                           "zero mask doesn't have any effect");
2500         if (conf->mask & ~nic_mask)
2501                 return rte_flow_error_set(error, EINVAL,
2502                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2503                                           "meta data must be within reg C0");
2504         return 0;
2505 }
2506
2507 /**
2508  * Validate SET_TAG action.
2509  *
2510  * @param[in] dev
2511  *   Pointer to the rte_eth_dev structure.
2512  * @param[in] action
2513  *   Pointer to the action structure.
2514  * @param[in] action_flags
2515  *   Holds the actions detected until now.
2516  * @param[in] attr
2517  *   Pointer to flow attributes
2518  * @param[out] error
2519  *   Pointer to error structure.
2520  *
2521  * @return
2522  *   0 on success, a negative errno value otherwise and rte_errno is set.
2523  */
2524 static int
2525 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2526                                 const struct rte_flow_action *action,
2527                                 uint64_t action_flags,
2528                                 const struct rte_flow_attr *attr,
2529                                 struct rte_flow_error *error)
2530 {
2531         const struct rte_flow_action_set_tag *conf;
2532         const uint64_t terminal_action_flags =
2533                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2534                 MLX5_FLOW_ACTION_RSS;
2535         int ret;
2536
2537         if (!mlx5_flow_ext_mreg_supported(dev))
2538                 return rte_flow_error_set(error, ENOTSUP,
2539                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2540                                           "extensive metadata register"
2541                                           " isn't supported");
2542         if (!(action->conf))
2543                 return rte_flow_error_set(error, EINVAL,
2544                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2545                                           "configuration cannot be null");
2546         conf = (const struct rte_flow_action_set_tag *)action->conf;
2547         if (!conf->mask)
2548                 return rte_flow_error_set(error, EINVAL,
2549                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2550                                           "zero mask doesn't have any effect");
2551         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2552         if (ret < 0)
2553                 return ret;
2554         if (!attr->transfer && attr->ingress &&
2555             (action_flags & terminal_action_flags))
2556                 return rte_flow_error_set(error, EINVAL,
2557                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2558                                           "set_tag has no effect"
2559                                           " with terminal actions");
2560         return 0;
2561 }
2562
/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* DevX is required to allocate flow counters. */
	if (!priv->config.devx)
		goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/* Reached when DevX counter support is compiled out or devx is off. */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
2592
2593 /**
2594  * Validate the L2 encap action.
2595  *
2596  * @param[in] dev
2597  *   Pointer to the rte_eth_dev structure.
2598  * @param[in] action_flags
2599  *   Holds the actions detected until now.
2600  * @param[in] action
2601  *   Pointer to the action structure.
2602  * @param[in] attr
2603  *   Pointer to flow attributes.
2604  * @param[out] error
2605  *   Pointer to error structure.
2606  *
2607  * @return
2608  *   0 on success, a negative errno value otherwise and rte_errno is set.
2609  */
2610 static int
2611 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2612                                  uint64_t action_flags,
2613                                  const struct rte_flow_action *action,
2614                                  const struct rte_flow_attr *attr,
2615                                  struct rte_flow_error *error)
2616 {
2617         const struct mlx5_priv *priv = dev->data->dev_private;
2618
2619         if (!(action->conf))
2620                 return rte_flow_error_set(error, EINVAL,
2621                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2622                                           "configuration cannot be null");
2623         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2624                 return rte_flow_error_set(error, EINVAL,
2625                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2626                                           "can only have a single encap action "
2627                                           "in a flow");
2628         if (!attr->transfer && priv->representor)
2629                 return rte_flow_error_set(error, ENOTSUP,
2630                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2631                                           "encap action for VF representor "
2632                                           "not supported on NIC table");
2633         return 0;
2634 }
2635
2636 /**
2637  * Validate a decap action.
2638  *
2639  * @param[in] dev
2640  *   Pointer to the rte_eth_dev structure.
2641  * @param[in] action_flags
2642  *   Holds the actions detected until now.
2643  * @param[in] attr
2644  *   Pointer to flow attributes
2645  * @param[out] error
2646  *   Pointer to error structure.
2647  *
2648  * @return
2649  *   0 on success, a negative errno value otherwise and rte_errno is set.
2650  */
2651 static int
2652 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2653                               uint64_t action_flags,
2654                               const struct rte_flow_attr *attr,
2655                               struct rte_flow_error *error)
2656 {
2657         const struct mlx5_priv *priv = dev->data->dev_private;
2658
2659         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2660             !priv->config.decap_en)
2661                 return rte_flow_error_set(error, ENOTSUP,
2662                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2663                                           "decap is not enabled");
2664         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2665                 return rte_flow_error_set(error, ENOTSUP,
2666                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2667                                           action_flags &
2668                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2669                                           "have a single decap action" : "decap "
2670                                           "after encap is not supported");
2671         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2672                 return rte_flow_error_set(error, EINVAL,
2673                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2674                                           "can't have decap action after"
2675                                           " modify action");
2676         if (attr->egress)
2677                 return rte_flow_error_set(error, ENOTSUP,
2678                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2679                                           NULL,
2680                                           "decap action not supported for "
2681                                           "egress");
2682         if (!attr->transfer && priv->representor)
2683                 return rte_flow_error_set(error, ENOTSUP,
2684                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2685                                           "decap action for VF representor "
2686                                           "not supported on NIC table");
2687         return 0;
2688 }
2689
/* Shared empty raw decap configuration: no data pointer, zero size. */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2691
/**
 * Validate the raw encap and decap actions.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] decap
 *   Pointer to the decap action, may be NULL.
 * @param[in] encap
 *   Pointer to the encap action, may be NULL.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[in/out] action_flags
 *   Holds the actions detected until now.
 * @param[out] actions_n
 *   Pointer to the number of actions counter.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap_decap
	(struct rte_eth_dev *dev,
	 const struct rte_flow_action_raw_decap *decap,
	 const struct rte_flow_action_raw_encap *encap,
	 const struct rte_flow_attr *attr, uint64_t *action_flags,
	 int *actions_n, struct rte_flow_error *error)
{
	const struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	if (encap && (!encap->size || !encap->data))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "raw encap data cannot be empty");
	/*
	 * When both actions are present, classify the pair by comparing
	 * each size against MLX5_ENCAPSULATION_DECISION_SIZE and drop the
	 * action that is folded into the other's L3 reformat:
	 * small decap + large encap => single L3 encap (decap ignored),
	 * large decap + small encap => single L3 decap (encap ignored),
	 * large decap + large encap => two independent L2 actions.
	 */
	if (decap && encap) {
		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 encap. */
			decap = NULL;
		else if (encap->size <=
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 decap. */
			encap = NULL;
		else if (encap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* 2 L2 actions: encap and decap. */
			;
		else
			return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "unsupported too small "
				"raw decap and too small raw "
				"encap combination");
	}
	if (decap) {
		ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
						    error);
		if (ret < 0)
			return ret;
		*action_flags |= MLX5_FLOW_ACTION_DECAP;
		++(*actions_n);
	}
	if (encap) {
		/* A lone encap below the threshold cannot be expressed. */
		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "small raw encap size");
		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "more than one encap action");
		if (!attr->transfer && priv->representor)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "encap action for VF representor "
					 "not supported on NIC table");
		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
		++(*actions_n);
	}
	return 0;
}
2783
2784 /**
2785  * Match encap_decap resource.
2786  *
2787  * @param list
2788  *   Pointer to the hash list.
2789  * @param entry
2790  *   Pointer to exist resource entry object.
2791  * @param key
2792  *   Key of the new entry.
2793  * @param ctx_cb
2794  *   Pointer to new encap_decap resource.
2795  *
2796  * @return
2797  *   0 on matching, none-zero otherwise.
2798  */
2799 int
2800 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2801                              struct mlx5_hlist_entry *entry,
2802                              uint64_t key __rte_unused, void *cb_ctx)
2803 {
2804         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2805         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2806         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2807
2808         cache_resource = container_of(entry,
2809                                       struct mlx5_flow_dv_encap_decap_resource,
2810                                       entry);
2811         if (resource->entry.key == cache_resource->entry.key &&
2812             resource->reformat_type == cache_resource->reformat_type &&
2813             resource->ft_type == cache_resource->ft_type &&
2814             resource->flags == cache_resource->flags &&
2815             resource->size == cache_resource->size &&
2816             !memcmp((const void *)resource->buf,
2817                     (const void *)cache_resource->buf,
2818                     resource->size))
2819                 return 0;
2820         return -1;
2821 }
2822
/**
 * Allocate encap_decap resource.
 *
 * @param list
 *   Pointer to the hash list.
 * @param key
 *   Key of the new entry (unused, already embedded in the resource).
 * @param cb_ctx
 *   Pointer to the flow callback context holding the new encap_decap
 *   resource template and the error structure.
 *
 * @return
 *   Pointer to the hash list entry of the created resource,
 *   NULL on failure (rte_flow error is set through the context).
 */
struct mlx5_hlist_entry *
flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
			      uint64_t key __rte_unused,
			      void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *domain;
	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	uint32_t idx;
	int ret;

	/* Pick the DR domain matching the resource's flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Register new encap/decap resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
				       &idx);
	if (!cache_resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	/* Copy the template, then remember the ipool index for later free. */
	*cache_resource = *resource;
	cache_resource->idx = idx;
	ret = mlx5_flow_os_create_flow_action_packet_reformat
					(sh->ctx, domain, cache_resource,
					 &cache_resource->action);
	if (ret) {
		/* Roll back the ipool allocation on action creation failure. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create action");
		return NULL;
	}

	return &cache_resource->entry;
}
2879
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_hlist_entry *entry;
	/* Pack the distinguishing fields into the 64-bit hash key. */
	union mlx5_flow_encap_decap_key encap_decap_key = {
		{
			.ft_type = resource->ft_type,
			.refmt_type = resource->reformat_type,
			.buf_size = resource->size,
			.table_level = !!dev_flow->dv.group,
			.cksum = 0, /* Filled below from the buffer data. */
		}
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};

	/*
	 * Group 0 tables get flags 1 (root level, see
	 * MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL), others 0.
	 */
	resource->flags = dev_flow->dv.group ? 0 : 1;
	/* Fold the reformat buffer into the key via a raw IP checksum. */
	encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
						resource->size, 0);
	resource->entry.key = encap_decap_key.v64;
	/* Reuses a matching entry or invokes the create callback. */
	entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
				    &ctx);
	if (!entry)
		return -rte_errno;
	/* Re-point at the registered (possibly pre-existing) resource. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->dv.encap_decap = resource;
	dev_flow->handle->dvh.rix_encap_decap = resource->idx;
	return 0;
}
2932
2933 /**
2934  * Find existing table jump resource or create and register a new one.
2935  *
2936  * @param[in, out] dev
2937  *   Pointer to rte_eth_dev structure.
2938  * @param[in, out] tbl
2939  *   Pointer to flow table resource.
2940  * @parm[in, out] dev_flow
2941  *   Pointer to the dev_flow.
2942  * @param[out] error
2943  *   pointer to error structure.
2944  *
2945  * @return
2946  *   0 on success otherwise -errno and errno is set.
2947  */
2948 static int
2949 flow_dv_jump_tbl_resource_register
2950                         (struct rte_eth_dev *dev __rte_unused,
2951                          struct mlx5_flow_tbl_resource *tbl,
2952                          struct mlx5_flow *dev_flow,
2953                          struct rte_flow_error *error __rte_unused)
2954 {
2955         struct mlx5_flow_tbl_data_entry *tbl_data =
2956                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2957
2958         MLX5_ASSERT(tbl);
2959         MLX5_ASSERT(tbl_data->jump.action);
2960         dev_flow->handle->rix_jump = tbl_data->idx;
2961         dev_flow->dv.jump = &tbl_data->jump;
2962         return 0;
2963 }
2964
2965 int
2966 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2967                          struct mlx5_cache_entry *entry, void *cb_ctx)
2968 {
2969         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2970         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2971         struct mlx5_flow_dv_port_id_action_resource *res =
2972                         container_of(entry, typeof(*res), entry);
2973
2974         return ref->port_id != res->port_id;
2975 }
2976
2977 struct mlx5_cache_entry *
2978 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
2979                           struct mlx5_cache_entry *entry __rte_unused,
2980                           void *cb_ctx)
2981 {
2982         struct mlx5_dev_ctx_shared *sh = list->ctx;
2983         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2984         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2985         struct mlx5_flow_dv_port_id_action_resource *cache;
2986         uint32_t idx;
2987         int ret;
2988
2989         /* Register new port id action resource. */
2990         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
2991         if (!cache) {
2992                 rte_flow_error_set(ctx->error, ENOMEM,
2993                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2994                                    "cannot allocate port_id action cache memory");
2995                 return NULL;
2996         }
2997         *cache = *ref;
2998         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
2999                                                         ref->port_id,
3000                                                         &cache->action);
3001         if (ret) {
3002                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3003                 rte_flow_error_set(ctx->error, ENOMEM,
3004                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3005                                    "cannot create action");
3006                 return NULL;
3007         }
3008         return &cache->entry;
3009 }
3010
3011 /**
3012  * Find existing table port ID resource or create and register a new one.
3013  *
3014  * @param[in, out] dev
3015  *   Pointer to rte_eth_dev structure.
3016  * @param[in, out] resource
3017  *   Pointer to port ID action resource.
3018  * @parm[in, out] dev_flow
3019  *   Pointer to the dev_flow.
3020  * @param[out] error
3021  *   pointer to error structure.
3022  *
3023  * @return
3024  *   0 on success otherwise -errno and errno is set.
3025  */
3026 static int
3027 flow_dv_port_id_action_resource_register
3028                         (struct rte_eth_dev *dev,
3029                          struct mlx5_flow_dv_port_id_action_resource *resource,
3030                          struct mlx5_flow *dev_flow,
3031                          struct rte_flow_error *error)
3032 {
3033         struct mlx5_priv *priv = dev->data->dev_private;
3034         struct mlx5_cache_entry *entry;
3035         struct mlx5_flow_dv_port_id_action_resource *cache;
3036         struct mlx5_flow_cb_ctx ctx = {
3037                 .error = error,
3038                 .data = resource,
3039         };
3040
3041         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3042         if (!entry)
3043                 return -rte_errno;
3044         cache = container_of(entry, typeof(*cache), entry);
3045         dev_flow->dv.port_id_action = cache;
3046         dev_flow->handle->rix_port_id_action = cache->idx;
3047         return 0;
3048 }
3049
3050 int
3051 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3052                          struct mlx5_cache_entry *entry, void *cb_ctx)
3053 {
3054         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3055         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3056         struct mlx5_flow_dv_push_vlan_action_resource *res =
3057                         container_of(entry, typeof(*res), entry);
3058
3059         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3060 }
3061
3062 struct mlx5_cache_entry *
3063 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3064                           struct mlx5_cache_entry *entry __rte_unused,
3065                           void *cb_ctx)
3066 {
3067         struct mlx5_dev_ctx_shared *sh = list->ctx;
3068         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3069         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3070         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3071         struct mlx5dv_dr_domain *domain;
3072         uint32_t idx;
3073         int ret;
3074
3075         /* Register new port id action resource. */
3076         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3077         if (!cache) {
3078                 rte_flow_error_set(ctx->error, ENOMEM,
3079                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3080                                    "cannot allocate push_vlan action cache memory");
3081                 return NULL;
3082         }
3083         *cache = *ref;
3084         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3085                 domain = sh->fdb_domain;
3086         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3087                 domain = sh->rx_domain;
3088         else
3089                 domain = sh->tx_domain;
3090         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3091                                                         &cache->action);
3092         if (ret) {
3093                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3094                 rte_flow_error_set(ctx->error, ENOMEM,
3095                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3096                                    "cannot create push vlan action");
3097                 return NULL;
3098         }
3099         return &cache->entry;
3100 }
3101
3102 /**
3103  * Find existing push vlan resource or create and register a new one.
3104  *
3105  * @param [in, out] dev
3106  *   Pointer to rte_eth_dev structure.
3107  * @param[in, out] resource
3108  *   Pointer to port ID action resource.
3109  * @parm[in, out] dev_flow
3110  *   Pointer to the dev_flow.
3111  * @param[out] error
3112  *   pointer to error structure.
3113  *
3114  * @return
3115  *   0 on success otherwise -errno and errno is set.
3116  */
3117 static int
3118 flow_dv_push_vlan_action_resource_register
3119                        (struct rte_eth_dev *dev,
3120                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3121                         struct mlx5_flow *dev_flow,
3122                         struct rte_flow_error *error)
3123 {
3124         struct mlx5_priv *priv = dev->data->dev_private;
3125         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3126         struct mlx5_cache_entry *entry;
3127         struct mlx5_flow_cb_ctx ctx = {
3128                 .error = error,
3129                 .data = resource,
3130         };
3131
3132         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3133         if (!entry)
3134                 return -rte_errno;
3135         cache = container_of(entry, typeof(*cache), entry);
3136
3137         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3138         dev_flow->dv.push_vlan_res = cache;
3139         return 0;
3140 }
3141
3142 /**
3143  * Get the size of specific rte_flow_item_type hdr size
3144  *
3145  * @param[in] item_type
3146  *   Tested rte_flow_item_type.
3147  *
3148  * @return
3149  *   sizeof struct item_type, 0 if void or irrelevant.
3150  */
3151 static size_t
3152 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3153 {
3154         size_t retval;
3155
3156         switch (item_type) {
3157         case RTE_FLOW_ITEM_TYPE_ETH:
3158                 retval = sizeof(struct rte_ether_hdr);
3159                 break;
3160         case RTE_FLOW_ITEM_TYPE_VLAN:
3161                 retval = sizeof(struct rte_vlan_hdr);
3162                 break;
3163         case RTE_FLOW_ITEM_TYPE_IPV4:
3164                 retval = sizeof(struct rte_ipv4_hdr);
3165                 break;
3166         case RTE_FLOW_ITEM_TYPE_IPV6:
3167                 retval = sizeof(struct rte_ipv6_hdr);
3168                 break;
3169         case RTE_FLOW_ITEM_TYPE_UDP:
3170                 retval = sizeof(struct rte_udp_hdr);
3171                 break;
3172         case RTE_FLOW_ITEM_TYPE_TCP:
3173                 retval = sizeof(struct rte_tcp_hdr);
3174                 break;
3175         case RTE_FLOW_ITEM_TYPE_VXLAN:
3176         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3177                 retval = sizeof(struct rte_vxlan_hdr);
3178                 break;
3179         case RTE_FLOW_ITEM_TYPE_GRE:
3180         case RTE_FLOW_ITEM_TYPE_NVGRE:
3181                 retval = sizeof(struct rte_gre_hdr);
3182                 break;
3183         case RTE_FLOW_ITEM_TYPE_MPLS:
3184                 retval = sizeof(struct rte_mpls_hdr);
3185                 break;
3186         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3187         default:
3188                 retval = 0;
3189                 break;
3190         }
3191         return retval;
3192 }
3193
/* Default header field values applied when the encap data leaves them 0. */
#define MLX5_ENCAP_IPV4_VERSION		0x40
#define MLX5_ENCAP_IPV4_IHL_MIN		0x05
#define MLX5_ENCAP_IPV4_TTL_DEF		0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
#define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04

/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Copies each item's spec into @p buf back to back and fills mandatory
 * header fields (ethertype, IP next-protocol, UDP destination port, etc.)
 * with defaults when the caller left them zero. The item ordering is
 * validated on the fly (e.g. VLAN must follow ETH, UDP must follow IP).
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	/* Pointers into buf, remembered to patch earlier headers. */
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv4_hdr *ipv4 = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct rte_gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_hdr_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		/* Append the raw item spec; defaults are patched below. */
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct rte_ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			/* Default the preceding ethertype to VLAN. */
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Default the innermost L2 ethertype to IPv4. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Default the innermost L2 ethertype to IPv6. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct rte_udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			/* Default the IP next-protocol to UDP. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE carries the next protocol; it cannot default. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct rte_gre_hdr *)&buf[temp_size];
			/* GRE carries the next protocol; it cannot default. */
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			/* Default the IP next-protocol to GRE. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
3371
/**
 * Zero the UDP checksum in an encapsulation header buffer when the outer
 * headers are IPv6/UDP (hardware computes the IPv4 checksum, so IPv4 frames
 * are left untouched).
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation buffer, starting at the Ethernet header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success (including the ignored IPv4 and non-UDP cases), a negative
 *   errno value otherwise and rte_flow error is set.
 */
static int
flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
{
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	char *next_hdr;
	uint16_t proto;

	eth = (struct rte_ether_hdr *)data;
	next_hdr = (char *)(eth + 1);
	/*
	 * NOTE(review): RTE_BE16() is used here as a byte-swap to bring the
	 * big-endian ethertype into CPU order; rte_be_to_cpu_16() would be
	 * the canonical accessor - confirm equivalence on big-endian builds.
	 */
	proto = RTE_BE16(eth->ether_type);

	/* VLAN skipping */
	while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
		vlan = (struct rte_vlan_hdr *)next_hdr;
		proto = RTE_BE16(vlan->eth_proto);
		next_hdr += sizeof(struct rte_vlan_hdr);
	}

	/* HW calculates IPv4 csum. no need to proceed */
	if (proto == RTE_ETHER_TYPE_IPV4)
		return 0;

	/* non IPv4/IPv6 header. not supported */
	if (proto != RTE_ETHER_TYPE_IPV6) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "Cannot offload non IPv4/IPv6");
	}

	ipv6 = (struct rte_ipv6_hdr *)next_hdr;

	/* ignore non UDP */
	if (ipv6->proto != IPPROTO_UDP)
		return 0;

	/* Zero means "checksum not present" for UDP over IPv6 offload. */
	udp = (struct rte_udp_hdr *)(ipv6 + 1);
	udp->dgram_cksum = 0;

	return 0;
}
3415
3416 /**
3417  * Convert L2 encap action to DV specification.
3418  *
3419  * @param[in] dev
3420  *   Pointer to rte_eth_dev structure.
3421  * @param[in] action
3422  *   Pointer to action structure.
3423  * @param[in, out] dev_flow
3424  *   Pointer to the mlx5_flow.
3425  * @param[in] transfer
3426  *   Mark if the flow is E-Switch flow.
3427  * @param[out] error
3428  *   Pointer to the error structure.
3429  *
3430  * @return
3431  *   0 on success, a negative errno value otherwise and rte_errno is set.
3432  */
3433 static int
3434 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3435                                const struct rte_flow_action *action,
3436                                struct mlx5_flow *dev_flow,
3437                                uint8_t transfer,
3438                                struct rte_flow_error *error)
3439 {
3440         const struct rte_flow_item *encap_data;
3441         const struct rte_flow_action_raw_encap *raw_encap_data;
3442         struct mlx5_flow_dv_encap_decap_resource res = {
3443                 .reformat_type =
3444                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3445                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3446                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3447         };
3448
3449         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3450                 raw_encap_data =
3451                         (const struct rte_flow_action_raw_encap *)action->conf;
3452                 res.size = raw_encap_data->size;
3453                 memcpy(res.buf, raw_encap_data->data, res.size);
3454         } else {
3455                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3456                         encap_data =
3457                                 ((const struct rte_flow_action_vxlan_encap *)
3458                                                 action->conf)->definition;
3459                 else
3460                         encap_data =
3461                                 ((const struct rte_flow_action_nvgre_encap *)
3462                                                 action->conf)->definition;
3463                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3464                                                &res.size, error))
3465                         return -rte_errno;
3466         }
3467         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3468                 return -rte_errno;
3469         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3470                 return rte_flow_error_set(error, EINVAL,
3471                                           RTE_FLOW_ERROR_TYPE_ACTION,
3472                                           NULL, "can't create L2 encap action");
3473         return 0;
3474 }
3475
3476 /**
3477  * Convert L2 decap action to DV specification.
3478  *
3479  * @param[in] dev
3480  *   Pointer to rte_eth_dev structure.
3481  * @param[in, out] dev_flow
3482  *   Pointer to the mlx5_flow.
3483  * @param[in] transfer
3484  *   Mark if the flow is E-Switch flow.
3485  * @param[out] error
3486  *   Pointer to the error structure.
3487  *
3488  * @return
3489  *   0 on success, a negative errno value otherwise and rte_errno is set.
3490  */
3491 static int
3492 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3493                                struct mlx5_flow *dev_flow,
3494                                uint8_t transfer,
3495                                struct rte_flow_error *error)
3496 {
3497         struct mlx5_flow_dv_encap_decap_resource res = {
3498                 .size = 0,
3499                 .reformat_type =
3500                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3501                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3502                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3503         };
3504
3505         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3506                 return rte_flow_error_set(error, EINVAL,
3507                                           RTE_FLOW_ERROR_TYPE_ACTION,
3508                                           NULL, "can't create L2 decap action");
3509         return 0;
3510 }
3511
3512 /**
3513  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3514  *
3515  * @param[in] dev
3516  *   Pointer to rte_eth_dev structure.
3517  * @param[in] action
3518  *   Pointer to action structure.
3519  * @param[in, out] dev_flow
3520  *   Pointer to the mlx5_flow.
3521  * @param[in] attr
3522  *   Pointer to the flow attributes.
3523  * @param[out] error
3524  *   Pointer to the error structure.
3525  *
3526  * @return
3527  *   0 on success, a negative errno value otherwise and rte_errno is set.
3528  */
3529 static int
3530 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3531                                 const struct rte_flow_action *action,
3532                                 struct mlx5_flow *dev_flow,
3533                                 const struct rte_flow_attr *attr,
3534                                 struct rte_flow_error *error)
3535 {
3536         const struct rte_flow_action_raw_encap *encap_data;
3537         struct mlx5_flow_dv_encap_decap_resource res;
3538
3539         memset(&res, 0, sizeof(res));
3540         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3541         res.size = encap_data->size;
3542         memcpy(res.buf, encap_data->data, res.size);
3543         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3544                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3545                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3546         if (attr->transfer)
3547                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3548         else
3549                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3550                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3551         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3552                 return rte_flow_error_set(error, EINVAL,
3553                                           RTE_FLOW_ERROR_TYPE_ACTION,
3554                                           NULL, "can't create encap action");
3555         return 0;
3556 }
3557
3558 /**
3559  * Create action push VLAN.
3560  *
3561  * @param[in] dev
3562  *   Pointer to rte_eth_dev structure.
3563  * @param[in] attr
3564  *   Pointer to the flow attributes.
3565  * @param[in] vlan
3566  *   Pointer to the vlan to push to the Ethernet header.
3567  * @param[in, out] dev_flow
3568  *   Pointer to the mlx5_flow.
3569  * @param[out] error
3570  *   Pointer to the error structure.
3571  *
3572  * @return
3573  *   0 on success, a negative errno value otherwise and rte_errno is set.
3574  */
3575 static int
3576 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3577                                 const struct rte_flow_attr *attr,
3578                                 const struct rte_vlan_hdr *vlan,
3579                                 struct mlx5_flow *dev_flow,
3580                                 struct rte_flow_error *error)
3581 {
3582         struct mlx5_flow_dv_push_vlan_action_resource res;
3583
3584         memset(&res, 0, sizeof(res));
3585         res.vlan_tag =
3586                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3587                                  vlan->vlan_tci);
3588         if (attr->transfer)
3589                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3590         else
3591                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3592                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3593         return flow_dv_push_vlan_action_resource_register
3594                                             (dev, &res, dev_flow, error);
3595 }
3596
3597 static int fdb_mirror;
3598
3599 /**
3600  * Validate the modify-header actions.
3601  *
3602  * @param[in] action_flags
3603  *   Holds the actions detected until now.
3604  * @param[in] action
3605  *   Pointer to the modify action.
3606  * @param[out] error
3607  *   Pointer to error structure.
3608  *
3609  * @return
3610  *   0 on success, a negative errno value otherwise and rte_errno is set.
3611  */
3612 static int
3613 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3614                                    const struct rte_flow_action *action,
3615                                    struct rte_flow_error *error)
3616 {
3617         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3618                 return rte_flow_error_set(error, EINVAL,
3619                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3620                                           NULL, "action configuration not set");
3621         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3622                 return rte_flow_error_set(error, EINVAL,
3623                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3624                                           "can't have encap action before"
3625                                           " modify action");
3626         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3627                 return rte_flow_error_set(error, EINVAL,
3628                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3629                                           "can't support sample action before"
3630                                           " modify action for E-Switch"
3631                                           " mirroring");
3632         return 0;
3633 }
3634
3635 /**
3636  * Validate the modify-header MAC address actions.
3637  *
3638  * @param[in] action_flags
3639  *   Holds the actions detected until now.
3640  * @param[in] action
3641  *   Pointer to the modify action.
3642  * @param[in] item_flags
3643  *   Holds the items detected.
3644  * @param[out] error
3645  *   Pointer to error structure.
3646  *
3647  * @return
3648  *   0 on success, a negative errno value otherwise and rte_errno is set.
3649  */
3650 static int
3651 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3652                                    const struct rte_flow_action *action,
3653                                    const uint64_t item_flags,
3654                                    struct rte_flow_error *error)
3655 {
3656         int ret = 0;
3657
3658         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3659         if (!ret) {
3660                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3661                         return rte_flow_error_set(error, EINVAL,
3662                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3663                                                   NULL,
3664                                                   "no L2 item in pattern");
3665         }
3666         return ret;
3667 }
3668
3669 /**
3670  * Validate the modify-header IPv4 address actions.
3671  *
3672  * @param[in] action_flags
3673  *   Holds the actions detected until now.
3674  * @param[in] action
3675  *   Pointer to the modify action.
3676  * @param[in] item_flags
3677  *   Holds the items detected.
3678  * @param[out] error
3679  *   Pointer to error structure.
3680  *
3681  * @return
3682  *   0 on success, a negative errno value otherwise and rte_errno is set.
3683  */
3684 static int
3685 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3686                                     const struct rte_flow_action *action,
3687                                     const uint64_t item_flags,
3688                                     struct rte_flow_error *error)
3689 {
3690         int ret = 0;
3691         uint64_t layer;
3692
3693         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3694         if (!ret) {
3695                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3696                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3697                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3698                 if (!(item_flags & layer))
3699                         return rte_flow_error_set(error, EINVAL,
3700                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3701                                                   NULL,
3702                                                   "no ipv4 item in pattern");
3703         }
3704         return ret;
3705 }
3706
3707 /**
3708  * Validate the modify-header IPv6 address actions.
3709  *
3710  * @param[in] action_flags
3711  *   Holds the actions detected until now.
3712  * @param[in] action
3713  *   Pointer to the modify action.
3714  * @param[in] item_flags
3715  *   Holds the items detected.
3716  * @param[out] error
3717  *   Pointer to error structure.
3718  *
3719  * @return
3720  *   0 on success, a negative errno value otherwise and rte_errno is set.
3721  */
3722 static int
3723 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3724                                     const struct rte_flow_action *action,
3725                                     const uint64_t item_flags,
3726                                     struct rte_flow_error *error)
3727 {
3728         int ret = 0;
3729         uint64_t layer;
3730
3731         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3732         if (!ret) {
3733                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3734                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3735                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3736                 if (!(item_flags & layer))
3737                         return rte_flow_error_set(error, EINVAL,
3738                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3739                                                   NULL,
3740                                                   "no ipv6 item in pattern");
3741         }
3742         return ret;
3743 }
3744
3745 /**
3746  * Validate the modify-header TP actions.
3747  *
3748  * @param[in] action_flags
3749  *   Holds the actions detected until now.
3750  * @param[in] action
3751  *   Pointer to the modify action.
3752  * @param[in] item_flags
3753  *   Holds the items detected.
3754  * @param[out] error
3755  *   Pointer to error structure.
3756  *
3757  * @return
3758  *   0 on success, a negative errno value otherwise and rte_errno is set.
3759  */
3760 static int
3761 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3762                                   const struct rte_flow_action *action,
3763                                   const uint64_t item_flags,
3764                                   struct rte_flow_error *error)
3765 {
3766         int ret = 0;
3767         uint64_t layer;
3768
3769         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3770         if (!ret) {
3771                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3772                                  MLX5_FLOW_LAYER_INNER_L4 :
3773                                  MLX5_FLOW_LAYER_OUTER_L4;
3774                 if (!(item_flags & layer))
3775                         return rte_flow_error_set(error, EINVAL,
3776                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3777                                                   NULL, "no transport layer "
3778                                                   "in pattern");
3779         }
3780         return ret;
3781 }
3782
3783 /**
3784  * Validate the modify-header actions of increment/decrement
3785  * TCP Sequence-number.
3786  *
3787  * @param[in] action_flags
3788  *   Holds the actions detected until now.
3789  * @param[in] action
3790  *   Pointer to the modify action.
3791  * @param[in] item_flags
3792  *   Holds the items detected.
3793  * @param[out] error
3794  *   Pointer to error structure.
3795  *
3796  * @return
3797  *   0 on success, a negative errno value otherwise and rte_errno is set.
3798  */
3799 static int
3800 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3801                                        const struct rte_flow_action *action,
3802                                        const uint64_t item_flags,
3803                                        struct rte_flow_error *error)
3804 {
3805         int ret = 0;
3806         uint64_t layer;
3807
3808         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3809         if (!ret) {
3810                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3811                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3812                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3813                 if (!(item_flags & layer))
3814                         return rte_flow_error_set(error, EINVAL,
3815                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3816                                                   NULL, "no TCP item in"
3817                                                   " pattern");
3818                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3819                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3820                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3821                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3822                         return rte_flow_error_set(error, EINVAL,
3823                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3824                                                   NULL,
3825                                                   "cannot decrease and increase"
3826                                                   " TCP sequence number"
3827                                                   " at the same time");
3828         }
3829         return ret;
3830 }
3831
3832 /**
3833  * Validate the modify-header actions of increment/decrement
3834  * TCP Acknowledgment number.
3835  *
3836  * @param[in] action_flags
3837  *   Holds the actions detected until now.
3838  * @param[in] action
3839  *   Pointer to the modify action.
3840  * @param[in] item_flags
3841  *   Holds the items detected.
3842  * @param[out] error
3843  *   Pointer to error structure.
3844  *
3845  * @return
3846  *   0 on success, a negative errno value otherwise and rte_errno is set.
3847  */
3848 static int
3849 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3850                                        const struct rte_flow_action *action,
3851                                        const uint64_t item_flags,
3852                                        struct rte_flow_error *error)
3853 {
3854         int ret = 0;
3855         uint64_t layer;
3856
3857         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3858         if (!ret) {
3859                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3860                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3861                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3862                 if (!(item_flags & layer))
3863                         return rte_flow_error_set(error, EINVAL,
3864                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3865                                                   NULL, "no TCP item in"
3866                                                   " pattern");
3867                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3868                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3869                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3870                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3871                         return rte_flow_error_set(error, EINVAL,
3872                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3873                                                   NULL,
3874                                                   "cannot decrease and increase"
3875                                                   " TCP acknowledgment number"
3876                                                   " at the same time");
3877         }
3878         return ret;
3879 }
3880
3881 /**
3882  * Validate the modify-header TTL actions.
3883  *
3884  * @param[in] action_flags
3885  *   Holds the actions detected until now.
3886  * @param[in] action
3887  *   Pointer to the modify action.
3888  * @param[in] item_flags
3889  *   Holds the items detected.
3890  * @param[out] error
3891  *   Pointer to error structure.
3892  *
3893  * @return
3894  *   0 on success, a negative errno value otherwise and rte_errno is set.
3895  */
3896 static int
3897 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3898                                    const struct rte_flow_action *action,
3899                                    const uint64_t item_flags,
3900                                    struct rte_flow_error *error)
3901 {
3902         int ret = 0;
3903         uint64_t layer;
3904
3905         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3906         if (!ret) {
3907                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3908                                  MLX5_FLOW_LAYER_INNER_L3 :
3909                                  MLX5_FLOW_LAYER_OUTER_L3;
3910                 if (!(item_flags & layer))
3911                         return rte_flow_error_set(error, EINVAL,
3912                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3913                                                   NULL,
3914                                                   "no IP protocol in pattern");
3915         }
3916         return ret;
3917 }
3918
3919 /**
3920  * Validate jump action.
3921  *
3922  * @param[in] action
3923  *   Pointer to the jump action.
3924  * @param[in] action_flags
3925  *   Holds the actions detected until now.
3926  * @param[in] attributes
3927  *   Pointer to flow attributes
3928  * @param[in] external
3929  *   Action belongs to flow rule created by request external to PMD.
3930  * @param[out] error
3931  *   Pointer to error structure.
3932  *
3933  * @return
3934  *   0 on success, a negative errno value otherwise and rte_errno is set.
3935  */
3936 static int
3937 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3938                              const struct mlx5_flow_tunnel *tunnel,
3939                              const struct rte_flow_action *action,
3940                              uint64_t action_flags,
3941                              const struct rte_flow_attr *attributes,
3942                              bool external, struct rte_flow_error *error)
3943 {
3944         uint32_t target_group, table;
3945         int ret = 0;
3946         struct flow_grp_info grp_info = {
3947                 .external = !!external,
3948                 .transfer = !!attributes->transfer,
3949                 .fdb_def_rule = 1,
3950                 .std_tbl_fix = 0
3951         };
3952         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3953                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3954                 return rte_flow_error_set(error, EINVAL,
3955                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3956                                           "can't have 2 fate actions in"
3957                                           " same flow");
3958         if (action_flags & MLX5_FLOW_ACTION_METER)
3959                 return rte_flow_error_set(error, ENOTSUP,
3960                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3961                                           "jump with meter not support");
3962         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3963                 return rte_flow_error_set(error, EINVAL,
3964                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3965                                           "E-Switch mirroring can't support"
3966                                           " Sample action and jump action in"
3967                                           " same flow now");
3968         if (!action->conf)
3969                 return rte_flow_error_set(error, EINVAL,
3970                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3971                                           NULL, "action configuration not set");
3972         target_group =
3973                 ((const struct rte_flow_action_jump *)action->conf)->group;
3974         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3975                                        grp_info, error);
3976         if (ret)
3977                 return ret;
3978         if (attributes->group == target_group &&
3979             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3980                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3981                 return rte_flow_error_set(error, EINVAL,
3982                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3983                                           "target group must be other than"
3984                                           " the current flow group");
3985         return 0;
3986 }
3987
3988 /*
3989  * Validate the port_id action.
3990  *
3991  * @param[in] dev
3992  *   Pointer to rte_eth_dev structure.
3993  * @param[in] action_flags
3994  *   Bit-fields that holds the actions detected until now.
3995  * @param[in] action
3996  *   Port_id RTE action structure.
3997  * @param[in] attr
3998  *   Attributes of flow that includes this action.
3999  * @param[out] error
4000  *   Pointer to error structure.
4001  *
4002  * @return
4003  *   0 on success, a negative errno value otherwise and rte_errno is set.
4004  */
4005 static int
4006 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4007                                 uint64_t action_flags,
4008                                 const struct rte_flow_action *action,
4009                                 const struct rte_flow_attr *attr,
4010                                 struct rte_flow_error *error)
4011 {
4012         const struct rte_flow_action_port_id *port_id;
4013         struct mlx5_priv *act_priv;
4014         struct mlx5_priv *dev_priv;
4015         uint16_t port;
4016
4017         if (!attr->transfer)
4018                 return rte_flow_error_set(error, ENOTSUP,
4019                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4020                                           NULL,
4021                                           "port id action is valid in transfer"
4022                                           " mode only");
4023         if (!action || !action->conf)
4024                 return rte_flow_error_set(error, ENOTSUP,
4025                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4026                                           NULL,
4027                                           "port id action parameters must be"
4028                                           " specified");
4029         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4030                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4031                 return rte_flow_error_set(error, EINVAL,
4032                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4033                                           "can have only one fate actions in"
4034                                           " a flow");
4035         dev_priv = mlx5_dev_to_eswitch_info(dev);
4036         if (!dev_priv)
4037                 return rte_flow_error_set(error, rte_errno,
4038                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4039                                           NULL,
4040                                           "failed to obtain E-Switch info");
4041         port_id = action->conf;
4042         port = port_id->original ? dev->data->port_id : port_id->id;
4043         act_priv = mlx5_port_to_eswitch_info(port, false);
4044         if (!act_priv)
4045                 return rte_flow_error_set
4046                                 (error, rte_errno,
4047                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4048                                  "failed to obtain E-Switch port id for port");
4049         if (act_priv->domain_id != dev_priv->domain_id)
4050                 return rte_flow_error_set
4051                                 (error, EINVAL,
4052                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4053                                  "port does not belong to"
4054                                  " E-Switch being configured");
4055         return 0;
4056 }
4057
4058 /**
4059  * Get the maximum number of modify header actions.
4060  *
4061  * @param dev
4062  *   Pointer to rte_eth_dev structure.
4063  * @param flags
4064  *   Flags bits to check if root level.
4065  *
4066  * @return
4067  *   Max number of modify header actions device can support.
4068  */
4069 static inline unsigned int
4070 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4071                               uint64_t flags)
4072 {
4073         /*
4074          * There's no way to directly query the max capacity from FW.
4075          * The maximal value on root table should be assumed to be supported.
4076          */
4077         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4078                 return MLX5_MAX_MODIFY_NUM;
4079         else
4080                 return MLX5_ROOT_TBL_MODIFY_NUM;
4081 }
4082
4083 /**
4084  * Validate the meter action.
4085  *
4086  * @param[in] dev
4087  *   Pointer to rte_eth_dev structure.
4088  * @param[in] action_flags
4089  *   Bit-fields that holds the actions detected until now.
4090  * @param[in] action
4091  *   Pointer to the meter action.
4092  * @param[in] attr
4093  *   Attributes of flow that includes this action.
4094  * @param[out] error
4095  *   Pointer to error structure.
4096  *
4097  * @return
4098  *   0 on success, a negative errno value otherwise and rte_ernno is set.
4099  */
4100 static int
4101 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4102                                 uint64_t action_flags,
4103                                 const struct rte_flow_action *action,
4104                                 const struct rte_flow_attr *attr,
4105                                 struct rte_flow_error *error)
4106 {
4107         struct mlx5_priv *priv = dev->data->dev_private;
4108         const struct rte_flow_action_meter *am = action->conf;
4109         struct mlx5_flow_meter *fm;
4110
4111         if (!am)
4112                 return rte_flow_error_set(error, EINVAL,
4113                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4114                                           "meter action conf is NULL");
4115
4116         if (action_flags & MLX5_FLOW_ACTION_METER)
4117                 return rte_flow_error_set(error, ENOTSUP,
4118                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4119                                           "meter chaining not support");
4120         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4121                 return rte_flow_error_set(error, ENOTSUP,
4122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4123                                           "meter with jump not support");
4124         if (!priv->mtr_en)
4125                 return rte_flow_error_set(error, ENOTSUP,
4126                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4127                                           NULL,
4128                                           "meter action not supported");
4129         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4130         if (!fm)
4131                 return rte_flow_error_set(error, EINVAL,
4132                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4133                                           "Meter not found");
4134         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4135               (!fm->ingress && !attr->ingress && attr->egress) ||
4136               (!fm->egress && !attr->egress && attr->ingress))))
4137                 return rte_flow_error_set(error, EINVAL,
4138                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4139                                           "Flow attributes are either invalid "
4140                                           "or have a conflict with current "
4141                                           "meter attributes");
4142         return 0;
4143 }
4144
4145 /**
4146  * Validate the age action.
4147  *
4148  * @param[in] action_flags
4149  *   Holds the actions detected until now.
4150  * @param[in] action
4151  *   Pointer to the age action.
4152  * @param[in] dev
4153  *   Pointer to the Ethernet device structure.
4154  * @param[out] error
4155  *   Pointer to error structure.
4156  *
4157  * @return
4158  *   0 on success, a negative errno value otherwise and rte_errno is set.
4159  */
4160 static int
4161 flow_dv_validate_action_age(uint64_t action_flags,
4162                             const struct rte_flow_action *action,
4163                             struct rte_eth_dev *dev,
4164                             struct rte_flow_error *error)
4165 {
4166         struct mlx5_priv *priv = dev->data->dev_private;
4167         const struct rte_flow_action_age *age = action->conf;
4168
4169         if (!priv->config.devx || priv->sh->cmng.counter_fallback)
4170                 return rte_flow_error_set(error, ENOTSUP,
4171                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4172                                           NULL,
4173                                           "age action not supported");
4174         if (!(action->conf))
4175                 return rte_flow_error_set(error, EINVAL,
4176                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4177                                           "configuration cannot be null");
4178         if (!(age->timeout))
4179                 return rte_flow_error_set(error, EINVAL,
4180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4181                                           "invalid timeout value 0");
4182         if (action_flags & MLX5_FLOW_ACTION_AGE)
4183                 return rte_flow_error_set(error, EINVAL,
4184                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4185                                           "duplicate age actions set");
4186         return 0;
4187 }
4188
4189 /**
4190  * Validate the modify-header IPv4 DSCP actions.
4191  *
4192  * @param[in] action_flags
4193  *   Holds the actions detected until now.
4194  * @param[in] action
4195  *   Pointer to the modify action.
4196  * @param[in] item_flags
4197  *   Holds the items detected.
4198  * @param[out] error
4199  *   Pointer to error structure.
4200  *
4201  * @return
4202  *   0 on success, a negative errno value otherwise and rte_errno is set.
4203  */
4204 static int
4205 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4206                                          const struct rte_flow_action *action,
4207                                          const uint64_t item_flags,
4208                                          struct rte_flow_error *error)
4209 {
4210         int ret = 0;
4211
4212         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4213         if (!ret) {
4214                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4215                         return rte_flow_error_set(error, EINVAL,
4216                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4217                                                   NULL,
4218                                                   "no ipv4 item in pattern");
4219         }
4220         return ret;
4221 }
4222
4223 /**
4224  * Validate the modify-header IPv6 DSCP actions.
4225  *
4226  * @param[in] action_flags
4227  *   Holds the actions detected until now.
4228  * @param[in] action
4229  *   Pointer to the modify action.
4230  * @param[in] item_flags
4231  *   Holds the items detected.
4232  * @param[out] error
4233  *   Pointer to error structure.
4234  *
4235  * @return
4236  *   0 on success, a negative errno value otherwise and rte_errno is set.
4237  */
4238 static int
4239 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4240                                          const struct rte_flow_action *action,
4241                                          const uint64_t item_flags,
4242                                          struct rte_flow_error *error)
4243 {
4244         int ret = 0;
4245
4246         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4247         if (!ret) {
4248                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4249                         return rte_flow_error_set(error, EINVAL,
4250                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4251                                                   NULL,
4252                                                   "no ipv6 item in pattern");
4253         }
4254         return ret;
4255 }
4256
4257 /**
4258  * Match modify-header resource.
4259  *
4260  * @param list
4261  *   Pointer to the hash list.
4262  * @param entry
4263  *   Pointer to exist resource entry object.
4264  * @param key
4265  *   Key of the new entry.
4266  * @param ctx
4267  *   Pointer to new modify-header resource.
4268  *
4269  * @return
4270  *   0 on matching, non-zero otherwise.
4271  */
4272 int
4273 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4274                         struct mlx5_hlist_entry *entry,
4275                         uint64_t key __rte_unused, void *cb_ctx)
4276 {
4277         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4278         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4279         struct mlx5_flow_dv_modify_hdr_resource *resource =
4280                         container_of(entry, typeof(*resource), entry);
4281         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4282
4283         key_len += ref->actions_num * sizeof(ref->actions[0]);
4284         return ref->actions_num != resource->actions_num ||
4285                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4286 }
4287
/**
 * Create-callback for the modify-header hash list: allocate a new
 * modify-header resource, copy the key fields and the action array from the
 * reference, and create the corresponding DR modification action.
 *
 * @param list
 *   Hash list; list->ctx holds the shared device context.
 * @param key
 *   Precomputed hash key, unused (the full data is copied from the
 *   reference instead).
 * @param cb_ctx
 *   Pointer to mlx5_flow_cb_ctx carrying the reference resource and the
 *   error structure.
 *
 * @return
 *   Pointer to the new hash list entry on success, NULL otherwise and the
 *   error structure is populated.
 */
struct mlx5_hlist_entry *
flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *ns;
	struct mlx5_flow_dv_modify_hdr_resource *entry;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	int ret;
	/* Variable-length tail: one element per modification action. */
	uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
	/* Fixed key part: from ft_type to the end of the structure. */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
			    SOCKET_ID_ANY);
	if (!entry) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	/* Copy the key fields and the trailing action array in one shot. */
	rte_memcpy(&entry->ft_type,
		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
		   key_len + data_len);
	/* Select the DR domain matching the flow table type. */
	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_domain;
	else
		ns = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_action_modify_header
					(sh->ctx, ns, entry,
					 data_len, &entry->action);
	if (ret) {
		mlx5_free(entry);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create modification action");
		return NULL;
	}
	return &entry->entry;
}
4330
4331 /**
4332  * Validate the sample action.
4333  *
4334  * @param[in] action_flags
4335  *   Holds the actions detected until now.
4336  * @param[in] action
4337  *   Pointer to the sample action.
4338  * @param[in] dev
4339  *   Pointer to the Ethernet device structure.
4340  * @param[in] attr
4341  *   Attributes of flow that includes this action.
4342  * @param[out] error
4343  *   Pointer to error structure.
4344  *
4345  * @return
4346  *   0 on success, a negative errno value otherwise and rte_errno is set.
4347  */
static int
flow_dv_validate_action_sample(uint64_t action_flags,
			       const struct rte_flow_action *action,
			       struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *dev_conf = &priv->config;
	const struct rte_flow_action_sample *sample = action->conf;
	const struct rte_flow_action *act;
	/* Flags of the actions found inside the sample sub-action list. */
	uint64_t sub_action_flags = 0;
	/* 0xFFFF means no QUEUE sub-action has been seen yet. */
	uint16_t queue_index = 0xFFFF;
	int actions_n = 0;
	int ret;
	/* NOTE(review): fdb_mirror appears to be a file-scope flag marking
	 * an E-Switch mirroring flow - confirm its declaration and readers
	 * elsewhere in this file.
	 */
	fdb_mirror = 0;

	if (!sample)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be NULL");
	if (sample->ratio == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "ratio value starts from 1");
	if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "sample action not supported");
	/* Only one sample action per flow is supported. */
	if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Multiple sample actions not "
					  "supported");
	/* Sample must precede meter and jump in the action list. */
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, meter should "
					  "be after sample action");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, jump should "
					  "be after sample action");
	/* Validate every sub-action attached to the sample action. */
	act = sample->actions;
	for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  act, "too many actions");
		switch (act->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(act,
							      sub_action_flags,
							      dev,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Remember the queue for the hairpin check below. */
			queue_index = ((const struct rte_flow_action_queue *)
							(act->conf))->index;
			sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = flow_dv_validate_action_mark(dev, act,
							   sub_action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/* Extended metadata mode carries MARK in a register. */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
				sub_action_flags |= MLX5_FLOW_ACTION_MARK |
						MLX5_FLOW_ACTION_MARK_EXT;
			else
				sub_action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      sub_action_flags,
							      act,
							      attr,
							      error);
			if (ret)
				return ret;
			sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* May account for several actions internally. */
			ret = flow_dv_validate_action_raw_encap_decap
				(dev, NULL, act->conf, attr, &sub_action_flags,
				 &actions_n, error);
			if (ret < 0)
				return ret;
			++actions_n;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Doesn't support optional "
						  "action");
		}
	}
	/* Cross-check the collected sub-actions against flow attributes. */
	if (attr->ingress && !attr->transfer) {
		if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Ingress must has a dest "
						  "QUEUE for Sample");
	} else if (attr->egress && !attr->transfer) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "Sample Only support Ingress "
					  "or E-Switch");
	} else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
		/* E-Switch with sub-actions: mirroring, ratio must be 1. */
		MLX5_ASSERT(attr->transfer);
		if (sample->ratio > 1)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "E-Switch doesn't support "
						  "any optional action "
						  "for sampling");
		fdb_mirror = 1;
		if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action QUEUE");
		if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "E-Switch must has a dest "
						  "port for mirroring");
	}
	/* Continue validation for Xcap actions.*/
	/* Encap/decap are only allowed together with a hairpin queue. */
	if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
	    (queue_index == 0xFFFF ||
	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
		if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
		     MLX5_FLOW_XCAP_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap and decap "
						  "combination aren't "
						  "supported");
		if (!attr->transfer && attr->ingress && (sub_action_flags &
							MLX5_FLOW_ACTION_ENCAP))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap is not supported"
						  " for ingress traffic");
	}
	return 0;
}
4514
4515 /**
4516  * Find existing modify-header resource or create and register a new one.
4517  *
4518  * @param dev[in, out]
4519  *   Pointer to rte_eth_dev structure.
4520  * @param[in, out] resource
4521  *   Pointer to modify-header resource.
4522  * @parm[in, out] dev_flow
4523  *   Pointer to the dev_flow.
4524  * @param[out] error
4525  *   pointer to error structure.
4526  *
4527  * @return
4528  *   0 on success otherwise -errno and errno is set.
4529  */
4530 static int
4531 flow_dv_modify_hdr_resource_register
4532                         (struct rte_eth_dev *dev,
4533                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4534                          struct mlx5_flow *dev_flow,
4535                          struct rte_flow_error *error)
4536 {
4537         struct mlx5_priv *priv = dev->data->dev_private;
4538         struct mlx5_dev_ctx_shared *sh = priv->sh;
4539         uint32_t key_len = sizeof(*resource) -
4540                            offsetof(typeof(*resource), ft_type) +
4541                            resource->actions_num * sizeof(resource->actions[0]);
4542         struct mlx5_hlist_entry *entry;
4543         struct mlx5_flow_cb_ctx ctx = {
4544                 .error = error,
4545                 .data = resource,
4546         };
4547
4548         resource->flags = dev_flow->dv.group ? 0 :
4549                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4550         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4551                                     resource->flags))
4552                 return rte_flow_error_set(error, EOVERFLOW,
4553                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4554                                           "too many modify header items");
4555         resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4556         entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
4557         if (!entry)
4558                 return -rte_errno;
4559         resource = container_of(entry, typeof(*resource), entry);
4560         dev_flow->handle->dvh.modify_hdr = resource;
4561         return 0;
4562 }
4563
4564 /**
4565  * Get DV flow counter by index.
4566  *
4567  * @param[in] dev
4568  *   Pointer to the Ethernet device structure.
4569  * @param[in] idx
4570  *   mlx5 flow counter index in the container.
4571  * @param[out] ppool
4572  *   mlx5 flow counter pool in the container,
4573  *
4574  * @return
4575  *   Pointer to the counter, NULL otherwise.
4576  */
4577 static struct mlx5_flow_counter *
4578 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4579                            uint32_t idx,
4580                            struct mlx5_flow_counter_pool **ppool)
4581 {
4582         struct mlx5_priv *priv = dev->data->dev_private;
4583         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4584         struct mlx5_flow_counter_pool *pool;
4585
4586         /* Decrease to original index and clear shared bit. */
4587         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4588         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4589         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4590         MLX5_ASSERT(pool);
4591         if (ppool)
4592                 *ppool = pool;
4593         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4594 }
4595
4596 /**
4597  * Check the devx counter belongs to the pool.
4598  *
4599  * @param[in] pool
4600  *   Pointer to the counter pool.
4601  * @param[in] id
4602  *   The counter devx ID.
4603  *
4604  * @return
4605  *   True if counter belongs to the pool, false otherwise.
4606  */
4607 static bool
4608 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4609 {
4610         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4611                    MLX5_COUNTERS_PER_POOL;
4612
4613         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4614                 return true;
4615         return false;
4616 }
4617
4618 /**
4619  * Get a pool by devx counter ID.
4620  *
4621  * @param[in] cmng
4622  *   Pointer to the counter management.
4623  * @param[in] id
4624  *   The counter devx ID.
4625  *
4626  * @return
 *   The counter pool pointer if exists, NULL otherwise.
4628  */
static struct mlx5_flow_counter_pool *
flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
{
	uint32_t i;
	struct mlx5_flow_counter_pool *pool = NULL;

	rte_spinlock_lock(&cmng->pool_update_sl);
	/* Check last used pool. */
	if (cmng->last_pool_idx != POOL_IDX_INVALID &&
	    flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
		pool = cmng->pools[cmng->last_pool_idx];
		goto out;
	}
	/* ID out of range means no suitable pool in the container. */
	if (id > cmng->max_id || id < cmng->min_id)
		goto out;
	/*
	 * Find the pool from the end of the container, since mostly counter
	 * ID is sequence increasing, and the last pool should be the needed
	 * one.
	 */
	i = cmng->n_valid;
	while (i--) {
		struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];

		if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
			pool = pool_tmp;
			break;
		}
	}
out:
	/* NOTE(review): the returned pointer is used after unlock; this
	 * assumes pools are never removed from the container at runtime -
	 * confirm against the container teardown path.
	 */
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
4663
4664 /**
4665  * Resize a counter container.
4666  *
4667  * @param[in] dev
4668  *   Pointer to the Ethernet device structure.
4669  *
4670  * @return
4671  *   0 on success, otherwise negative errno value and rte_errno is set.
4672  */
4673 static int
4674 flow_dv_container_resize(struct rte_eth_dev *dev)
4675 {
4676         struct mlx5_priv *priv = dev->data->dev_private;
4677         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4678         void *old_pools = cmng->pools;
4679         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4680         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4681         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4682
4683         if (!pools) {
4684                 rte_errno = ENOMEM;
4685                 return -ENOMEM;
4686         }
4687         if (old_pools)
4688                 memcpy(pools, old_pools, cmng->n *
4689                                        sizeof(struct mlx5_flow_counter_pool *));
4690         cmng->n = resize;
4691         cmng->pools = pools;
4692         if (old_pools)
4693                 mlx5_free(old_pools);
4694         return 0;
4695 }
4696
4697 /**
4698  * Query a devx flow counter.
4699  *
4700  * @param[in] dev
4701  *   Pointer to the Ethernet device structure.
4702  * @param[in] cnt
4703  *   Index to the flow counter.
4704  * @param[out] pkts
4705  *   The statistics value of packets.
4706  * @param[out] bytes
4707  *   The statistics value of bytes.
4708  *
4709  * @return
4710  *   0 on success, otherwise a negative errno value and rte_errno is set.
4711  */
static inline int
_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
		     uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	int offset;

	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	/* Fallback mode: query the counter's own devx object directly. */
	if (priv->sh->cmng.counter_fallback)
		return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
					0, pkts, bytes, 0, NULL, NULL, 0);
	/* Batch mode: read the values cached by the asynchronous query. */
	rte_spinlock_lock(&pool->sl);
	if (!pool->raw) {
		/* No raw data yet - report zeros until the first batch
		 * query completes.
		 */
		*pkts = 0;
		*bytes = 0;
	} else {
		offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
	}
	rte_spinlock_unlock(&pool->sl);
	return 0;
}
4738
4739 /**
4740  * Create and initialize a new counter pool.
4741  *
4742  * @param[in] dev
4743  *   Pointer to the Ethernet device structure.
4744  * @param[out] dcs
4745  *   The devX counter handle.
4746  * @param[in] age
4747  *   Whether the pool is for counter that was allocated for aging.
 *
 * The counter container is resized internally (under the pool-update lock)
 * when it has no room left for the new pool.
4750  *
4751  * @return
4752  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
4753  */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Counters are laid out right after the pool header, aging
	 * parameters (if requested) after the counters.
	 */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* Grow the container when it is full. */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		/* Track the devx ID range covered by all pools so that
		 * flow_dv_find_pool_by_id() can reject out-of-range IDs
		 * quickly.
		 */
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
4801
4802 /**
4803  * Prepare a new counter and/or a new counter pool.
4804  *
4805  * @param[in] dev
4806  *   Pointer to the Ethernet device structure.
4807  * @param[out] cnt_free
4808  *   Where to put the pointer of a new counter.
4809  * @param[in] age
4810  *   Whether the pool is for counter that was allocated for aging.
4811  *
4812  * @return
4813  *   The counter pool pointer and @p cnt_free is set on success,
4814  *   NULL otherwise and rte_errno is set.
4815  */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		/* Reuse the pool covering this devx ID, create one if none. */
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		/* The counter owns the devx object until it becomes active. */
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Bulk mode: a 0x4 bulk bitmap allocates a whole pool worth of
	 * counters in one devx object.
	 * NOTE(review): confirm the bitmap encoding against the DevX
	 * flow-counter allocation API.
	 */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/* Counter 0 goes to the caller; push 1..N-1 onto the free list. */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
4875
4876 /**
4877  * Allocate a flow counter.
4878  *
4879  * @param[in] dev
4880  *   Pointer to the Ethernet device structure.
4881  * @param[in] age
4882  *   Whether the counter was allocated for aging.
4883  *
4884  * @return
4885  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4886  */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty: allocate a new pool and take a counter from it. */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	/* Fallback: the counter's private devx object becomes active. */
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			/* Bulk mode: offset inside the pool's bulk object. */
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback: one devx object per counter, offset 0. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	return cnt_idx;
err:
	/* Put the counter back on the free list on any failure. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
4955
4956 /**
4957  * Allocate a shared flow counter.
4958  *
4959  * @param[in] ctx
4960  *   Pointer to the shared counter configuration.
4961  * @param[in] data
4962  *   Pointer to save the allocated counter index.
4963  *
4964  * @return
4965  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4966  */
4967
4968 static int32_t
4969 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4970 {
4971         struct mlx5_shared_counter_conf *conf = ctx;
4972         struct rte_eth_dev *dev = conf->dev;
4973         struct mlx5_flow_counter *cnt;
4974
4975         data->dword = flow_dv_counter_alloc(dev, 0);
4976         data->dword |= MLX5_CNT_SHARED_OFFSET;
4977         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4978         cnt->shared_info.id = conf->id;
4979         return 0;
4980 }
4981
4982 /**
4983  * Get a shared flow counter.
4984  *
4985  * @param[in] dev
4986  *   Pointer to the Ethernet device structure.
4987  * @param[in] id
4988  *   Counter identifier.
4989  *
4990  * @return
4991  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4992  */
4993 static uint32_t
4994 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4995 {
4996         struct mlx5_priv *priv = dev->data->dev_private;
4997         struct mlx5_shared_counter_conf conf = {
4998                 .dev = dev,
4999                 .id = id,
5000         };
5001         union mlx5_l3t_data data = {
5002                 .dword = 0,
5003         };
5004
5005         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5006                                flow_dv_counter_alloc_shared_cb, &conf);
5007         return data.dword;
5008 }
5009
5010 /**
5011  * Get age param from counter index.
5012  *
5013  * @param[in] dev
5014  *   Pointer to the Ethernet device structure.
5015  * @param[in] counter
5016  *   Index to the counter handler.
5017  *
5018  * @return
5019  *   The aging parameter specified for the counter index.
5020  */
5021 static struct mlx5_age_param*
5022 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5023                                 uint32_t counter)
5024 {
5025         struct mlx5_flow_counter *cnt;
5026         struct mlx5_flow_counter_pool *pool = NULL;
5027
5028         flow_dv_counter_get_by_idx(dev, counter, &pool);
5029         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5030         cnt = MLX5_POOL_GET_CNT(pool, counter);
5031         return MLX5_CNT_TO_AGE(cnt);
5032 }
5033
/**
 * Remove a flow counter from aged counter list.
 *
 * Transitions the counter's age state to AGE_FREE. If the counter was still
 * an aging candidate, a single atomic CAS suffices; otherwise it has been
 * moved onto the port's aged-counters list and must be unlinked under the
 * list lock first.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 * @param[in] cnt
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
				uint32_t counter, struct mlx5_flow_counter *cnt)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	/* Fast path: still a candidate, never linked into the aged list. */
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/**
		 * Take the lock even though the counter already aged out:
		 * the aging query path may still be manipulating the list.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
5068
/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler (0 means no counter, a no-op).
 */
static void
flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	/*
	 * For a shared counter, only the last release actually frees it;
	 * a non-zero return from mlx5_l3t_clear_entry() presumably means
	 * other references remain - TODO confirm against l3t semantics.
	 */
	if (IS_SHARED_CNT(counter) &&
	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
		return;
	if (pool->is_aged)
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	cnt->pool = pool;
	/*
	 * Put the counter back to the list to be updated in non-fallback
	 * mode. Currently, we are using two lists alternately, while one is
	 * in query, add the freed counter to the other list based on the
	 * pool query_gen value. After query finishes, add the counters on
	 * the list to the global container counter list. The list changes
	 * while query starts. In this case, a lock is not needed as the
	 * query callback and the release function operate on different
	 * lists.
	 */
	if (!priv->sh->cmng.counter_fallback) {
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		/* Fallback mode: return the counter to the global free list. */
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
5119
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Tunnel offload context of the rule, NULL for non-tunnel rules.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] grp_info
 *   Group translation info (external/transfer/fdb_def_rule flags), passed
 *   by value.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non root table.
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct mlx5_flow_tunnel *tunnel,
			    const struct rte_flow_attr *attributes,
			    struct flow_grp_info grp_info,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;
	int ret = 0;

#ifndef HAVE_MLX5DV_DR
	/* Without DR support only the root table (group 0) is available. */
	RTE_SET_USED(tunnel);
	RTE_SET_USED(grp_info);
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table = 0;

	/* Map the rte_flow group to the hardware table index. */
	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	/* Table 0 is the root table; signal it through the return value. */
	if (!table)
		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
	/* MLX5_FLOW_PRIO_RSVD lets the PMD choose; skip the range check. */
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	/* Exactly one of ingress/egress must be set. */
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return ret;
}
5197
5198 /**
5199  * Internal validation function. For validating both actions and items.
5200  *
5201  * @param[in] dev
5202  *   Pointer to the rte_eth_dev structure.
5203  * @param[in] attr
5204  *   Pointer to the flow attributes.
5205  * @param[in] items
5206  *   Pointer to the list of items.
5207  * @param[in] actions
5208  *   Pointer to the list of actions.
5209  * @param[in] external
5210  *   This flow rule is created by request external to PMD.
5211  * @param[in] hairpin
5212  *   Number of hairpin TX actions, 0 means classic flow.
5213  * @param[out] error
5214  *   Pointer to the error structure.
5215  *
5216  * @return
5217  *   0 on success, a negative errno value otherwise and rte_errno is set.
5218  */
5219 static int
5220 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5221                  const struct rte_flow_item items[],
5222                  const struct rte_flow_action actions[],
5223                  bool external, int hairpin, struct rte_flow_error *error)
5224 {
5225         int ret;
5226         uint64_t action_flags = 0;
5227         uint64_t item_flags = 0;
5228         uint64_t last_item = 0;
5229         uint8_t next_protocol = 0xff;
5230         uint16_t ether_type = 0;
5231         int actions_n = 0;
5232         uint8_t item_ipv6_proto = 0;
5233         const struct rte_flow_item *gre_item = NULL;
5234         const struct rte_flow_action_raw_decap *decap;
5235         const struct rte_flow_action_raw_encap *encap;
5236         const struct rte_flow_action_rss *rss;
5237         const struct rte_flow_item_tcp nic_tcp_mask = {
5238                 .hdr = {
5239                         .tcp_flags = 0xFF,
5240                         .src_port = RTE_BE16(UINT16_MAX),
5241                         .dst_port = RTE_BE16(UINT16_MAX),
5242                 }
5243         };
5244         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5245                 .hdr = {
5246                         .src_addr =
5247                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5248                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5249                         .dst_addr =
5250                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5251                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5252                         .vtc_flow = RTE_BE32(0xffffffff),
5253                         .proto = 0xff,
5254                         .hop_limits = 0xff,
5255                 },
5256                 .has_frag_ext = 1,
5257         };
5258         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5259                 .hdr = {
5260                         .common = {
5261                                 .u32 =
5262                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5263                                         .type = 0xFF,
5264                                         }).u32),
5265                         },
5266                         .dummy[0] = 0xffffffff,
5267                 },
5268         };
5269         struct mlx5_priv *priv = dev->data->dev_private;
5270         struct mlx5_dev_config *dev_conf = &priv->config;
5271         uint16_t queue_index = 0xFFFF;
5272         const struct rte_flow_item_vlan *vlan_m = NULL;
5273         int16_t rw_act_num = 0;
5274         uint64_t is_root;
5275         const struct mlx5_flow_tunnel *tunnel;
5276         struct flow_grp_info grp_info = {
5277                 .external = !!external,
5278                 .transfer = !!attr->transfer,
5279                 .fdb_def_rule = !!priv->fdb_def_rule,
5280         };
5281         const struct rte_eth_hairpin_conf *conf;
5282
5283         if (items == NULL)
5284                 return -1;
5285         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5286                 tunnel = flow_items_to_tunnel(items);
5287                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5288                                 MLX5_FLOW_ACTION_DECAP;
5289         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5290                 tunnel = flow_actions_to_tunnel(actions);
5291                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5292         } else {
5293                 tunnel = NULL;
5294         }
5295         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5296                                 (dev, tunnel, attr, items, actions);
5297         ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
5298         if (ret < 0)
5299                 return ret;
5300         is_root = (uint64_t)ret;
5301         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5302                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5303                 int type = items->type;
5304
5305                 if (!mlx5_flow_os_item_supported(type))
5306                         return rte_flow_error_set(error, ENOTSUP,
5307                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5308                                                   NULL, "item not supported");
5309                 switch (type) {
5310                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5311                         if (items[0].type != (typeof(items[0].type))
5312                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5313                                 return rte_flow_error_set
5314                                                 (error, EINVAL,
5315                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5316                                                 NULL, "MLX5 private items "
5317                                                 "must be the first");
5318                         break;
5319                 case RTE_FLOW_ITEM_TYPE_VOID:
5320                         break;
5321                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5322                         ret = flow_dv_validate_item_port_id
5323                                         (dev, items, attr, item_flags, error);
5324                         if (ret < 0)
5325                                 return ret;
5326                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5327                         break;
5328                 case RTE_FLOW_ITEM_TYPE_ETH:
5329                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5330                                                           true, error);
5331                         if (ret < 0)
5332                                 return ret;
5333                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5334                                              MLX5_FLOW_LAYER_OUTER_L2;
5335                         if (items->mask != NULL && items->spec != NULL) {
5336                                 ether_type =
5337                                         ((const struct rte_flow_item_eth *)
5338                                          items->spec)->type;
5339                                 ether_type &=
5340                                         ((const struct rte_flow_item_eth *)
5341                                          items->mask)->type;
5342                                 ether_type = rte_be_to_cpu_16(ether_type);
5343                         } else {
5344                                 ether_type = 0;
5345                         }
5346                         break;
5347                 case RTE_FLOW_ITEM_TYPE_VLAN:
5348                         ret = flow_dv_validate_item_vlan(items, item_flags,
5349                                                          dev, error);
5350                         if (ret < 0)
5351                                 return ret;
5352                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5353                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5354                         if (items->mask != NULL && items->spec != NULL) {
5355                                 ether_type =
5356                                         ((const struct rte_flow_item_vlan *)
5357                                          items->spec)->inner_type;
5358                                 ether_type &=
5359                                         ((const struct rte_flow_item_vlan *)
5360                                          items->mask)->inner_type;
5361                                 ether_type = rte_be_to_cpu_16(ether_type);
5362                         } else {
5363                                 ether_type = 0;
5364                         }
5365                         /* Store outer VLAN mask for of_push_vlan action. */
5366                         if (!tunnel)
5367                                 vlan_m = items->mask;
5368                         break;
5369                 case RTE_FLOW_ITEM_TYPE_IPV4:
5370                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5371                                                   &item_flags, &tunnel);
5372                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5373                                                          last_item, ether_type,
5374                                                          error);
5375                         if (ret < 0)
5376                                 return ret;
5377                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5378                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5379                         if (items->mask != NULL &&
5380                             ((const struct rte_flow_item_ipv4 *)
5381                              items->mask)->hdr.next_proto_id) {
5382                                 next_protocol =
5383                                         ((const struct rte_flow_item_ipv4 *)
5384                                          (items->spec))->hdr.next_proto_id;
5385                                 next_protocol &=
5386                                         ((const struct rte_flow_item_ipv4 *)
5387                                          (items->mask))->hdr.next_proto_id;
5388                         } else {
5389                                 /* Reset for inner layer. */
5390                                 next_protocol = 0xff;
5391                         }
5392                         break;
5393                 case RTE_FLOW_ITEM_TYPE_IPV6:
5394                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5395                                                   &item_flags, &tunnel);
5396                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5397                                                            last_item,
5398                                                            ether_type,
5399                                                            &nic_ipv6_mask,
5400                                                            error);
5401                         if (ret < 0)
5402                                 return ret;
5403                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5404                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5405                         if (items->mask != NULL &&
5406                             ((const struct rte_flow_item_ipv6 *)
5407                              items->mask)->hdr.proto) {
5408                                 item_ipv6_proto =
5409                                         ((const struct rte_flow_item_ipv6 *)
5410                                          items->spec)->hdr.proto;
5411                                 next_protocol =
5412                                         ((const struct rte_flow_item_ipv6 *)
5413                                          items->spec)->hdr.proto;
5414                                 next_protocol &=
5415                                         ((const struct rte_flow_item_ipv6 *)
5416                                          items->mask)->hdr.proto;
5417                         } else {
5418                                 /* Reset for inner layer. */
5419                                 next_protocol = 0xff;
5420                         }
5421                         break;
5422                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5423                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5424                                                                   item_flags,
5425                                                                   error);
5426                         if (ret < 0)
5427                                 return ret;
5428                         last_item = tunnel ?
5429                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5430                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5431                         if (items->mask != NULL &&
5432                             ((const struct rte_flow_item_ipv6_frag_ext *)
5433                              items->mask)->hdr.next_header) {
5434                                 next_protocol =
5435                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5436                                  items->spec)->hdr.next_header;
5437                                 next_protocol &=
5438                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5439                                  items->mask)->hdr.next_header;
5440                         } else {
5441                                 /* Reset for inner layer. */
5442                                 next_protocol = 0xff;
5443                         }
5444                         break;
5445                 case RTE_FLOW_ITEM_TYPE_TCP:
5446                         ret = mlx5_flow_validate_item_tcp
5447                                                 (items, item_flags,
5448                                                  next_protocol,
5449                                                  &nic_tcp_mask,
5450                                                  error);
5451                         if (ret < 0)
5452                                 return ret;
5453                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5454                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5455                         break;
5456                 case RTE_FLOW_ITEM_TYPE_UDP:
5457                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5458                                                           next_protocol,
5459                                                           error);
5460                         if (ret < 0)
5461                                 return ret;
5462                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5463                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5464                         break;
5465                 case RTE_FLOW_ITEM_TYPE_GRE:
5466                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5467                                                           next_protocol, error);
5468                         if (ret < 0)
5469                                 return ret;
5470                         gre_item = items;
5471                         last_item = MLX5_FLOW_LAYER_GRE;
5472                         break;
5473                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5474                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5475                                                             next_protocol,
5476                                                             error);
5477                         if (ret < 0)
5478                                 return ret;
5479                         last_item = MLX5_FLOW_LAYER_NVGRE;
5480                         break;
5481                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5482                         ret = mlx5_flow_validate_item_gre_key
5483                                 (items, item_flags, gre_item, error);
5484                         if (ret < 0)
5485                                 return ret;
5486                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5487                         break;
5488                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5489                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5490                                                             error);
5491                         if (ret < 0)
5492                                 return ret;
5493                         last_item = MLX5_FLOW_LAYER_VXLAN;
5494                         break;
5495                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5496                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5497                                                                 item_flags, dev,
5498                                                                 error);
5499                         if (ret < 0)
5500                                 return ret;
5501                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5502                         break;
5503                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5504                         ret = mlx5_flow_validate_item_geneve(items,
5505                                                              item_flags, dev,
5506                                                              error);
5507                         if (ret < 0)
5508                                 return ret;
5509                         last_item = MLX5_FLOW_LAYER_GENEVE;
5510                         break;
5511                 case RTE_FLOW_ITEM_TYPE_MPLS:
5512                         ret = mlx5_flow_validate_item_mpls(dev, items,
5513                                                            item_flags,
5514                                                            last_item, error);
5515                         if (ret < 0)
5516                                 return ret;
5517                         last_item = MLX5_FLOW_LAYER_MPLS;
5518                         break;
5519
5520                 case RTE_FLOW_ITEM_TYPE_MARK:
5521                         ret = flow_dv_validate_item_mark(dev, items, attr,
5522                                                          error);
5523                         if (ret < 0)
5524                                 return ret;
5525                         last_item = MLX5_FLOW_ITEM_MARK;
5526                         break;
5527                 case RTE_FLOW_ITEM_TYPE_META:
5528                         ret = flow_dv_validate_item_meta(dev, items, attr,
5529                                                          error);
5530                         if (ret < 0)
5531                                 return ret;
5532                         last_item = MLX5_FLOW_ITEM_METADATA;
5533                         break;
5534                 case RTE_FLOW_ITEM_TYPE_ICMP:
5535                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5536                                                            next_protocol,
5537                                                            error);
5538                         if (ret < 0)
5539                                 return ret;
5540                         last_item = MLX5_FLOW_LAYER_ICMP;
5541                         break;
5542                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5543                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5544                                                             next_protocol,
5545                                                             error);
5546                         if (ret < 0)
5547                                 return ret;
5548                         item_ipv6_proto = IPPROTO_ICMPV6;
5549                         last_item = MLX5_FLOW_LAYER_ICMP6;
5550                         break;
5551                 case RTE_FLOW_ITEM_TYPE_TAG:
5552                         ret = flow_dv_validate_item_tag(dev, items,
5553                                                         attr, error);
5554                         if (ret < 0)
5555                                 return ret;
5556                         last_item = MLX5_FLOW_ITEM_TAG;
5557                         break;
5558                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5559                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5560                         break;
5561                 case RTE_FLOW_ITEM_TYPE_GTP:
5562                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5563                                                         error);
5564                         if (ret < 0)
5565                                 return ret;
5566                         last_item = MLX5_FLOW_LAYER_GTP;
5567                         break;
5568                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5569                         /* Capacity will be checked in the translate stage. */
5570                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5571                                                             last_item,
5572                                                             ether_type,
5573                                                             &nic_ecpri_mask,
5574                                                             error);
5575                         if (ret < 0)
5576                                 return ret;
5577                         last_item = MLX5_FLOW_LAYER_ECPRI;
5578                         break;
5579                 default:
5580                         return rte_flow_error_set(error, ENOTSUP,
5581                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5582                                                   NULL, "item not supported");
5583                 }
5584                 item_flags |= last_item;
5585         }
5586         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5587                 int type = actions->type;
5588
5589                 if (!mlx5_flow_os_action_supported(type))
5590                         return rte_flow_error_set(error, ENOTSUP,
5591                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5592                                                   actions,
5593                                                   "action not supported");
5594                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5595                         return rte_flow_error_set(error, ENOTSUP,
5596                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5597                                                   actions, "too many actions");
5598                 switch (type) {
5599                 case RTE_FLOW_ACTION_TYPE_VOID:
5600                         break;
5601                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5602                         ret = flow_dv_validate_action_port_id(dev,
5603                                                               action_flags,
5604                                                               actions,
5605                                                               attr,
5606                                                               error);
5607                         if (ret)
5608                                 return ret;
5609                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5610                         ++actions_n;
5611                         break;
5612                 case RTE_FLOW_ACTION_TYPE_FLAG:
5613                         ret = flow_dv_validate_action_flag(dev, action_flags,
5614                                                            attr, error);
5615                         if (ret < 0)
5616                                 return ret;
5617                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5618                                 /* Count all modify-header actions as one. */
5619                                 if (!(action_flags &
5620                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5621                                         ++actions_n;
5622                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5623                                                 MLX5_FLOW_ACTION_MARK_EXT;
5624                         } else {
5625                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5626                                 ++actions_n;
5627                         }
5628                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5629                         break;
5630                 case RTE_FLOW_ACTION_TYPE_MARK:
5631                         ret = flow_dv_validate_action_mark(dev, actions,
5632                                                            action_flags,
5633                                                            attr, error);
5634                         if (ret < 0)
5635                                 return ret;
5636                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5637                                 /* Count all modify-header actions as one. */
5638                                 if (!(action_flags &
5639                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5640                                         ++actions_n;
5641                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5642                                                 MLX5_FLOW_ACTION_MARK_EXT;
5643                         } else {
5644                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5645                                 ++actions_n;
5646                         }
5647                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5648                         break;
5649                 case RTE_FLOW_ACTION_TYPE_SET_META:
5650                         ret = flow_dv_validate_action_set_meta(dev, actions,
5651                                                                action_flags,
5652                                                                attr, error);
5653                         if (ret < 0)
5654                                 return ret;
5655                         /* Count all modify-header actions as one action. */
5656                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5657                                 ++actions_n;
5658                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5659                         rw_act_num += MLX5_ACT_NUM_SET_META;
5660                         break;
5661                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5662                         ret = flow_dv_validate_action_set_tag(dev, actions,
5663                                                               action_flags,
5664                                                               attr, error);
5665                         if (ret < 0)
5666                                 return ret;
5667                         /* Count all modify-header actions as one action. */
5668                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5669                                 ++actions_n;
5670                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5671                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5672                         break;
5673                 case RTE_FLOW_ACTION_TYPE_DROP:
5674                         ret = mlx5_flow_validate_action_drop(action_flags,
5675                                                              attr, error);
5676                         if (ret < 0)
5677                                 return ret;
5678                         action_flags |= MLX5_FLOW_ACTION_DROP;
5679                         ++actions_n;
5680                         break;
5681                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5682                         ret = mlx5_flow_validate_action_queue(actions,
5683                                                               action_flags, dev,
5684                                                               attr, error);
5685                         if (ret < 0)
5686                                 return ret;
5687                         queue_index = ((const struct rte_flow_action_queue *)
5688                                                         (actions->conf))->index;
5689                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5690                         ++actions_n;
5691                         break;
5692                 case RTE_FLOW_ACTION_TYPE_RSS:
5693                         rss = actions->conf;
5694                         ret = mlx5_flow_validate_action_rss(actions,
5695                                                             action_flags, dev,
5696                                                             attr, item_flags,
5697                                                             error);
5698                         if (ret < 0)
5699                                 return ret;
5700                         if (rss != NULL && rss->queue_num)
5701                                 queue_index = rss->queue[0];
5702                         action_flags |= MLX5_FLOW_ACTION_RSS;
5703                         ++actions_n;
5704                         break;
5705                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5706                         ret =
5707                         mlx5_flow_validate_action_default_miss(action_flags,
5708                                         attr, error);
5709                         if (ret < 0)
5710                                 return ret;
5711                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5712                         ++actions_n;
5713                         break;
5714                 case RTE_FLOW_ACTION_TYPE_COUNT:
5715                         ret = flow_dv_validate_action_count(dev, error);
5716                         if (ret < 0)
5717                                 return ret;
5718                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5719                         ++actions_n;
5720                         break;
5721                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5722                         if (flow_dv_validate_action_pop_vlan(dev,
5723                                                              action_flags,
5724                                                              actions,
5725                                                              item_flags, attr,
5726                                                              error))
5727                                 return -rte_errno;
5728                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5729                         ++actions_n;
5730                         break;
5731                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5732                         ret = flow_dv_validate_action_push_vlan(dev,
5733                                                                 action_flags,
5734                                                                 vlan_m,
5735                                                                 actions, attr,
5736                                                                 error);
5737                         if (ret < 0)
5738                                 return ret;
5739                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5740                         ++actions_n;
5741                         break;
5742                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5743                         ret = flow_dv_validate_action_set_vlan_pcp
5744                                                 (action_flags, actions, error);
5745                         if (ret < 0)
5746                                 return ret;
5747                         /* Count PCP with push_vlan command. */
5748                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5749                         break;
5750                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5751                         ret = flow_dv_validate_action_set_vlan_vid
5752                                                 (item_flags, action_flags,
5753                                                  actions, error);
5754                         if (ret < 0)
5755                                 return ret;
5756                         /* Count VID with push_vlan command. */
5757                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5758                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5759                         break;
5760                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5761                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5762                         ret = flow_dv_validate_action_l2_encap(dev,
5763                                                                action_flags,
5764                                                                actions, attr,
5765                                                                error);
5766                         if (ret < 0)
5767                                 return ret;
5768                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5769                         ++actions_n;
5770                         break;
5771                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5772                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5773                         ret = flow_dv_validate_action_decap(dev, action_flags,
5774                                                             attr, error);
5775                         if (ret < 0)
5776                                 return ret;
5777                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5778                         ++actions_n;
5779                         break;
5780                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5781                         ret = flow_dv_validate_action_raw_encap_decap
5782                                 (dev, NULL, actions->conf, attr, &action_flags,
5783                                  &actions_n, error);
5784                         if (ret < 0)
5785                                 return ret;
5786                         break;
5787                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5788                         decap = actions->conf;
5789                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5790                                 ;
5791                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5792                                 encap = NULL;
5793                                 actions--;
5794                         } else {
5795                                 encap = actions->conf;
5796                         }
5797                         ret = flow_dv_validate_action_raw_encap_decap
5798                                            (dev,
5799                                             decap ? decap : &empty_decap, encap,
5800                                             attr, &action_flags, &actions_n,
5801                                             error);
5802                         if (ret < 0)
5803                                 return ret;
5804                         break;
5805                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5806                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5807                         ret = flow_dv_validate_action_modify_mac(action_flags,
5808                                                                  actions,
5809                                                                  item_flags,
5810                                                                  error);
5811                         if (ret < 0)
5812                                 return ret;
5813                         /* Count all modify-header actions as one action. */
5814                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5815                                 ++actions_n;
5816                         action_flags |= actions->type ==
5817                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5818                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5819                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5820                         /*
5821                          * Even if the source and destination MAC addresses have
5822                          * overlap in the header with 4B alignment, the convert
5823                          * function will handle them separately and 4 SW actions
5824                          * will be created. And 2 actions will be added each
5825                          * time no matter how many bytes of address will be set.
5826                          */
5827                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5828                         break;
5829                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5830                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5831                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5832                                                                   actions,
5833                                                                   item_flags,
5834                                                                   error);
5835                         if (ret < 0)
5836                                 return ret;
5837                         /* Count all modify-header actions as one action. */
5838                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5839                                 ++actions_n;
5840                         action_flags |= actions->type ==
5841                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5842                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5843                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5844                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5845                         break;
5846                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5847                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5848                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5849                                                                   actions,
5850                                                                   item_flags,
5851                                                                   error);
5852                         if (ret < 0)
5853                                 return ret;
5854                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5855                                 return rte_flow_error_set(error, ENOTSUP,
5856                                         RTE_FLOW_ERROR_TYPE_ACTION,
5857                                         actions,
5858                                         "Can't change header "
5859                                         "with ICMPv6 proto");
5860                         /* Count all modify-header actions as one action. */
5861                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5862                                 ++actions_n;
5863                         action_flags |= actions->type ==
5864                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5865                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5866                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5867                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5868                         break;
5869                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5870                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5871                         ret = flow_dv_validate_action_modify_tp(action_flags,
5872                                                                 actions,
5873                                                                 item_flags,
5874                                                                 error);
5875                         if (ret < 0)
5876                                 return ret;
5877                         /* Count all modify-header actions as one action. */
5878                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5879                                 ++actions_n;
5880                         action_flags |= actions->type ==
5881                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5882                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5883                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5884                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5885                         break;
5886                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5887                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5888                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5889                                                                  actions,
5890                                                                  item_flags,
5891                                                                  error);
5892                         if (ret < 0)
5893                                 return ret;
5894                         /* Count all modify-header actions as one action. */
5895                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5896                                 ++actions_n;
5897                         action_flags |= actions->type ==
5898                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5899                                                 MLX5_FLOW_ACTION_SET_TTL :
5900                                                 MLX5_FLOW_ACTION_DEC_TTL;
5901                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5902                         break;
5903                 case RTE_FLOW_ACTION_TYPE_JUMP:
5904                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5905                                                            action_flags,
5906                                                            attr, external,
5907                                                            error);
5908                         if (ret)
5909                                 return ret;
5910                         ++actions_n;
5911                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5912                         break;
5913                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5914                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5915                         ret = flow_dv_validate_action_modify_tcp_seq
5916                                                                 (action_flags,
5917                                                                  actions,
5918                                                                  item_flags,
5919                                                                  error);
5920                         if (ret < 0)
5921                                 return ret;
5922                         /* Count all modify-header actions as one action. */
5923                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5924                                 ++actions_n;
5925                         action_flags |= actions->type ==
5926                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5927                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5928                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5929                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5930                         break;
5931                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5932                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5933                         ret = flow_dv_validate_action_modify_tcp_ack
5934                                                                 (action_flags,
5935                                                                  actions,
5936                                                                  item_flags,
5937                                                                  error);
5938                         if (ret < 0)
5939                                 return ret;
5940                         /* Count all modify-header actions as one action. */
5941                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5942                                 ++actions_n;
5943                         action_flags |= actions->type ==
5944                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5945                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5946                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5947                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5948                         break;
5949                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5950                         break;
5951                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5952                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5953                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5954                         break;
5955                 case RTE_FLOW_ACTION_TYPE_METER:
5956                         ret = mlx5_flow_validate_action_meter(dev,
5957                                                               action_flags,
5958                                                               actions, attr,
5959                                                               error);
5960                         if (ret < 0)
5961                                 return ret;
5962                         action_flags |= MLX5_FLOW_ACTION_METER;
5963                         ++actions_n;
5964                         /* Meter action will add one more TAG action. */
5965                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5966                         break;
5967                 case RTE_FLOW_ACTION_TYPE_AGE:
5968                         ret = flow_dv_validate_action_age(action_flags,
5969                                                           actions, dev,
5970                                                           error);
5971                         if (ret < 0)
5972                                 return ret;
5973                         action_flags |= MLX5_FLOW_ACTION_AGE;
5974                         ++actions_n;
5975                         break;
5976                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5977                         ret = flow_dv_validate_action_modify_ipv4_dscp
5978                                                          (action_flags,
5979                                                           actions,
5980                                                           item_flags,
5981                                                           error);
5982                         if (ret < 0)
5983                                 return ret;
5984                         /* Count all modify-header actions as one action. */
5985                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5986                                 ++actions_n;
5987                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5988                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5989                         break;
5990                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5991                         ret = flow_dv_validate_action_modify_ipv6_dscp
5992                                                                 (action_flags,
5993                                                                  actions,
5994                                                                  item_flags,
5995                                                                  error);
5996                         if (ret < 0)
5997                                 return ret;
5998                         /* Count all modify-header actions as one action. */
5999                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6000                                 ++actions_n;
6001                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6002                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6003                         break;
6004                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6005                         ret = flow_dv_validate_action_sample(action_flags,
6006                                                              actions, dev,
6007                                                              attr, error);
6008                         if (ret < 0)
6009                                 return ret;
6010                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6011                         ++actions_n;
6012                         break;
6013                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6014                         if (actions[0].type != (typeof(actions[0].type))
6015                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6016                                 return rte_flow_error_set
6017                                                 (error, EINVAL,
6018                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6019                                                 NULL, "MLX5 private action "
6020                                                 "must be the first");
6021
6022                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6023                         break;
6024                 default:
6025                         return rte_flow_error_set(error, ENOTSUP,
6026                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6027                                                   actions,
6028                                                   "action not supported");
6029                 }
6030         }
6031         /*
6032          * Validate actions in flow rules
6033          * - Explicit decap action is prohibited by the tunnel offload API.
6034          * - Drop action in tunnel steer rule is prohibited by the API.
6035          * - Application cannot use MARK action because it's value can mask
6036          *   tunnel default miss notification.
6037          * - JUMP in tunnel match rule has no support in current PMD
6038          *   implementation.
6039          * - TAG & META are reserved for future uses.
6040          */
6041         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6042                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6043                                             MLX5_FLOW_ACTION_MARK     |
6044                                             MLX5_FLOW_ACTION_SET_TAG  |
6045                                             MLX5_FLOW_ACTION_SET_META |
6046                                             MLX5_FLOW_ACTION_DROP;
6047
6048                 if (action_flags & bad_actions_mask)
6049                         return rte_flow_error_set
6050                                         (error, EINVAL,
6051                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6052                                         "Invalid RTE action in tunnel "
6053                                         "set decap rule");
6054                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6055                         return rte_flow_error_set
6056                                         (error, EINVAL,
6057                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6058                                         "tunnel set decap rule must terminate "
6059                                         "with JUMP");
6060                 if (!attr->ingress)
6061                         return rte_flow_error_set
6062                                         (error, EINVAL,
6063                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6064                                         "tunnel flows for ingress traffic only");
6065         }
6066         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6067                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6068                                             MLX5_FLOW_ACTION_MARK    |
6069                                             MLX5_FLOW_ACTION_SET_TAG |
6070                                             MLX5_FLOW_ACTION_SET_META;
6071
6072                 if (action_flags & bad_actions_mask)
6073                         return rte_flow_error_set
6074                                         (error, EINVAL,
6075                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6076                                         "Invalid RTE action in tunnel "
6077                                         "set match rule");
6078         }
6079         /*
6080          * Validate the drop action mutual exclusion with other actions.
6081          * Drop action is mutually-exclusive with any other action, except for
6082          * Count action.
6083          */
6084         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6085             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6086                 return rte_flow_error_set(error, EINVAL,
6087                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6088                                           "Drop action is mutually-exclusive "
6089                                           "with any other action, except for "
6090                                           "Count action");
6091         /* Eswitch has few restrictions on using items and actions */
6092         if (attr->transfer) {
6093                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6094                     action_flags & MLX5_FLOW_ACTION_FLAG)
6095                         return rte_flow_error_set(error, ENOTSUP,
6096                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6097                                                   NULL,
6098                                                   "unsupported action FLAG");
6099                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6100                     action_flags & MLX5_FLOW_ACTION_MARK)
6101                         return rte_flow_error_set(error, ENOTSUP,
6102                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6103                                                   NULL,
6104                                                   "unsupported action MARK");
6105                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6106                         return rte_flow_error_set(error, ENOTSUP,
6107                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6108                                                   NULL,
6109                                                   "unsupported action QUEUE");
6110                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6111                         return rte_flow_error_set(error, ENOTSUP,
6112                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6113                                                   NULL,
6114                                                   "unsupported action RSS");
6115                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6116                         return rte_flow_error_set(error, EINVAL,
6117                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6118                                                   actions,
6119                                                   "no fate action is found");
6120         } else {
6121                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6122                         return rte_flow_error_set(error, EINVAL,
6123                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6124                                                   actions,
6125                                                   "no fate action is found");
6126         }
6127         /*
6128          * Continue validation for Xcap and VLAN actions.
6129          * If hairpin is working in explicit TX rule mode, there is no actions
6130          * splitting and the validation of hairpin ingress flow should be the
6131          * same as other standard flows.
6132          */
6133         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6134                              MLX5_FLOW_VLAN_ACTIONS)) &&
6135             (queue_index == 0xFFFF ||
6136              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6137              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6138              conf->tx_explicit != 0))) {
6139                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6140                     MLX5_FLOW_XCAP_ACTIONS)
6141                         return rte_flow_error_set(error, ENOTSUP,
6142                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6143                                                   NULL, "encap and decap "
6144                                                   "combination aren't supported");
6145                 if (!attr->transfer && attr->ingress) {
6146                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6147                                 return rte_flow_error_set
6148                                                 (error, ENOTSUP,
6149                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6150                                                  NULL, "encap is not supported"
6151                                                  " for ingress traffic");
6152                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6153                                 return rte_flow_error_set
6154                                                 (error, ENOTSUP,
6155                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6156                                                  NULL, "push VLAN action not "
6157                                                  "supported for ingress");
6158                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6159                                         MLX5_FLOW_VLAN_ACTIONS)
6160                                 return rte_flow_error_set
6161                                                 (error, ENOTSUP,
6162                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6163                                                  NULL, "no support for "
6164                                                  "multiple VLAN actions");
6165                 }
6166         }
6167         /*
6168          * Hairpin flow will add one more TAG action in TX implicit mode.
6169          * In TX explicit mode, there will be no hairpin flow ID.
6170          */
6171         if (hairpin > 0)
6172                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6173         /* extra metadata enabled: one more TAG action will be add. */
6174         if (dev_conf->dv_flow_en &&
6175             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6176             mlx5_flow_ext_mreg_supported(dev))
6177                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6178         if ((uint32_t)rw_act_num >
6179                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6180                 return rte_flow_error_set(error, ENOTSUP,
6181                                           RTE_FLOW_ERROR_TYPE_ACTION,
6182                                           NULL, "too many header modify"
6183                                           " actions to support");
6184         }
6185         return 0;
6186 }
6187
6188 /**
6189  * Internal preparation function. Allocates the DV flow size,
6190  * this size is constant.
6191  *
6192  * @param[in] dev
6193  *   Pointer to the rte_eth_dev structure.
6194  * @param[in] attr
6195  *   Pointer to the flow attributes.
6196  * @param[in] items
6197  *   Pointer to the list of items.
6198  * @param[in] actions
6199  *   Pointer to the list of actions.
6200  * @param[out] error
6201  *   Pointer to the error structure.
6202  *
6203  * @return
6204  *   Pointer to mlx5_flow object on success,
6205  *   otherwise NULL and rte_errno is set.
6206  */
6207 static struct mlx5_flow *
6208 flow_dv_prepare(struct rte_eth_dev *dev,
6209                 const struct rte_flow_attr *attr __rte_unused,
6210                 const struct rte_flow_item items[] __rte_unused,
6211                 const struct rte_flow_action actions[] __rte_unused,
6212                 struct rte_flow_error *error)
6213 {
6214         uint32_t handle_idx = 0;
6215         struct mlx5_flow *dev_flow;
6216         struct mlx5_flow_handle *dev_handle;
6217         struct mlx5_priv *priv = dev->data->dev_private;
6218         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6219
6220         MLX5_ASSERT(wks);
6221         /* In case of corrupting the memory. */
6222         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6223                 rte_flow_error_set(error, ENOSPC,
6224                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6225                                    "not free temporary device flow");
6226                 return NULL;
6227         }
6228         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6229                                    &handle_idx);
6230         if (!dev_handle) {
6231                 rte_flow_error_set(error, ENOMEM,
6232                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6233                                    "not enough memory to create flow handle");
6234                 return NULL;
6235         }
6236         MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6237         dev_flow = &wks->flows[wks->flow_idx++];
6238         dev_flow->handle = dev_handle;
6239         dev_flow->handle_idx = handle_idx;
6240         /*
6241          * In some old rdma-core releases, before continuing, a check of the
6242          * length of matching parameter will be done at first. It needs to use
6243          * the length without misc4 param. If the flow has misc4 support, then
6244          * the length needs to be adjusted accordingly. Each param member is
6245          * aligned with a 64B boundary naturally.
6246          */
6247         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6248                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6249         /*
6250          * The matching value needs to be cleared to 0 before using. In the
6251          * past, it will be automatically cleared when using rte_*alloc
6252          * API. The time consumption will be almost the same as before.
6253          */
6254         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6255         dev_flow->ingress = attr->ingress;
6256         dev_flow->dv.transfer = attr->transfer;
6257         return dev_flow;
6258 }
6259
6260 #ifdef RTE_LIBRTE_MLX5_DEBUG
6261 /**
6262  * Sanity check for match mask and value. Similar to check_valid_spec() in
6263  * kernel driver. If unmasked bit is present in value, it returns failure.
6264  *
6265  * @param match_mask
6266  *   pointer to match mask buffer.
6267  * @param match_value
6268  *   pointer to match value buffer.
6269  *
6270  * @return
6271  *   0 if valid, -EINVAL otherwise.
6272  */
6273 static int
6274 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6275 {
6276         uint8_t *m = match_mask;
6277         uint8_t *v = match_value;
6278         unsigned int i;
6279
6280         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6281                 if (v[i] & ~m[i]) {
6282                         DRV_LOG(ERR,
6283                                 "match_value differs from match_criteria"
6284                                 " %p[%u] != %p[%u]",
6285                                 match_value, i, match_mask, i);
6286                         return -EINVAL;
6287                 }
6288         }
6289         return 0;
6290 }
6291 #endif
6292
6293 /**
6294  * Add match of ip_version.
6295  *
6296  * @param[in] group
6297  *   Flow group.
6298  * @param[in] headers_v
6299  *   Values header pointer.
6300  * @param[in] headers_m
6301  *   Masks header pointer.
6302  * @param[in] ip_version
6303  *   The IP version to set.
6304  */
6305 static inline void
6306 flow_dv_set_match_ip_version(uint32_t group,
6307                              void *headers_v,
6308                              void *headers_m,
6309                              uint8_t ip_version)
6310 {
6311         if (group == 0)
6312                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6313         else
6314                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6315                          ip_version);
6316         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6317         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6318         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6319 }
6320
6321 /**
6322  * Add Ethernet item to matcher and to the value.
6323  *
6324  * @param[in, out] matcher
6325  *   Flow matcher.
6326  * @param[in, out] key
6327  *   Flow matcher value.
6328  * @param[in] item
6329  *   Flow pattern to translate.
6330  * @param[in] inner
6331  *   Item is inner pattern.
6332  */
6333 static void
6334 flow_dv_translate_item_eth(void *matcher, void *key,
6335                            const struct rte_flow_item *item, int inner,
6336                            uint32_t group)
6337 {
6338         const struct rte_flow_item_eth *eth_m = item->mask;
6339         const struct rte_flow_item_eth *eth_v = item->spec;
6340         const struct rte_flow_item_eth nic_mask = {
6341                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6342                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6343                 .type = RTE_BE16(0xffff),
6344                 .has_vlan = 0,
6345         };
6346         void *hdrs_m;
6347         void *hdrs_v;
6348         char *l24_v;
6349         unsigned int i;
6350
6351         if (!eth_v)
6352                 return;
6353         if (!eth_m)
6354                 eth_m = &nic_mask;
6355         if (inner) {
6356                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6357                                          inner_headers);
6358                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6359         } else {
6360                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6361                                          outer_headers);
6362                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6363         }
6364         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6365                &eth_m->dst, sizeof(eth_m->dst));
6366         /* The value must be in the range of the mask. */
6367         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6368         for (i = 0; i < sizeof(eth_m->dst); ++i)
6369                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6370         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6371                &eth_m->src, sizeof(eth_m->src));
6372         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6373         /* The value must be in the range of the mask. */
6374         for (i = 0; i < sizeof(eth_m->dst); ++i)
6375                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6376         /*
6377          * HW supports match on one Ethertype, the Ethertype following the last
6378          * VLAN tag of the packet (see PRM).
6379          * Set match on ethertype only if ETH header is not followed by VLAN.
6380          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6381          * ethertype, and use ip_version field instead.
6382          * eCPRI over Ether layer will use type value 0xAEFE.
6383          */
6384         if (eth_m->type == 0xFFFF) {
6385                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
6386                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6387                 switch (eth_v->type) {
6388                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6389                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6390                         return;
6391                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6392                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6393                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6394                         return;
6395                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6396                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6397                         return;
6398                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6399                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6400                         return;
6401                 default:
6402                         break;
6403                 }
6404         }
6405         if (eth_m->has_vlan) {
6406                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6407                 if (eth_v->has_vlan) {
6408                         /*
6409                          * Here, when also has_more_vlan field in VLAN item is
6410                          * not set, only single-tagged packets will be matched.
6411                          */
6412                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6413                         return;
6414                 }
6415         }
6416         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6417                  rte_be_to_cpu_16(eth_m->type));
6418         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6419         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6420 }
6421
6422 /**
6423  * Add VLAN item to matcher and to the value.
6424  *
6425  * @param[in, out] dev_flow
6426  *   Flow descriptor.
6427  * @param[in, out] matcher
6428  *   Flow matcher.
6429  * @param[in, out] key
6430  *   Flow matcher value.
6431  * @param[in] item
6432  *   Flow pattern to translate.
6433  * @param[in] inner
6434  *   Item is inner pattern.
6435  */
6436 static void
6437 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6438                             void *matcher, void *key,
6439                             const struct rte_flow_item *item,
6440                             int inner, uint32_t group)
6441 {
6442         const struct rte_flow_item_vlan *vlan_m = item->mask;
6443         const struct rte_flow_item_vlan *vlan_v = item->spec;
6444         void *hdrs_m;
6445         void *hdrs_v;
6446         uint16_t tci_m;
6447         uint16_t tci_v;
6448
6449         if (inner) {
6450                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6451                                          inner_headers);
6452                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6453         } else {
6454                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6455                                          outer_headers);
6456                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6457                 /*
6458                  * This is workaround, masks are not supported,
6459                  * and pre-validated.
6460                  */
6461                 if (vlan_v)
6462                         dev_flow->handle->vf_vlan.tag =
6463                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6464         }
6465         /*
6466          * When VLAN item exists in flow, mark packet as tagged,
6467          * even if TCI is not specified.
6468          */
6469         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6470                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6471                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6472         }
6473         if (!vlan_v)
6474                 return;
6475         if (!vlan_m)
6476                 vlan_m = &rte_flow_item_vlan_mask;
6477         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6478         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6479         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6480         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6481         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6482         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6483         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6484         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6485         /*
6486          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6487          * ethertype, and use ip_version field instead.
6488          */
6489         if (vlan_m->inner_type == 0xFFFF) {
6490                 switch (vlan_v->inner_type) {
6491                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6492                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6493                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6494                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6495                         return;
6496                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6497                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6498                         return;
6499                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6500                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6501                         return;
6502                 default:
6503                         break;
6504                 }
6505         }
6506         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6507                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6508                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6509                 /* Only one vlan_tag bit can be set. */
6510                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6511                 return;
6512         }
6513         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6514                  rte_be_to_cpu_16(vlan_m->inner_type));
6515         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6516                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6517 }
6518
6519 /**
6520  * Add IPV4 item to matcher and to the value.
6521  *
6522  * @param[in, out] matcher
6523  *   Flow matcher.
6524  * @param[in, out] key
6525  *   Flow matcher value.
6526  * @param[in] item
6527  *   Flow pattern to translate.
6528  * @param[in] inner
6529  *   Item is inner pattern.
6530  * @param[in] group
6531  *   The group to insert the rule.
6532  */
6533 static void
6534 flow_dv_translate_item_ipv4(void *matcher, void *key,
6535                             const struct rte_flow_item *item,
6536                             int inner, uint32_t group)
6537 {
6538         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6539         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6540         const struct rte_flow_item_ipv4 nic_mask = {
6541                 .hdr = {
6542                         .src_addr = RTE_BE32(0xffffffff),
6543                         .dst_addr = RTE_BE32(0xffffffff),
6544                         .type_of_service = 0xff,
6545                         .next_proto_id = 0xff,
6546                         .time_to_live = 0xff,
6547                 },
6548         };
6549         void *headers_m;
6550         void *headers_v;
6551         char *l24_m;
6552         char *l24_v;
6553         uint8_t tos;
6554
6555         if (inner) {
6556                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6557                                          inner_headers);
6558                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6559         } else {
6560                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6561                                          outer_headers);
6562                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6563         }
6564         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6565         if (!ipv4_v)
6566                 return;
6567         if (!ipv4_m)
6568                 ipv4_m = &nic_mask;
6569         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6570                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6571         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6572                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6573         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6574         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6575         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6576                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6577         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6578                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6579         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6580         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6581         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6582         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6583                  ipv4_m->hdr.type_of_service);
6584         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6585         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6586                  ipv4_m->hdr.type_of_service >> 2);
6587         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6588         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6589                  ipv4_m->hdr.next_proto_id);
6590         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6591                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6592         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6593                  ipv4_m->hdr.time_to_live);
6594         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6595                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6596         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6597                  !!(ipv4_m->hdr.fragment_offset));
6598         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6599                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6600 }
6601
6602 /**
6603  * Add IPV6 item to matcher and to the value.
6604  *
6605  * @param[in, out] matcher
6606  *   Flow matcher.
6607  * @param[in, out] key
6608  *   Flow matcher value.
6609  * @param[in] item
6610  *   Flow pattern to translate.
6611  * @param[in] inner
6612  *   Item is inner pattern.
6613  * @param[in] group
6614  *   The group to insert the rule.
6615  */
6616 static void
6617 flow_dv_translate_item_ipv6(void *matcher, void *key,
6618                             const struct rte_flow_item *item,
6619                             int inner, uint32_t group)
6620 {
6621         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6622         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6623         const struct rte_flow_item_ipv6 nic_mask = {
6624                 .hdr = {
6625                         .src_addr =
6626                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6627                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6628                         .dst_addr =
6629                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6630                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6631                         .vtc_flow = RTE_BE32(0xffffffff),
6632                         .proto = 0xff,
6633                         .hop_limits = 0xff,
6634                 },
6635         };
6636         void *headers_m;
6637         void *headers_v;
6638         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6639         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6640         char *l24_m;
6641         char *l24_v;
6642         uint32_t vtc_m;
6643         uint32_t vtc_v;
6644         int i;
6645         int size;
6646
6647         if (inner) {
6648                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6649                                          inner_headers);
6650                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6651         } else {
6652                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6653                                          outer_headers);
6654                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6655         }
6656         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6657         if (!ipv6_v)
6658                 return;
6659         if (!ipv6_m)
6660                 ipv6_m = &nic_mask;
6661         size = sizeof(ipv6_m->hdr.dst_addr);
6662         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6663                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6664         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6665                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6666         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6667         for (i = 0; i < size; ++i)
6668                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6669         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6670                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6671         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6672                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6673         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6674         for (i = 0; i < size; ++i)
6675                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6676         /* TOS. */
6677         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6678         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6679         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6680         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6681         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6682         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6683         /* Label. */
6684         if (inner) {
6685                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6686                          vtc_m);
6687                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6688                          vtc_v);
6689         } else {
6690                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6691                          vtc_m);
6692                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6693                          vtc_v);
6694         }
6695         /* Protocol. */
6696         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6697                  ipv6_m->hdr.proto);
6698         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6699                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6700         /* Hop limit. */
6701         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6702                  ipv6_m->hdr.hop_limits);
6703         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6704                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6705         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6706                  !!(ipv6_m->has_frag_ext));
6707         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6708                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6709 }
6710
6711 /**
6712  * Add IPV6 fragment extension item to matcher and to the value.
6713  *
6714  * @param[in, out] matcher
6715  *   Flow matcher.
6716  * @param[in, out] key
6717  *   Flow matcher value.
6718  * @param[in] item
6719  *   Flow pattern to translate.
6720  * @param[in] inner
6721  *   Item is inner pattern.
6722  */
6723 static void
6724 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6725                                      const struct rte_flow_item *item,
6726                                      int inner)
6727 {
6728         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6729         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6730         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6731                 .hdr = {
6732                         .next_header = 0xff,
6733                         .frag_data = RTE_BE16(0xffff),
6734                 },
6735         };
6736         void *headers_m;
6737         void *headers_v;
6738
6739         if (inner) {
6740                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6741                                          inner_headers);
6742                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6743         } else {
6744                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6745                                          outer_headers);
6746                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6747         }
6748         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6749         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6750         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6751         if (!ipv6_frag_ext_v)
6752                 return;
6753         if (!ipv6_frag_ext_m)
6754                 ipv6_frag_ext_m = &nic_mask;
6755         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6756                  ipv6_frag_ext_m->hdr.next_header);
6757         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6758                  ipv6_frag_ext_v->hdr.next_header &
6759                  ipv6_frag_ext_m->hdr.next_header);
6760 }
6761
6762 /**
6763  * Add TCP item to matcher and to the value.
6764  *
6765  * @param[in, out] matcher
6766  *   Flow matcher.
6767  * @param[in, out] key
6768  *   Flow matcher value.
6769  * @param[in] item
6770  *   Flow pattern to translate.
6771  * @param[in] inner
6772  *   Item is inner pattern.
6773  */
6774 static void
6775 flow_dv_translate_item_tcp(void *matcher, void *key,
6776                            const struct rte_flow_item *item,
6777                            int inner)
6778 {
6779         const struct rte_flow_item_tcp *tcp_m = item->mask;
6780         const struct rte_flow_item_tcp *tcp_v = item->spec;
6781         void *headers_m;
6782         void *headers_v;
6783
6784         if (inner) {
6785                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6786                                          inner_headers);
6787                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6788         } else {
6789                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6790                                          outer_headers);
6791                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6792         }
6793         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6794         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6795         if (!tcp_v)
6796                 return;
6797         if (!tcp_m)
6798                 tcp_m = &rte_flow_item_tcp_mask;
6799         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6800                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6801         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6802                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6803         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6804                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6805         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6806                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6807         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6808                  tcp_m->hdr.tcp_flags);
6809         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6810                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6811 }
6812
6813 /**
6814  * Add UDP item to matcher and to the value.
6815  *
6816  * @param[in, out] matcher
6817  *   Flow matcher.
6818  * @param[in, out] key
6819  *   Flow matcher value.
6820  * @param[in] item
6821  *   Flow pattern to translate.
6822  * @param[in] inner
6823  *   Item is inner pattern.
6824  */
6825 static void
6826 flow_dv_translate_item_udp(void *matcher, void *key,
6827                            const struct rte_flow_item *item,
6828                            int inner)
6829 {
6830         const struct rte_flow_item_udp *udp_m = item->mask;
6831         const struct rte_flow_item_udp *udp_v = item->spec;
6832         void *headers_m;
6833         void *headers_v;
6834
6835         if (inner) {
6836                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6837                                          inner_headers);
6838                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6839         } else {
6840                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6841                                          outer_headers);
6842                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6843         }
6844         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6845         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6846         if (!udp_v)
6847                 return;
6848         if (!udp_m)
6849                 udp_m = &rte_flow_item_udp_mask;
6850         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6851                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6852         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6853                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6854         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6855                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6856         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6857                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6858 }
6859
6860 /**
6861  * Add GRE optional Key item to matcher and to the value.
6862  *
6863  * @param[in, out] matcher
6864  *   Flow matcher.
6865  * @param[in, out] key
6866  *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
6872 static void
6873 flow_dv_translate_item_gre_key(void *matcher, void *key,
6874                                    const struct rte_flow_item *item)
6875 {
6876         const rte_be32_t *key_m = item->mask;
6877         const rte_be32_t *key_v = item->spec;
6878         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6879         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6880         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6881
6882         /* GRE K bit must be on and should already be validated */
6883         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6884         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6885         if (!key_v)
6886                 return;
6887         if (!key_m)
6888                 key_m = &gre_key_default_mask;
6889         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6890                  rte_be_to_cpu_32(*key_m) >> 8);
6891         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6892                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6893         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6894                  rte_be_to_cpu_32(*key_m) & 0xFF);
6895         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6896                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6897 }
6898
6899 /**
6900  * Add GRE item to matcher and to the value.
6901  *
6902  * @param[in, out] matcher
6903  *   Flow matcher.
6904  * @param[in, out] key
6905  *   Flow matcher value.
6906  * @param[in] item
6907  *   Flow pattern to translate.
6908  * @param[in] inner
6909  *   Item is inner pattern.
6910  */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Overlay used to pick the C/K/S present bits out of the GRE
	 * c_rsvd0_ver word after it has been converted to CPU order with
	 * rte_be_to_cpu_16().
	 * NOTE(review): the bitfield ordering here is compiler/endianness
	 * dependent; it matches the converted-word layout on the supported
	 * platforms — confirm before porting.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	/* Select the inner or outer header block of the match parameter. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* GRE is always matched by IP protocol number. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return; /* No spec: match any GRE packet. */
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	/* Inner protocol: mask to matcher, masked value to key. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	/* C/K/S present flags: each value bit is ANDed with its mask bit. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
6974
6975 /**
6976  * Add NVGRE item to matcher and to the value.
6977  *
6978  * @param[in, out] matcher
6979  *   Flow matcher.
6980  * @param[in, out] key
6981  *   Flow matcher value.
6982  * @param[in] item
6983  *   Flow pattern to translate.
6984  * @param[in] inner
6985  *   Item is inner pattern.
6986  */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	const char *tni_flow_id_m;
	const char *tni_flow_id_v;
	char *gre_key_m;
	char *gre_key_v;
	int size;
	int i;

	/* For NVGRE, GRE header fields must be set with defined values. */
	const struct rte_flow_item_gre gre_spec = {
		.c_rsvd0_ver = RTE_BE16(0x2000), /* K bit set. */
		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
	};
	const struct rte_flow_item_gre gre_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000), /* C, K and S bits. */
		.protocol = RTE_BE16(UINT16_MAX),
	};
	const struct rte_flow_item gre_item = {
		.spec = &gre_spec,
		.mask = &gre_mask,
		.last = NULL,
	};
	/* Match the synthesized GRE header first. */
	flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
	if (!nvgre_v)
		return; /* No spec: matching GRE/TEB is enough. */
	if (!nvgre_m)
		nvgre_m = &rte_flow_item_nvgre_mask;
	/*
	 * TNI and flow_id are copied as one chunk into the GRE key match
	 * field; this relies on tni[] and flow_id being contiguous both in
	 * struct rte_flow_item_nvgre and in the gre_key_h/gre_key_l fields.
	 */
	tni_flow_id_m = (const char *)nvgre_m->tni;
	tni_flow_id_v = (const char *)nvgre_v->tni;
	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
	memcpy(gre_key_m, tni_flow_id_m, size);
	/* Key value bytes are ANDed with the mask bytes. */
	for (i = 0; i < size; ++i)
		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
}
7031
7032 /**
7033  * Add VXLAN item to matcher and to the value.
7034  *
7035  * @param[in, out] matcher
7036  *   Flow matcher.
7037  * @param[in, out] key
7038  *   Flow matcher value.
7039  * @param[in] item
7040  *   Flow pattern to translate.
7041  * @param[in] inner
7042  *   Item is inner pattern.
7043  */
7044 static void
7045 flow_dv_translate_item_vxlan(void *matcher, void *key,
7046                              const struct rte_flow_item *item,
7047                              int inner)
7048 {
7049         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7050         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7051         void *headers_m;
7052         void *headers_v;
7053         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7054         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7055         char *vni_m;
7056         char *vni_v;
7057         uint16_t dport;
7058         int size;
7059         int i;
7060
7061         if (inner) {
7062                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7063                                          inner_headers);
7064                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7065         } else {
7066                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7067                                          outer_headers);
7068                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7069         }
7070         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7071                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7072         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7073                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7074                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7075         }
7076         if (!vxlan_v)
7077                 return;
7078         if (!vxlan_m)
7079                 vxlan_m = &rte_flow_item_vxlan_mask;
7080         size = sizeof(vxlan_m->vni);
7081         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7082         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7083         memcpy(vni_m, vxlan_m->vni, size);
7084         for (i = 0; i < size; ++i)
7085                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7086 }
7087
7088 /**
7089  * Add VXLAN-GPE item to matcher and to the value.
7090  *
7091  * @param[in, out] matcher
7092  *   Flow matcher.
7093  * @param[in, out] key
7094  *   Flow matcher value.
7095  * @param[in] item
7096  *   Flow pattern to translate.
7097  * @param[in] inner
7098  *   Item is inner pattern.
7099  */
7100
7101 static void
7102 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7103                                  const struct rte_flow_item *item, int inner)
7104 {
7105         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7106         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7107         void *headers_m;
7108         void *headers_v;
7109         void *misc_m =
7110                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7111         void *misc_v =
7112                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7113         char *vni_m;
7114         char *vni_v;
7115         uint16_t dport;
7116         int size;
7117         int i;
7118         uint8_t flags_m = 0xff;
7119         uint8_t flags_v = 0xc;
7120
7121         if (inner) {
7122                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7123                                          inner_headers);
7124                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7125         } else {
7126                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7127                                          outer_headers);
7128                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7129         }
7130         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7131                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7132         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7133                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7134                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7135         }
7136         if (!vxlan_v)
7137                 return;
7138         if (!vxlan_m)
7139                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7140         size = sizeof(vxlan_m->vni);
7141         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7142         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7143         memcpy(vni_m, vxlan_m->vni, size);
7144         for (i = 0; i < size; ++i)
7145                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7146         if (vxlan_m->flags) {
7147                 flags_m = vxlan_m->flags;
7148                 flags_v = vxlan_v->flags;
7149         }
7150         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7151         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7152         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7153                  vxlan_m->protocol);
7154         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7155                  vxlan_v->protocol);
7156 }
7157
7158 /**
7159  * Add Geneve item to matcher and to the value.
7160  *
7161  * @param[in, out] matcher
7162  *   Flow matcher.
7163  * @param[in, out] key
7164  *   Flow matcher value.
7165  * @param[in] item
7166  *   Flow pattern to translate.
7167  * @param[in] inner
7168  *   Item is inner pattern.
7169  */
7170
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
			      const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_geneve *geneve_m = item->mask;
	const struct rte_flow_item_geneve *geneve_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	uint16_t dport;
	/* CPU-endian copies of the ver_opt_len_o_c_rsvd0 base-header word. */
	uint16_t gbhdr_m;
	uint16_t gbhdr_v;
	char *vni_m;
	char *vni_v;
	size_t size, i;

	/* Select the inner or outer header block of the match parameter. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	dport = MLX5_UDP_PORT_GENEVE;
	/* Pin the UDP destination port unless the item already set one. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!geneve_v)
		return; /* No spec: match any GENEVE packet. */
	if (!geneve_m)
		geneve_m = &rte_flow_item_geneve_mask;
	/* VNI bytes: copy the mask, then AND each value byte with it. */
	size = sizeof(geneve_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
	memcpy(vni_m, geneve_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & geneve_v->vni[i];
	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
	/* OAM flag and option length are bit sub-fields of the base word. */
	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
}
7228
7229 /**
7230  * Add MPLS item to matcher and to the value.
7231  *
7232  * @param[in, out] matcher
7233  *   Flow matcher.
7234  * @param[in, out] key
7235  *   Flow matcher value.
7236  * @param[in] item
7237  *   Flow pattern to translate.
7238  * @param[in] prev_layer
7239  *   The protocol layer indicated in previous item.
7240  * @param[in] inner
7241  *   Item is inner pattern.
7242  */
7243 static void
7244 flow_dv_translate_item_mpls(void *matcher, void *key,
7245                             const struct rte_flow_item *item,
7246                             uint64_t prev_layer,
7247                             int inner)
7248 {
7249         const uint32_t *in_mpls_m = item->mask;
7250         const uint32_t *in_mpls_v = item->spec;
7251         uint32_t *out_mpls_m = 0;
7252         uint32_t *out_mpls_v = 0;
7253         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7254         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7255         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7256                                      misc_parameters_2);
7257         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7258         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7259         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7260
7261         switch (prev_layer) {
7262         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7263                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7264                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7265                          MLX5_UDP_PORT_MPLS);
7266                 break;
7267         case MLX5_FLOW_LAYER_GRE:
7268                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7269                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7270                          RTE_ETHER_TYPE_MPLS);
7271                 break;
7272         default:
7273                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7274                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7275                          IPPROTO_MPLS);
7276                 break;
7277         }
7278         if (!in_mpls_v)
7279                 return;
7280         if (!in_mpls_m)
7281                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7282         switch (prev_layer) {
7283         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7284                 out_mpls_m =
7285                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7286                                                  outer_first_mpls_over_udp);
7287                 out_mpls_v =
7288                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7289                                                  outer_first_mpls_over_udp);
7290                 break;
7291         case MLX5_FLOW_LAYER_GRE:
7292                 out_mpls_m =
7293                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7294                                                  outer_first_mpls_over_gre);
7295                 out_mpls_v =
7296                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7297                                                  outer_first_mpls_over_gre);
7298                 break;
7299         default:
7300                 /* Inner MPLS not over GRE is not supported. */
7301                 if (!inner) {
7302                         out_mpls_m =
7303                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7304                                                          misc2_m,
7305                                                          outer_first_mpls);
7306                         out_mpls_v =
7307                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7308                                                          misc2_v,
7309                                                          outer_first_mpls);
7310                 }
7311                 break;
7312         }
7313         if (out_mpls_m && out_mpls_v) {
7314                 *out_mpls_m = *in_mpls_m;
7315                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7316         }
7317 }
7318
7319 /**
7320  * Add metadata register item to matcher
7321  *
7322  * @param[in, out] matcher
7323  *   Flow matcher.
7324  * @param[in, out] key
7325  *   Flow matcher value.
7326  * @param[in] reg_type
7327  *   Type of device metadata register
7328  * @param[in] value
7329  *   Register value
7330  * @param[in] mask
7331  *   Register mask
7332  */
static void
flow_dv_match_meta_reg(void *matcher, void *key,
		       enum modify_reg reg_type,
		       uint32_t data, uint32_t mask)
{
	void *misc2_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
	void *misc2_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	uint32_t temp; /* Scratch for REG_C_0 read-modify-write. */

	/* Value bits outside the mask are meaningless for matching. */
	data &= mask;
	switch (reg_type) {
	case REG_A:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
		break;
	case REG_B:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
		break;
	case REG_C_0:
		/*
		 * The metadata register C0 field might be divided into
		 * source vport index and META item value, we should set
		 * this field according to specified mask, not as whole one.
		 */
		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
		temp |= mask;
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
		temp &= ~mask;
		temp |= data;
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
		break;
	case REG_C_1:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
		break;
	case REG_C_2:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
		break;
	case REG_C_3:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
		break;
	case REG_C_4:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
		break;
	case REG_C_5:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
		break;
	case REG_C_6:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
		break;
	case REG_C_7:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
		break;
	default:
		/* Callers must pass a valid register; debug-build trap. */
		MLX5_ASSERT(false);
		break;
	}
}
7401
7402 /**
7403  * Add MARK item to matcher
7404  *
7405  * @param[in] dev
7406  *   The device to configure through.
7407  * @param[in, out] matcher
7408  *   Flow matcher.
7409  * @param[in, out] key
7410  *   Flow matcher value.
7411  * @param[in] item
7412  *   Flow pattern to translate.
7413  */
7414 static void
7415 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7416                             void *matcher, void *key,
7417                             const struct rte_flow_item *item)
7418 {
7419         struct mlx5_priv *priv = dev->data->dev_private;
7420         const struct rte_flow_item_mark *mark;
7421         uint32_t value;
7422         uint32_t mask;
7423
7424         mark = item->mask ? (const void *)item->mask :
7425                             &rte_flow_item_mark_mask;
7426         mask = mark->id & priv->sh->dv_mark_mask;
7427         mark = (const void *)item->spec;
7428         MLX5_ASSERT(mark);
7429         value = mark->id & priv->sh->dv_mark_mask & mask;
7430         if (mask) {
7431                 enum modify_reg reg;
7432
7433                 /* Get the metadata register index for the mark. */
7434                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7435                 MLX5_ASSERT(reg > 0);
7436                 if (reg == REG_C_0) {
7437                         struct mlx5_priv *priv = dev->data->dev_private;
7438                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7439                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7440
7441                         mask &= msk_c0;
7442                         mask <<= shl_c0;
7443                         value <<= shl_c0;
7444                 }
7445                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7446         }
7447 }
7448
7449 /**
7450  * Add META item to matcher
7451  *
7452  * @param[in] dev
 *   The device to configure through.
7454  * @param[in, out] matcher
7455  *   Flow matcher.
7456  * @param[in, out] key
7457  *   Flow matcher value.
7458  * @param[in] attr
7459  *   Attributes of flow that includes this item.
7460  * @param[in] item
7461  *   Flow pattern to translate.
7462  */
static void
flow_dv_translate_item_meta(struct rte_eth_dev *dev,
			    void *matcher, void *key,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item *item)
{
	const struct rte_flow_item_meta *meta_m;
	const struct rte_flow_item_meta *meta_v;

	meta_m = (const void *)item->mask;
	if (!meta_m)
		meta_m = &rte_flow_item_meta_mask;
	meta_v = (const void *)item->spec;
	if (meta_v) {
		int reg;
		uint32_t value = meta_v->data;
		uint32_t mask = meta_m->data;

		/* Resolve which register carries META for these attributes. */
		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
		if (reg < 0)
			return; /* No register available: nothing to match. */
		/*
		 * In datapath code there is no endianness
		 * conversion for performance reasons, all
		 * pattern conversions are done in rte_flow.
		 */
		value = rte_cpu_to_be_32(value);
		mask = rte_cpu_to_be_32(mask);
		if (reg == REG_C_0) {
			struct mlx5_priv *priv = dev->data->dev_private;
			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
			uint32_t shl_c0 = rte_bsf32(msk_c0);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
			/*
			 * On little-endian hosts the be32 conversion moved
			 * the meaningful bits to the top of the word; shift
			 * them back before aligning to the REG_C_0 sub-field.
			 */
			uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);

			value >>= shr_c0;
			mask >>= shr_c0;
#endif
			/* Align to the application-owned bits of REG_C_0. */
			value <<= shl_c0;
			mask <<= shl_c0;
			MLX5_ASSERT(msk_c0);
			MLX5_ASSERT(!(~msk_c0 & mask));
		}
		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
	}
}
7509
7510 /**
7511  * Add vport metadata Reg C0 item to matcher
7512  *
7513  * @param[in, out] matcher
7514  *   Flow matcher.
7515  * @param[in, out] key
7516  *   Flow matcher value.
7517  * @param[in] reg
7518  *   Flow pattern to translate.
7519  */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
                                  uint32_t value, uint32_t mask)
{
        /* Vport metadata is always carried in register C0. */
        flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
7526
7527 /**
7528  * Add tag item to matcher
7529  *
7530  * @param[in] dev
 *   The device to configure through.
7532  * @param[in, out] matcher
7533  *   Flow matcher.
7534  * @param[in, out] key
7535  *   Flow matcher value.
7536  * @param[in] item
7537  *   Flow pattern to translate.
7538  */
7539 static void
7540 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7541                                 void *matcher, void *key,
7542                                 const struct rte_flow_item *item)
7543 {
7544         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7545         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7546         uint32_t mask, value;
7547
7548         MLX5_ASSERT(tag_v);
7549         value = tag_v->data;
7550         mask = tag_m ? tag_m->data : UINT32_MAX;
7551         if (tag_v->id == REG_C_0) {
7552                 struct mlx5_priv *priv = dev->data->dev_private;
7553                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7554                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7555
7556                 mask &= msk_c0;
7557                 mask <<= shl_c0;
7558                 value <<= shl_c0;
7559         }
7560         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7561 }
7562
7563 /**
7564  * Add TAG item to matcher
7565  *
7566  * @param[in] dev
 *   The device to configure through.
7568  * @param[in, out] matcher
7569  *   Flow matcher.
7570  * @param[in, out] key
7571  *   Flow matcher value.
7572  * @param[in] item
7573  *   Flow pattern to translate.
7574  */
7575 static void
7576 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7577                            void *matcher, void *key,
7578                            const struct rte_flow_item *item)
7579 {
7580         const struct rte_flow_item_tag *tag_v = item->spec;
7581         const struct rte_flow_item_tag *tag_m = item->mask;
7582         enum modify_reg reg;
7583
7584         MLX5_ASSERT(tag_v);
7585         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7586         /* Get the metadata register index for the tag. */
7587         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7588         MLX5_ASSERT(reg > 0);
7589         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7590 }
7591
7592 /**
7593  * Add source vport match to the specified matcher.
7594  *
7595  * @param[in, out] matcher
7596  *   Flow matcher.
7597  * @param[in, out] key
7598  *   Flow matcher value.
7599  * @param[in] port
7600  *   Source vport value to match
7601  * @param[in] mask
7602  *   Mask
7603  */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
                                    int16_t port, uint16_t mask)
{
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

        /* Program the source_port field of the misc parameters group. */
        MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
        MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}
7614
7615 /**
 * Translate port-id item to eswitch match on port-id.
7617  *
7618  * @param[in] dev
 *   The device to configure through.
7620  * @param[in, out] matcher
7621  *   Flow matcher.
7622  * @param[in, out] key
7623  *   Flow matcher value.
7624  * @param[in] item
7625  *   Flow pattern to translate.
7626  *
7627  * @return
7628  *   0 on success, a negative errno value otherwise.
7629  */
7630 static int
7631 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7632                                void *key, const struct rte_flow_item *item)
7633 {
7634         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7635         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7636         struct mlx5_priv *priv;
7637         uint16_t mask, id;
7638
7639         mask = pid_m ? pid_m->id : 0xffff;
7640         id = pid_v ? pid_v->id : dev->data->port_id;
7641         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7642         if (!priv)
7643                 return -rte_errno;
7644         /* Translate to vport field or to metadata, depending on mode. */
7645         if (priv->vport_meta_mask)
7646                 flow_dv_translate_item_meta_vport(matcher, key,
7647                                                   priv->vport_meta_tag,
7648                                                   priv->vport_meta_mask);
7649         else
7650                 flow_dv_translate_item_source_vport(matcher, key,
7651                                                     priv->vport_id, mask);
7652         return 0;
7653 }
7654
7655 /**
7656  * Add ICMP6 item to matcher and to the value.
7657  *
7658  * @param[in, out] matcher
7659  *   Flow matcher.
7660  * @param[in, out] key
7661  *   Flow matcher value.
7662  * @param[in] item
7663  *   Flow pattern to translate.
7664  * @param[in] inner
7665  *   Item is inner pattern.
7666  */
7667 static void
7668 flow_dv_translate_item_icmp6(void *matcher, void *key,
7669                               const struct rte_flow_item *item,
7670                               int inner)
7671 {
7672         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7673         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7674         void *headers_m;
7675         void *headers_v;
7676         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7677                                      misc_parameters_3);
7678         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7679         if (inner) {
7680                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7681                                          inner_headers);
7682                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7683         } else {
7684                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7685                                          outer_headers);
7686                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7687         }
7688         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7689         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7690         if (!icmp6_v)
7691                 return;
7692         if (!icmp6_m)
7693                 icmp6_m = &rte_flow_item_icmp6_mask;
7694         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7695         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7696                  icmp6_v->type & icmp6_m->type);
7697         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7698         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7699                  icmp6_v->code & icmp6_m->code);
7700 }
7701
7702 /**
7703  * Add ICMP item to matcher and to the value.
7704  *
7705  * @param[in, out] matcher
7706  *   Flow matcher.
7707  * @param[in, out] key
7708  *   Flow matcher value.
7709  * @param[in] item
7710  *   Flow pattern to translate.
7711  * @param[in] inner
7712  *   Item is inner pattern.
7713  */
static void
flow_dv_translate_item_icmp(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_icmp *icmp_m = item->mask;
        const struct rte_flow_item_icmp *icmp_v = item->spec;
        uint32_t icmp_header_data_m = 0;
        uint32_t icmp_header_data_v = 0;
        void *headers_m;
        void *headers_v;
        void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                     misc_parameters_3);
        void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        /* The IP protocol is matched even when the spec is empty. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
        if (!icmp_v)
                return;
        if (!icmp_m)
                icmp_m = &rte_flow_item_icmp_mask;
        MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
                 icmp_m->hdr.icmp_type);
        MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
                 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
        MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
                 icmp_m->hdr.icmp_code);
        MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
                 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
        /*
         * icmp_header_data packs the identifier into the upper 16 bits
         * and the sequence number into the lower 16 bits, CPU order.
         */
        icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
        icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
        /* Only program the field when at least one bit is masked in. */
        if (icmp_header_data_m) {
                icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
                icmp_header_data_v |=
                         rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
                MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
                         icmp_header_data_m);
                MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
                         icmp_header_data_v & icmp_header_data_m);
        }
}
7763
7764 /**
7765  * Add GTP item to matcher and to the value.
7766  *
7767  * @param[in, out] matcher
7768  *   Flow matcher.
7769  * @param[in, out] key
7770  *   Flow matcher value.
7771  * @param[in] item
7772  *   Flow pattern to translate.
7773  * @param[in] inner
7774  *   Item is inner pattern.
7775  */
7776 static void
7777 flow_dv_translate_item_gtp(void *matcher, void *key,
7778                            const struct rte_flow_item *item, int inner)
7779 {
7780         const struct rte_flow_item_gtp *gtp_m = item->mask;
7781         const struct rte_flow_item_gtp *gtp_v = item->spec;
7782         void *headers_m;
7783         void *headers_v;
7784         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7785                                      misc_parameters_3);
7786         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7787         uint16_t dport = RTE_GTPU_UDP_PORT;
7788
7789         if (inner) {
7790                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7791                                          inner_headers);
7792                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7793         } else {
7794                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7795                                          outer_headers);
7796                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7797         }
7798         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7799                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7800                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7801         }
7802         if (!gtp_v)
7803                 return;
7804         if (!gtp_m)
7805                 gtp_m = &rte_flow_item_gtp_mask;
7806         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7807                  gtp_m->v_pt_rsv_flags);
7808         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7809                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7810         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7811         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7812                  gtp_v->msg_type & gtp_m->msg_type);
7813         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7814                  rte_be_to_cpu_32(gtp_m->teid));
7815         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7816                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7817 }
7818
7819 /**
7820  * Add eCPRI item to matcher and to the value.
7821  *
7822  * @param[in] dev
 *   The device to configure through.
7824  * @param[in, out] matcher
7825  *   Flow matcher.
7826  * @param[in, out] key
7827  *   Flow matcher value.
7828  * @param[in] item
7829  *   Flow pattern to translate.
 * Sample IDs used in the matching are taken from the device's eCPRI
 * flex parser profile.
7832  */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
                             void *key, const struct rte_flow_item *item)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_item_ecpri *ecpri_m = item->mask;
        const struct rte_flow_item_ecpri *ecpri_v = item->spec;
        void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                     misc_parameters_4);
        void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
        uint32_t *samples;
        void *dw_m;
        void *dw_v;

        /* Nothing to program without a spec. */
        if (!ecpri_v)
                return;
        if (!ecpri_m)
                ecpri_m = &rte_flow_item_ecpri_mask;
        /*
         * Maximal four DW samples are supported in a single matching now.
         * Two are used now for a eCPRI matching:
         * 1. Type: one byte, mask should be 0x00ff0000 in network order
         * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
         *    if any.
         */
        /* An all-zero common-header mask matches nothing; skip. */
        if (!ecpri_m->hdr.common.u32)
                return;
        /* Sample IDs come from the port's eCPRI flex parser profile. */
        samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
        /* Need to take the whole DW as the mask to fill the entry. */
        dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
                            prog_sample_field_value_0);
        dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
                            prog_sample_field_value_0);
        /* Already big endian (network order) in the header. */
        *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
        *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
        /* Sample#0, used for matching type, offset 0. */
        MLX5_SET(fte_match_set_misc4, misc4_m,
                 prog_sample_field_id_0, samples[0]);
        /* It makes no sense to set the sample ID in the mask field. */
        MLX5_SET(fte_match_set_misc4, misc4_v,
                 prog_sample_field_id_0, samples[0]);
        /*
         * Checking if message body part needs to be matched.
         * Some wildcard rules only matching type field should be supported.
         */
        if (ecpri_m->hdr.dummy[0]) {
                switch (ecpri_v->hdr.common.type) {
                case RTE_ECPRI_MSG_TYPE_IQ_DATA:
                case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
                case RTE_ECPRI_MSG_TYPE_DLY_MSR:
                        dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
                                            prog_sample_field_value_1);
                        dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
                                            prog_sample_field_value_1);
                        *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
                        *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
                        /* Sample#1, to match message body, offset 4. */
                        MLX5_SET(fte_match_set_misc4, misc4_m,
                                 prog_sample_field_id_1, samples[1]);
                        MLX5_SET(fte_match_set_misc4, misc4_v,
                                 prog_sample_field_id_1, samples[1]);
                        break;
                default:
                        /* Others, do not match any sample ID. */
                        break;
                }
        }
}
7902
/* All-zero reference buffer used to test whether a header group is unset. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Non-zero (true) when the given header group of the criteria is all zeros. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
        !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
                 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
7908
7909 /**
7910  * Calculate flow matcher enable bitmap.
7911  *
7912  * @param match_criteria
7913  *   Pointer to flow matcher criteria.
7914  *
7915  * @return
7916  *   Bitmap of enabled fields.
7917  */
7918 static uint8_t
7919 flow_dv_matcher_enable(uint32_t *match_criteria)
7920 {
7921         uint8_t match_criteria_enable;
7922
7923         match_criteria_enable =
7924                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7925                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7926         match_criteria_enable |=
7927                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7928                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7929         match_criteria_enable |=
7930                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7931                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7932         match_criteria_enable |=
7933                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7934                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7935         match_criteria_enable |=
7936                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7937                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7938         match_criteria_enable |=
7939                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7940                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7941         return match_criteria_enable;
7942 }
7943
/**
 * Hash-list creation callback: allocate and initialize a flow table entry
 * for the given 64-bit key (table id, direction, domain and dummy flag).
 * Returns the new hash-list entry, or NULL with @p error set on failure.
 */
struct mlx5_hlist_entry *
flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct rte_eth_dev *dev = ctx->dev;
        struct mlx5_flow_tbl_data_entry *tbl_data;
        struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
        struct rte_flow_error *error = ctx->error;
        union mlx5_flow_tbl_key key = { .v64 = key64 };
        struct mlx5_flow_tbl_resource *tbl;
        void *domain;
        uint32_t idx = 0;
        int ret;

        tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
        if (!tbl_data) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "cannot allocate flow table data entry");
                return NULL;
        }
        tbl_data->idx = idx;
        tbl_data->tunnel = tt_prm->tunnel;
        tbl_data->group_id = tt_prm->group_id;
        tbl_data->external = tt_prm->external;
        tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
        tbl_data->is_egress = !!key.direction;
        tbl = &tbl_data->tbl;
        /* A dummy table gets no underlying flow table object. */
        if (key.dummy)
                return &tbl_data->entry;
        /* Select the steering domain: FDB, NIC Tx or NIC Rx. */
        if (key.domain)
                domain = sh->fdb_domain;
        else if (key.direction)
                domain = sh->tx_domain;
        else
                domain = sh->rx_domain;
        ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
        if (ret) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "cannot create flow table object");
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
                return NULL;
        }
        /* Non-root tables (id != 0) also get a jump action targeting them. */
        if (key.table_id) {
                ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
                                        (tbl->obj, &tbl_data->jump.action);
                if (ret) {
                        rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                           NULL,
                                           "cannot create flow jump action");
                        /* Unwind: destroy the table object and the pool slot. */
                        mlx5_flow_os_destroy_flow_tbl(tbl->obj);
                        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
                        return NULL;
                }
        }
        MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
              key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
              key.table_id);
        /* Each table owns a cache list of the matchers created on it. */
        mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
                             flow_dv_matcher_create_cb,
                             flow_dv_matcher_match_cb,
                             flow_dv_matcher_remove_cb);
        return &tbl_data->entry;
}
8012
8013 /**
8014  * Get a flow table.
8015  *
8016  * @param[in, out] dev
8017  *   Pointer to rte_eth_dev structure.
8018  * @param[in] table_id
8019  *   Table id to use.
8020  * @param[in] egress
8021  *   Direction of the table.
8022  * @param[in] transfer
8023  *   E-Switch or NIC flow.
8024  * @param[in] dummy
8025  *   Dummy entry for dv API.
8026  * @param[out] error
8027  *   pointer to error structure.
8028  *
8029  * @return
8030  *   Returns tables resource based on the index, NULL in case of failed.
8031  */
struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
                         uint32_t table_id, uint8_t egress,
                         uint8_t transfer,
                         bool external,
                         const struct mlx5_flow_tunnel *tunnel,
                         uint32_t group_id, uint8_t dummy,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        /* 64-bit key encoding table id, dummy flag, domain and direction. */
        union mlx5_flow_tbl_key table_key = {
                {
                        .table_id = table_id,
                        .dummy = dummy,
                        .domain = !!transfer,
                        .direction = !!egress,
                }
        };
        /* Tunnel offload parameters forwarded to the creation callback. */
        struct mlx5_flow_tbl_tunnel_prm tt_prm = {
                .tunnel = tunnel,
                .group_id = group_id,
                .external = external,
        };
        struct mlx5_flow_cb_ctx ctx = {
                .dev = dev,
                .error = error,
                .data = &tt_prm,
        };
        struct mlx5_hlist_entry *entry;
        struct mlx5_flow_tbl_data_entry *tbl_data;

        /* Look up an existing table by key or create it via the callback. */
        entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot get table");
                return NULL;
        }
        tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
        return &tbl_data->tbl;
}
8073
/**
 * Hash-list removal callback: destroy the flow table's jump action and
 * table object, release the associated tunnel-group mapping when tunnel
 * offload is active, destroy the matcher cache list and free the pool slot.
 */
void
flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
                      struct mlx5_hlist_entry *entry)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_tbl_data_entry *tbl_data =
                container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

        MLX5_ASSERT(entry && sh);
        if (tbl_data->jump.action)
                mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
        if (tbl_data->tbl.obj)
                mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
        if (tbl_data->tunnel_offload && tbl_data->external) {
                struct mlx5_hlist_entry *he;
                struct mlx5_hlist *tunnel_grp_hash;
                struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
                union tunnel_tbl_key tunnel_key = {
                        .tunnel_id = tbl_data->tunnel ?
                                        tbl_data->tunnel->tunnel_id : 0,
                        .group = tbl_data->group_id
                };
                union mlx5_flow_tbl_key table_key = {
                        .v64 = entry->key
                };
                uint32_t table_id = table_key.table_id;

                /* Use the hub's group hash when there is no tunnel object. */
                tunnel_grp_hash = tbl_data->tunnel ?
                                        tbl_data->tunnel->groups :
                                        thub->groups;
                he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
                if (he) {
                        /* Drop the tunnel group -> table id mapping. */
                        struct tunnel_tbl_entry *tte;
                        tte = container_of(he, typeof(*tte), hash);
                        MLX5_ASSERT(tte->flow_table == table_id);
                        mlx5_hlist_remove(tunnel_grp_hash, he);
                        mlx5_free(tte);
                }
                /* Return the translated table id to its pool. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
                                tunnel_flow_tbl_to_id(table_id));
                DRV_LOG(DEBUG,
                        "Table_id %#x tunnel %u group %u released.",
                        table_id,
                        tbl_data->tunnel ?
                        tbl_data->tunnel->tunnel_id : 0,
                        tbl_data->group_id);
        }
        mlx5_cache_list_destroy(&tbl_data->matchers);
        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
8124
8125 /**
8126  * Release a flow table.
8127  *
8128  * @param[in] sh
8129  *   Pointer to device shared structure.
8130  * @param[in] tbl
8131  *   Table resource to be released.
8132  *
8133  * @return
8134  *   Returns 0 if table was released, else return 1;
8135  */
8136 static int
8137 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8138                              struct mlx5_flow_tbl_resource *tbl)
8139 {
8140         struct mlx5_flow_tbl_data_entry *tbl_data =
8141                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8142
8143         if (!tbl)
8144                 return 0;
8145         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8146 }
8147
8148 int
8149 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8150                          struct mlx5_cache_entry *entry, void *cb_ctx)
8151 {
8152         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8153         struct mlx5_flow_dv_matcher *ref = ctx->data;
8154         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8155                                                         entry);
8156
8157         return cur->crc != ref->crc ||
8158                cur->priority != ref->priority ||
8159                memcmp((const void *)cur->mask.buf,
8160                       (const void *)ref->mask.buf, ref->mask.size);
8161 }
8162
/**
 * Cache-list creation callback: allocate a matcher copied from the
 * reference in @p cb_ctx and create its DV matcher object on the table.
 * Returns the new cache entry, or NULL with ctx->error set on failure.
 */
struct mlx5_cache_entry *
flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
                          struct mlx5_cache_entry *entry __rte_unused,
                          void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_matcher *ref = ctx->data;
        struct mlx5_flow_dv_matcher *cache;
        struct mlx5dv_flow_matcher_attr dv_attr = {
                .type = IBV_FLOW_ATTR_NORMAL,
                .match_mask = (void *)&ref->mask,
        };
        struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
                                                            typeof(*tbl), tbl);
        int ret;

        cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
        if (!cache) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create matcher");
                return NULL;
        }
        /* Copy the reference matcher (crc, priority, mask, table). */
        *cache = *ref;
        dv_attr.match_criteria_enable =
                flow_dv_matcher_enable(cache->mask.buf);
        dv_attr.priority = ref->priority;
        if (tbl->is_egress)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
        ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
                                               &cache->matcher_object);
        if (ret) {
                mlx5_free(cache);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create matcher");
                return NULL;
        }
        return &cache->entry;
}
8204
8205 /**
8206  * Register the flow matcher.
8207  *
8208  * @param[in, out] dev
8209  *   Pointer to rte_eth_dev structure.
8210  * @param[in, out] matcher
8211  *   Pointer to flow matcher.
8212  * @param[in, out] key
8213  *   Pointer to flow table key.
8214  * @parm[in, out] dev_flow
8215  *   Pointer to the dev_flow.
8216  * @param[out] error
8217  *   pointer to error structure.
8218  *
8219  * @return
8220  *   0 on success otherwise -errno and errno is set.
8221  */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_matcher *ref,
                         union mlx5_flow_tbl_key *key,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_cache_entry *entry;
        struct mlx5_flow_dv_matcher *cache;
        struct mlx5_flow_tbl_resource *tbl;
        struct mlx5_flow_tbl_data_entry *tbl_data;
        struct mlx5_flow_cb_ctx ctx = {
                .error = error,
                .data = ref,
        };

        /* Acquire (or create) the table this matcher will live on. */
        tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
                                       key->domain, false, NULL, 0, 0, error);
        if (!tbl)
                return -rte_errno;      /* No need to refill the error info */
        tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
        ref->tbl = tbl;
        /* Reuse a matching cached matcher or create a new one. */
        entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
        if (!entry) {
                /* Drop the table reference taken above on failure. */
                flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate ref memory");
        }
        cache = container_of(entry, typeof(*cache), entry);
        dev_flow->handle->dvh.matcher = cache;
        return 0;
}
8255
8256 struct mlx5_hlist_entry *
8257 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8258 {
8259         struct mlx5_dev_ctx_shared *sh = list->ctx;
8260         struct rte_flow_error *error = ctx;
8261         struct mlx5_flow_dv_tag_resource *entry;
8262         uint32_t idx = 0;
8263         int ret;
8264
8265         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8266         if (!entry) {
8267                 rte_flow_error_set(error, ENOMEM,
8268                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8269                                    "cannot allocate resource memory");
8270                 return NULL;
8271         }
8272         entry->idx = idx;
8273         ret = mlx5_flow_os_create_flow_action_tag(key,
8274                                                   &entry->action);
8275         if (ret) {
8276                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8277                 rte_flow_error_set(error, ENOMEM,
8278                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8279                                    NULL, "cannot create action");
8280                 return NULL;
8281         }
8282         return &entry->entry;
8283 }
8284
8285 /**
8286  * Find existing tag resource or create and register a new one.
8287  *
8288  * @param dev[in, out]
8289  *   Pointer to rte_eth_dev structure.
8290  * @param[in, out] tag_be24
8291  *   Tag value in big endian then R-shift 8.
8292  * @parm[in, out] dev_flow
8293  *   Pointer to the dev_flow.
8294  * @param[out] error
8295  *   pointer to error structure.
8296  *
8297  * @return
8298  *   0 on success otherwise -errno and errno is set.
8299  */
8300 static int
8301 flow_dv_tag_resource_register
8302                         (struct rte_eth_dev *dev,
8303                          uint32_t tag_be24,
8304                          struct mlx5_flow *dev_flow,
8305                          struct rte_flow_error *error)
8306 {
8307         struct mlx5_priv *priv = dev->data->dev_private;
8308         struct mlx5_flow_dv_tag_resource *cache_resource;
8309         struct mlx5_hlist_entry *entry;
8310
8311         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8312         if (entry) {
8313                 cache_resource = container_of
8314                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8315                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8316                 dev_flow->dv.tag_resource = cache_resource;
8317                 return 0;
8318         }
8319         return -rte_errno;
8320 }
8321
8322 void
8323 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8324                       struct mlx5_hlist_entry *entry)
8325 {
8326         struct mlx5_dev_ctx_shared *sh = list->ctx;
8327         struct mlx5_flow_dv_tag_resource *tag =
8328                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8329
8330         MLX5_ASSERT(tag && sh && tag->action);
8331         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8332         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8333         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8334 }
8335
8336 /**
8337  * Release the tag.
8338  *
8339  * @param dev
8340  *   Pointer to Ethernet device.
8341  * @param tag_idx
8342  *   Tag index.
8343  *
8344  * @return
8345  *   1 while a reference on it exists, 0 when freed.
8346  */
8347 static int
8348 flow_dv_tag_release(struct rte_eth_dev *dev,
8349                     uint32_t tag_idx)
8350 {
8351         struct mlx5_priv *priv = dev->data->dev_private;
8352         struct mlx5_flow_dv_tag_resource *tag;
8353
8354         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8355         if (!tag)
8356                 return 0;
8357         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8358                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8359         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8360 }
8361
8362 /**
8363  * Translate port ID action to vport.
8364  *
8365  * @param[in] dev
8366  *   Pointer to rte_eth_dev structure.
8367  * @param[in] action
8368  *   Pointer to the port ID action.
8369  * @param[out] dst_port_id
8370  *   The target port ID.
8371  * @param[out] error
8372  *   Pointer to the error structure.
8373  *
8374  * @return
8375  *   0 on success, a negative errno value otherwise and rte_errno is set.
8376  */
8377 static int
8378 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8379                                  const struct rte_flow_action *action,
8380                                  uint32_t *dst_port_id,
8381                                  struct rte_flow_error *error)
8382 {
8383         uint32_t port;
8384         struct mlx5_priv *priv;
8385         const struct rte_flow_action_port_id *conf =
8386                         (const struct rte_flow_action_port_id *)action->conf;
8387
8388         port = conf->original ? dev->data->port_id : conf->id;
8389         priv = mlx5_port_to_eswitch_info(port, false);
8390         if (!priv)
8391                 return rte_flow_error_set(error, -rte_errno,
8392                                           RTE_FLOW_ERROR_TYPE_ACTION,
8393                                           NULL,
8394                                           "No eswitch info was found for port");
8395 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8396         /*
8397          * This parameter is transferred to
8398          * mlx5dv_dr_action_create_dest_ib_port().
8399          */
8400         *dst_port_id = priv->dev_port;
8401 #else
8402         /*
8403          * Legacy mode, no LAG configurations is supported.
8404          * This parameter is transferred to
8405          * mlx5dv_dr_action_create_dest_vport().
8406          */
8407         *dst_port_id = priv->vport_id;
8408 #endif
8409         return 0;
8410 }
8411
/**
 * Create a counter with aging configuration.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] dev_flow
 *   Pointer to the dev_flow; its flow_idx serves as the default aging
 *   context when the age action does not provide one.
 * @param[out] count
 *   Pointer to the counter action configuration.
 * @param[in] age
 *   Pointer to the aging action configuration.
 *
 * @return
 *   Index to flow counter on success, 0 otherwise.
 */
static uint32_t
flow_dv_translate_create_counter(struct rte_eth_dev *dev,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_action_count *count,
				const struct rte_flow_action_age *age)
{
	uint32_t counter;
	struct mlx5_age_param *age_param;

	/* Shared counters are looked up by ID, others freshly allocated. */
	if (count && count->shared)
		counter = flow_dv_counter_get_shared(dev, count->id);
	else
		counter = flow_dv_counter_alloc(dev, !!age);
	/* No aging requested (or allocation failed): nothing more to do. */
	if (!counter || age == NULL)
		return counter;
	age_param  = flow_dv_counter_idx_get_age(dev, counter);
	age_param->context = age->context ? age->context :
		(void *)(uintptr_t)(dev_flow->flow_idx);
	age_param->timeout = age->timeout;
	age_param->port_id = dev->data->port_id;
	/* Reset the hit time and mark the entry as an aging candidate. */
	__atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
	return counter;
}
/**
 * Add Tx queue matcher
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tx_queue *queue_m;
	const struct mlx5_rte_flow_item_tx_queue *queue_v;
	void *misc_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	struct mlx5_txq_ctrl *txq;
	uint32_t queue;


	/* Nothing to match on when either mask or spec is missing. */
	queue_m = (const void *)item->mask;
	if (!queue_m)
		return;
	queue_v = (const void *)item->spec;
	if (!queue_v)
		return;
	/* Translate the ethdev Tx queue index to the device SQ number. */
	txq = mlx5_txq_get(dev, queue_v->queue);
	if (!txq)
		return;
	queue = txq->obj->sq->id;
	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
		 queue & queue_m->queue);
	/* Drop the reference taken by mlx5_txq_get(). */
	mlx5_txq_release(dev, queue_v->queue);
}
8493
/**
 * Set the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] rss_desc
 *   Pointer to the mlx5_flow_rss_desc.
 */
static void
flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
		       struct mlx5_flow_rss_desc *rss_desc)
{
	uint64_t items = dev_flow->handle->layers;
	int rss_inner = 0;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);

	dev_flow->hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	/* RSS level >= 2 requests hashing on the inner packet headers. */
	if (rss_desc->level >= 2) {
		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
		rss_inner = 1;
	}
#endif
	/* L3: pick IPv4 src/dst hash fields per the requested RSS types. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
			else
				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
			else
				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
		}
	}
	/* L4: pick UDP/TCP port hash fields per the requested RSS types. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
		if (rss_types & ETH_RSS_UDP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_UDP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_UDP;
			else
				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
		if (rss_types & ETH_RSS_TCP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_TCP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_TCP;
			else
				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
		}
	}
}
8564
8565 /**
8566  * Prepare an Rx Hash queue.
8567  *
8568  * @param dev
8569  *   Pointer to Ethernet device.
8570  * @param[in] dev_flow
8571  *   Pointer to the mlx5_flow.
8572  * @param[in] rss_desc
8573  *   Pointer to the mlx5_flow_rss_desc.
8574  * @param[out] hrxq_idx
8575  *   Hash Rx queue index.
8576  *
8577  * @return
8578  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
8579  */
8580 static struct mlx5_hrxq *
8581 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8582                      struct mlx5_flow *dev_flow,
8583                      struct mlx5_flow_rss_desc *rss_desc,
8584                      uint32_t *hrxq_idx)
8585 {
8586         struct mlx5_priv *priv = dev->data->dev_private;
8587         struct mlx5_flow_handle *dh = dev_flow->handle;
8588         struct mlx5_hrxq *hrxq;
8589
8590         MLX5_ASSERT(rss_desc->queue_num);
8591         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8592         rss_desc->hash_fields = dev_flow->hash_fields;
8593         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8594         rss_desc->standalone = false;
8595         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8596         if (!*hrxq_idx)
8597                 return NULL;
8598         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8599                               *hrxq_idx);
8600         return hrxq;
8601 }
8602
8603 /**
8604  * Release sample sub action resource.
8605  *
8606  * @param[in, out] dev
8607  *   Pointer to rte_eth_dev structure.
8608  * @param[in] act_res
8609  *   Pointer to sample sub action resource.
8610  */
8611 static void
8612 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
8613                                    struct mlx5_flow_sub_actions_idx *act_res)
8614 {
8615         if (act_res->rix_hrxq) {
8616                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
8617                 act_res->rix_hrxq = 0;
8618         }
8619         if (act_res->rix_encap_decap) {
8620                 flow_dv_encap_decap_resource_release(dev,
8621                                                      act_res->rix_encap_decap);
8622                 act_res->rix_encap_decap = 0;
8623         }
8624         if (act_res->rix_port_id_action) {
8625                 flow_dv_port_id_action_resource_release(dev,
8626                                                 act_res->rix_port_id_action);
8627                 act_res->rix_port_id_action = 0;
8628         }
8629         if (act_res->rix_tag) {
8630                 flow_dv_tag_release(dev, act_res->rix_tag);
8631                 act_res->rix_tag = 0;
8632         }
8633         if (act_res->cnt) {
8634                 flow_dv_counter_release(dev, act_res->cnt);
8635                 act_res->cnt = 0;
8636         }
8637 }
8638
8639 int
8640 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
8641                         struct mlx5_cache_entry *entry, void *cb_ctx)
8642 {
8643         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8644         struct rte_eth_dev *dev = ctx->dev;
8645         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8646         struct mlx5_flow_dv_sample_resource *cache_resource =
8647                         container_of(entry, typeof(*cache_resource), entry);
8648
8649         if (resource->ratio == cache_resource->ratio &&
8650             resource->ft_type == cache_resource->ft_type &&
8651             resource->ft_id == cache_resource->ft_id &&
8652             resource->set_action == cache_resource->set_action &&
8653             !memcmp((void *)&resource->sample_act,
8654                     (void *)&cache_resource->sample_act,
8655                     sizeof(struct mlx5_flow_sub_actions_list))) {
8656                 /*
8657                  * Existing sample action should release the prepared
8658                  * sub-actions reference counter.
8659                  */
8660                 flow_dv_sample_sub_actions_release(dev,
8661                                                 &resource->sample_idx);
8662                 return 0;
8663         }
8664         return 1;
8665 }
8666
/**
 * Cache callback: allocate and create a new sample action resource.
 *
 * Copies the reference resource, creates the "normal path" table one
 * level below the sample table, and builds the DR sampler action
 * (plus a default-miss action in FDB domain).
 *
 * @param[in] list
 *   Pointer to the cache list (unused).
 * @param[in] entry
 *   Pointer to the matched entry (unused).
 * @param[in] cb_ctx
 *   Pointer to a struct mlx5_flow_cb_ctx carrying the device, the
 *   reference resource (ctx->data) and the error structure.
 *
 * @return
 *   Pointer to the new cache entry on success, NULL otherwise and the
 *   error structure is filled.
 */
struct mlx5_cache_entry *
flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
	void **sample_dv_actions = resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *cache_resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new sample resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Create normal path table level */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	cache_resource->normal_path_tbl = tbl;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		/* FDB needs an explicit default-miss sub-action appended. */
		cache_resource->default_miss =
				mlx5_glue->dr_create_flow_action_default_miss();
		if (!cache_resource->default_miss) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"cannot create default miss "
						"action");
			goto error;
		}
		sample_dv_actions[resource->sample_act.actions_num++] =
						cache_resource->default_miss;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = cache_resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = cache_resource->set_action;
	cache_resource->verbs_action =
		mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
	if (!cache_resource->verbs_action) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	cache_resource->idx = idx;
	return &cache_resource->entry;
error:
	/* Undo whatever was created before the failure, in reverse order. */
	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
	    cache_resource->default_miss)
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->default_miss));
	else
		flow_dv_sample_sub_actions_release(dev,
						   &cache_resource->sample_idx);
	if (cache_resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				cache_resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
8761
8762 /**
8763  * Find existing sample resource or create and register a new one.
8764  *
8765  * @param[in, out] dev
8766  *   Pointer to rte_eth_dev structure.
8767  * @param[in] resource
8768  *   Pointer to sample resource.
8769  * @parm[in, out] dev_flow
8770  *   Pointer to the dev_flow.
8771  * @param[out] error
8772  *   pointer to error structure.
8773  *
8774  * @return
8775  *   0 on success otherwise -errno and errno is set.
8776  */
8777 static int
8778 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8779                          struct mlx5_flow_dv_sample_resource *resource,
8780                          struct mlx5_flow *dev_flow,
8781                          struct rte_flow_error *error)
8782 {
8783         struct mlx5_flow_dv_sample_resource *cache_resource;
8784         struct mlx5_cache_entry *entry;
8785         struct mlx5_priv *priv = dev->data->dev_private;
8786         struct mlx5_flow_cb_ctx ctx = {
8787                 .dev = dev,
8788                 .error = error,
8789                 .data = resource,
8790         };
8791
8792         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
8793         if (!entry)
8794                 return -rte_errno;
8795         cache_resource = container_of(entry, typeof(*cache_resource), entry);
8796         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
8797         dev_flow->dv.sample_res = cache_resource;
8798         return 0;
8799 }
8800
8801 int
8802 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
8803                             struct mlx5_cache_entry *entry, void *cb_ctx)
8804 {
8805         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8806         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8807         struct rte_eth_dev *dev = ctx->dev;
8808         struct mlx5_flow_dv_dest_array_resource *cache_resource =
8809                         container_of(entry, typeof(*cache_resource), entry);
8810         uint32_t idx = 0;
8811
8812         if (resource->num_of_dest == cache_resource->num_of_dest &&
8813             resource->ft_type == cache_resource->ft_type &&
8814             !memcmp((void *)cache_resource->sample_act,
8815                     (void *)resource->sample_act,
8816                    (resource->num_of_dest *
8817                    sizeof(struct mlx5_flow_sub_actions_list)))) {
8818                 /*
8819                  * Existing sample action should release the prepared
8820                  * sub-actions reference counter.
8821                  */
8822                 for (idx = 0; idx < resource->num_of_dest; idx++)
8823                         flow_dv_sample_sub_actions_release(dev,
8824                                         &resource->sample_idx[idx]);
8825                 return 0;
8826         }
8827         return 1;
8828 }
8829
/**
 * Cache callback: allocate and create a new destination array resource.
 *
 * Builds per-destination mlx5dv attributes from the reference resource
 * and creates the DR destination-array action in the matching domain
 * (FDB / NIC Rx / NIC Tx).
 *
 * @param[in] list
 *   Pointer to the cache list (unused).
 * @param[in] entry
 *   Pointer to the matched entry (unused).
 * @param[in] cb_ctx
 *   Pointer to a struct mlx5_flow_cb_ctx carrying the device, the
 *   reference resource (ctx->data) and the error structure.
 *
 * @return
 *   Pointer to the new cache entry on success, NULL otherwise and the
 *   error structure is filled.
 */
struct mlx5_cache_entry *
flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_dest_array_resource *cache_resource;
	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0, res_idx = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new destination array resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
					    &res_idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Select the DR domain matching the resource's table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Build one temporary dest attribute per destination. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &resource->sample_act[idx];
		if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
			dest_attr[idx]->dest = sample_act->dr_queue_action;
		} else if (sample_act->action_flags ==
			  (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
			/* Port-id + encap becomes a reformat destination. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
		} else if (sample_act->action_flags ==
			   MLX5_FLOW_ACTION_PORT_ID) {
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
		}
	}
	/* Create a destination array action. */
	cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
						(domain,
						 cache_resource->num_of_dest,
						 dest_attr);
	if (!cache_resource->action) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	cache_resource->idx = res_idx;
	/* The temporary attributes are consumed; free them. */
	for (idx = 0; idx < resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &cache_resource->entry;
error:
	/* Release sub-action references and temporary attributes. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		struct mlx5_flow_sub_actions_idx *act_res =
					&cache_resource->sample_idx[idx];
		if (act_res->rix_hrxq &&
		    !mlx5_hrxq_release(dev,
				act_res->rix_hrxq))
			act_res->rix_hrxq = 0;
		if (act_res->rix_encap_decap &&
			!flow_dv_encap_decap_resource_release(dev,
				act_res->rix_encap_decap))
			act_res->rix_encap_decap = 0;
		if (act_res->rix_port_id_action &&
			!flow_dv_port_id_action_resource_release(dev,
				act_res->rix_port_id_action))
			act_res->rix_port_id_action = 0;
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
	return NULL;
}
8933
8934 /**
8935  * Find existing destination array resource or create and register a new one.
8936  *
8937  * @param[in, out] dev
8938  *   Pointer to rte_eth_dev structure.
8939  * @param[in] resource
8940  *   Pointer to destination array resource.
8941  * @param[in, out] dev_flow
8942  *   Pointer to the dev_flow.
8943  * @param[out] error
8944  *   pointer to error structure.
8945  *
8946  * @return
8947  *   0 on success, otherwise -rte_errno and rte_errno is set.
8948  */
8949 static int
8950 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8951                          struct mlx5_flow_dv_dest_array_resource *resource,
8952                          struct mlx5_flow *dev_flow,
8953                          struct rte_flow_error *error)
8954 {
8955         struct mlx5_flow_dv_dest_array_resource *cache_resource;
8956         struct mlx5_priv *priv = dev->data->dev_private;
8957         struct mlx5_cache_entry *entry;
8958         struct mlx5_flow_cb_ctx ctx = {
8959                 .dev = dev,
8960                 .error = error,
8961                 .data = resource,
8962         };
8963
8964         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
8965         if (!entry)
8966                 return -rte_errno;
8967         cache_resource = container_of(entry, typeof(*cache_resource), entry);
8968         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
8969         dev_flow->dv.dest_array_res = cache_resource;
8970         return 0;
8971 }
8972
8973 /**
8974  * Convert Sample action to DV specification.
8975  *
8976  * @param[in] dev
8977  *   Pointer to rte_eth_dev structure.
8978  * @param[in] action
8979  *   Pointer to action structure.
8980  * @param[in, out] dev_flow
8981  *   Pointer to the mlx5_flow.
8982  * @param[in] attr
8983  *   Pointer to the flow attributes.
8984  * @param[in, out] num_of_dest
8985  *   Pointer to the num of destination.
8986  * @param[in, out] sample_actions
8987  *   Pointer to sample actions list.
8988  * @param[in, out] res
8989  *   Pointer to sample resource.
8990  * @param[out] error
8991  *   Pointer to the error structure.
8992  *
8993  * @return
8994  *   0 on success, a negative errno value otherwise and rte_errno is set.
8995  */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				uint32_t *num_of_dest,
				void **sample_actions,
				struct mlx5_flow_dv_sample_resource *res,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_sample *sample_action;
	const struct rte_flow_action *sub_actions;
	const struct rte_flow_action_queue *queue;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5_flow_sub_actions_idx *sample_idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	uint64_t action_flags = 0;

	MLX5_ASSERT(wks);
	/* Use the nested RSS descriptor slot when translating a sub-flow. */
	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
	sample_act = &res->sample_act;
	sample_idx = &res->sample_idx;
	sample_action = (const struct rte_flow_action_sample *)action->conf;
	res->ratio = sample_action->ratio;
	sub_actions = sample_action->actions;
	/*
	 * Translate every sub-action of the sample list into a DR action
	 * pointer in sample_actions[], recording the backing resource index
	 * in sample_idx so it can be released with the sample resource.
	 */
	for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
		int type = sub_actions->type;
		uint32_t pre_rix = 0;
		void *pre_r;
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		{
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			/* A queue fate needs a dedicated hash Rx queue. */
			queue = sub_actions->conf;
			rss_desc->queue_num = 1;
			rss_desc->queue[0] = queue->index;
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			/*
			 * NOTE(review): the hrxq index is stored on the flow
			 * handle only when a MARK sub-action was already
			 * translated earlier in this list — confirm intent.
			 */
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK:
		{
			uint32_t tag_be = mlx5_flow_mark_set
				(((const struct rte_flow_action_mark *)
				(sub_actions->conf))->id);

			dev_flow->handle->mark = 1;
			pre_rix = dev_flow->handle->dvh.rix_tag;
			/* Save the mark resource before sample */
			pre_r = dev_flow->dv.tag_resource;
			/*
			 * Registering the tag overwrites the dev_flow tag
			 * fields; the saved values are restored below so the
			 * normal path keeps its own tag resource.
			 */
			if (flow_dv_tag_resource_register(dev, tag_be,
						  dev_flow, error))
				return -rte_errno;
			MLX5_ASSERT(dev_flow->dv.tag_resource);
			sample_act->dr_tag_action =
				dev_flow->dv.tag_resource->action;
			sample_idx->rix_tag =
				dev_flow->handle->dvh.rix_tag;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_tag_action;
			/* Recover the mark resource after sample */
			dev_flow->dv.tag_resource = pre_r;
			dev_flow->handle->dvh.rix_tag = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT:
		{
			uint32_t counter;

			/* Counter dedicated to the sampled traffic. */
			counter = flow_dv_translate_create_counter(dev,
					dev_flow, sub_actions->conf, 0);
			if (!counter)
				return rte_flow_error_set
						(error, rte_errno,
						 RTE_FLOW_ERROR_TYPE_ACTION,
						 NULL,
						 "cannot create counter"
						 " object.");
			sample_idx->cnt = counter;
			sample_act->dr_cnt_action =
				  (flow_dv_counter_get_by_idx(dev,
				  counter, NULL))->action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_cnt_action;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
		{
			struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
			uint32_t port_id = 0;

			memset(&port_id_resource, 0, sizeof(port_id_resource));
			/* Save the port id resource before sample */
			pre_rix = dev_flow->handle->rix_port_id_action;
			pre_r = dev_flow->dv.port_id_action;
			if (flow_dv_translate_action_port_id(dev, sub_actions,
							     &port_id, error))
				return -rte_errno;
			port_id_resource.port_id = port_id;
			if (flow_dv_port_id_action_resource_register
			    (dev, &port_id_resource, dev_flow, error))
				return -rte_errno;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			sample_idx->rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_port_id_action;
			/* Recover the port id resource after sample */
			dev_flow->dv.port_id_action = pre_r;
			dev_flow->handle->rix_port_id_action = pre_rix;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Save the encap resource before sample */
			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
			pre_r = dev_flow->dv.encap_decap;
			if (flow_dv_create_action_l2_encap(dev, sub_actions,
							   dev_flow,
							   attr->transfer,
							   error))
				return -rte_errno;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			sample_idx->rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_encap_action;
			/* Recover the encap resource after sample */
			dev_flow->dv.encap_decap = pre_r;
			dev_flow->handle->dvh.rix_encap_decap = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Not support for sampler action");
		}
	}
	sample_act->action_flags = action_flags;
	res->ft_id = dev_flow->dv.group;
	if (attr->transfer) {
		/*
		 * On the FDB (transfer) domain build a SET action writing
		 * the source vport metadata tag into register C0.
		 */
		union {
			uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
			uint64_t set_action;
		} action_ctx = { .set_action = 0 };

		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		MLX5_SET(set_action_in, action_ctx.action_in, action_type,
			 MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, action_ctx.action_in, field,
			 MLX5_MODI_META_REG_C_0);
		MLX5_SET(set_action_in, action_ctx.action_in, data,
			 priv->vport_meta_tag);
		res->set_action = action_ctx.set_action;
	} else if (attr->ingress) {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	} else {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
	}
	return 0;
}
9184
9185 /**
9186  * Create the sample action resource and, for mirroring, the destination array.
9187  *
9188  * @param[in] dev
9189  *   Pointer to rte_eth_dev structure.
9190  * @param[in, out] dev_flow
9191  *   Pointer to the mlx5_flow.
9192  * @param[in] num_of_dest
9193  *   The num of destination.
9194  * @param[in, out] res
9195  *   Pointer to sample resource.
9196  * @param[in, out] mdest_res
9197  *   Pointer to destination array resource.
9198  * @param[in] sample_actions
9199  *   Pointer to sample path actions list.
9200  * @param[in] action_flags
9201  *   Holds the actions detected until now.
9202  * @param[out] error
9203  *   Pointer to the error structure.
9204  *
9205  * @return
9206  *   0 on success, a negative errno value otherwise and rte_errno is set.
9207  */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
			     struct mlx5_flow *dev_flow,
			     uint32_t num_of_dest,
			     struct mlx5_flow_dv_sample_resource *res,
			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
			     void **sample_actions,
			     uint64_t action_flags,
			     struct rte_flow_error *error)
{
	/* update normal path action resource into last index of array */
	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
	struct mlx5_flow_sub_actions_list *sample_act =
					&mdest_res->sample_act[dest_index];
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	uint32_t normal_idx = 0;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx;

	MLX5_ASSERT(wks);
	/* Use the nested RSS descriptor slot when handling a sub-flow. */
	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
	if (num_of_dest > 1) {
		/*
		 * Mirroring case: collect the normal-path fate actions into
		 * the last slot of the destination array, then register the
		 * whole array (sample path occupies slot 0).
		 */
		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
			/* Handle QP action for mirroring */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
				     (error, rte_errno,
				      RTE_FLOW_ERROR_TYPE_ACTION,
				      NULL,
				      "cannot create rx queue");
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
			sample_act->dr_queue_action = hrxq->action;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
			/* Record the normal-path encap resource index. */
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
			/* Record the normal-path port-id action index. */
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
		}
		sample_act->actions_num = normal_idx;
		/* update sample action resource into first index of array */
		mdest_res->ft_type = res->ft_type;
		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
				sizeof(struct mlx5_flow_sub_actions_idx));
		memcpy(&mdest_res->sample_act[0], &res->sample_act,
				sizeof(struct mlx5_flow_sub_actions_list));
		mdest_res->num_of_dest = num_of_dest;
		if (flow_dv_dest_array_resource_register(dev, mdest_res,
							 dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't create sample "
						  "action");
	} else {
		/* Single destination: register the plain sample resource. */
		res->sub_actions = sample_actions;
		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "can't create sample action");
	}
	return 0;
}
9286
9287 /**
9288  * Fill the flow with DV spec, lock free
9289  * (mutex should be acquired by caller).
9290  *
9291  * @param[in] dev
9292  *   Pointer to rte_eth_dev structure.
9293  * @param[in, out] dev_flow
9294  *   Pointer to the sub flow.
9295  * @param[in] attr
9296  *   Pointer to the flow attributes.
9297  * @param[in] items
9298  *   Pointer to the list of items.
9299  * @param[in] actions
9300  *   Pointer to the list of actions.
9301  * @param[out] error
9302  *   Pointer to the error structure.
9303  *
9304  * @return
9305  *   0 on success, a negative errno value otherwise and rte_errno is set.
9306  */
9307 static int
9308 __flow_dv_translate(struct rte_eth_dev *dev,
9309                     struct mlx5_flow *dev_flow,
9310                     const struct rte_flow_attr *attr,
9311                     const struct rte_flow_item items[],
9312                     const struct rte_flow_action actions[],
9313                     struct rte_flow_error *error)
9314 {
9315         struct mlx5_priv *priv = dev->data->dev_private;
9316         struct mlx5_dev_config *dev_conf = &priv->config;
9317         struct rte_flow *flow = dev_flow->flow;
9318         struct mlx5_flow_handle *handle = dev_flow->handle;
9319         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9320         struct mlx5_flow_rss_desc *rss_desc;
9321         uint64_t item_flags = 0;
9322         uint64_t last_item = 0;
9323         uint64_t action_flags = 0;
9324         uint64_t priority = attr->priority;
9325         struct mlx5_flow_dv_matcher matcher = {
9326                 .mask = {
9327                         .size = sizeof(matcher.mask.buf) -
9328                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9329                 },
9330         };
9331         int actions_n = 0;
9332         bool actions_end = false;
9333         union {
9334                 struct mlx5_flow_dv_modify_hdr_resource res;
9335                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9336                             sizeof(struct mlx5_modification_cmd) *
9337                             (MLX5_MAX_MODIFY_NUM + 1)];
9338         } mhdr_dummy;
9339         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9340         const struct rte_flow_action_count *count = NULL;
9341         const struct rte_flow_action_age *age = NULL;
9342         union flow_dv_attr flow_attr = { .attr = 0 };
9343         uint32_t tag_be;
9344         union mlx5_flow_tbl_key tbl_key;
9345         uint32_t modify_action_position = UINT32_MAX;
9346         void *match_mask = matcher.mask.buf;
9347         void *match_value = dev_flow->dv.value.buf;
9348         uint8_t next_protocol = 0xff;
9349         struct rte_vlan_hdr vlan = { 0 };
9350         struct mlx5_flow_dv_dest_array_resource mdest_res;
9351         struct mlx5_flow_dv_sample_resource sample_res;
9352         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9353         struct mlx5_flow_sub_actions_list *sample_act;
9354         uint32_t sample_act_pos = UINT32_MAX;
9355         uint32_t num_of_dest = 0;
9356         int tmp_actions_n = 0;
9357         uint32_t table;
9358         int ret = 0;
9359         const struct mlx5_flow_tunnel *tunnel;
9360         struct flow_grp_info grp_info = {
9361                 .external = !!dev_flow->external,
9362                 .transfer = !!attr->transfer,
9363                 .fdb_def_rule = !!priv->fdb_def_rule,
9364         };
9365
9366         MLX5_ASSERT(wks);
9367         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9368         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9369         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9370         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9371                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9372         /* update normal path action resource into last index of array */
9373         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9374         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9375                  flow_items_to_tunnel(items) :
9376                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9377                  flow_actions_to_tunnel(actions) :
9378                  dev_flow->tunnel ? dev_flow->tunnel : NULL;
9379         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9380                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9381         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9382                                 (dev, tunnel, attr, items, actions);
9383         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9384                                        grp_info, error);
9385         if (ret)
9386                 return ret;
9387         dev_flow->dv.group = table;
9388         if (attr->transfer)
9389                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9390         if (priority == MLX5_FLOW_PRIO_RSVD)
9391                 priority = dev_conf->flow_prio - 1;
9392         /* number of actions must be set to 0 in case of dirty stack. */
9393         mhdr_res->actions_num = 0;
9394         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9395                 /*
9396                  * do not add decap action if match rule drops packet
9397                  * HW rejects rules with decap & drop
9398                  */
9399                 bool add_decap = true;
9400                 const struct rte_flow_action *ptr = actions;
9401                 struct mlx5_flow_tbl_resource *tbl;
9402
9403                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9404                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9405                                 add_decap = false;
9406                                 break;
9407                         }
9408                 }
9409                 if (add_decap) {
9410                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9411                                                            attr->transfer,
9412                                                            error))
9413                                 return -rte_errno;
9414                         dev_flow->dv.actions[actions_n++] =
9415                                         dev_flow->dv.encap_decap->action;
9416                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9417                 }
9418                 /*
9419                  * bind table_id with <group, table> for tunnel match rule.
9420                  * Tunnel set rule establishes that bind in JUMP action handler.
9421                  * Required for scenario when application creates tunnel match
9422                  * rule before tunnel set rule.
9423                  */
9424                 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9425                                                attr->transfer,
9426                                                !!dev_flow->external, tunnel,
9427                                                attr->group, 0, error);
9428                 if (!tbl)
9429                         return rte_flow_error_set
9430                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
9431                                actions, "cannot register tunnel group");
9432         }
9433         for (; !actions_end ; actions++) {
9434                 const struct rte_flow_action_queue *queue;
9435                 const struct rte_flow_action_rss *rss;
9436                 const struct rte_flow_action *action = actions;
9437                 const uint8_t *rss_key;
9438                 const struct rte_flow_action_meter *mtr;
9439                 struct mlx5_flow_tbl_resource *tbl;
9440                 uint32_t port_id = 0;
9441                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9442                 int action_type = actions->type;
9443                 const struct rte_flow_action *found_action = NULL;
9444                 struct mlx5_flow_meter *fm = NULL;
9445                 uint32_t jump_group = 0;
9446
9447                 if (!mlx5_flow_os_action_supported(action_type))
9448                         return rte_flow_error_set(error, ENOTSUP,
9449                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9450                                                   actions,
9451                                                   "action not supported");
9452                 switch (action_type) {
9453                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9454                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9455                         break;
9456                 case RTE_FLOW_ACTION_TYPE_VOID:
9457                         break;
9458                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9459                         if (flow_dv_translate_action_port_id(dev, action,
9460                                                              &port_id, error))
9461                                 return -rte_errno;
9462                         port_id_resource.port_id = port_id;
9463                         MLX5_ASSERT(!handle->rix_port_id_action);
9464                         if (flow_dv_port_id_action_resource_register
9465                             (dev, &port_id_resource, dev_flow, error))
9466                                 return -rte_errno;
9467                         dev_flow->dv.actions[actions_n++] =
9468                                         dev_flow->dv.port_id_action->action;
9469                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9470                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9471                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9472                         num_of_dest++;
9473                         break;
9474                 case RTE_FLOW_ACTION_TYPE_FLAG:
9475                         action_flags |= MLX5_FLOW_ACTION_FLAG;
9476                         dev_flow->handle->mark = 1;
9477                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9478                                 struct rte_flow_action_mark mark = {
9479                                         .id = MLX5_FLOW_MARK_DEFAULT,
9480                                 };
9481
9482                                 if (flow_dv_convert_action_mark(dev, &mark,
9483                                                                 mhdr_res,
9484                                                                 error))
9485                                         return -rte_errno;
9486                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9487                                 break;
9488                         }
9489                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9490                         /*
9491                          * Only one FLAG or MARK is supported per device flow
9492                          * right now. So the pointer to the tag resource must be
9493                          * zero before the register process.
9494                          */
9495                         MLX5_ASSERT(!handle->dvh.rix_tag);
9496                         if (flow_dv_tag_resource_register(dev, tag_be,
9497                                                           dev_flow, error))
9498                                 return -rte_errno;
9499                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9500                         dev_flow->dv.actions[actions_n++] =
9501                                         dev_flow->dv.tag_resource->action;
9502                         break;
9503                 case RTE_FLOW_ACTION_TYPE_MARK:
9504                         action_flags |= MLX5_FLOW_ACTION_MARK;
9505                         dev_flow->handle->mark = 1;
9506                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9507                                 const struct rte_flow_action_mark *mark =
9508                                         (const struct rte_flow_action_mark *)
9509                                                 actions->conf;
9510
9511                                 if (flow_dv_convert_action_mark(dev, mark,
9512                                                                 mhdr_res,
9513                                                                 error))
9514                                         return -rte_errno;
9515                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9516                                 break;
9517                         }
9518                         /* Fall-through */
9519                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9520                         /* Legacy (non-extensive) MARK action. */
9521                         tag_be = mlx5_flow_mark_set
9522                               (((const struct rte_flow_action_mark *)
9523                                (actions->conf))->id);
9524                         MLX5_ASSERT(!handle->dvh.rix_tag);
9525                         if (flow_dv_tag_resource_register(dev, tag_be,
9526                                                           dev_flow, error))
9527                                 return -rte_errno;
9528                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9529                         dev_flow->dv.actions[actions_n++] =
9530                                         dev_flow->dv.tag_resource->action;
9531                         break;
9532                 case RTE_FLOW_ACTION_TYPE_SET_META:
9533                         if (flow_dv_convert_action_set_meta
9534                                 (dev, mhdr_res, attr,
9535                                  (const struct rte_flow_action_set_meta *)
9536                                   actions->conf, error))
9537                                 return -rte_errno;
9538                         action_flags |= MLX5_FLOW_ACTION_SET_META;
9539                         break;
9540                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9541                         if (flow_dv_convert_action_set_tag
9542                                 (dev, mhdr_res,
9543                                  (const struct rte_flow_action_set_tag *)
9544                                   actions->conf, error))
9545                                 return -rte_errno;
9546                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9547                         break;
9548                 case RTE_FLOW_ACTION_TYPE_DROP:
9549                         action_flags |= MLX5_FLOW_ACTION_DROP;
9550                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9551                         break;
9552                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9553                         queue = actions->conf;
9554                         rss_desc->queue_num = 1;
9555                         rss_desc->queue[0] = queue->index;
9556                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9557                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9558                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9559                         num_of_dest++;
9560                         break;
9561                 case RTE_FLOW_ACTION_TYPE_RSS:
9562                         rss = actions->conf;
9563                         memcpy(rss_desc->queue, rss->queue,
9564                                rss->queue_num * sizeof(uint16_t));
9565                         rss_desc->queue_num = rss->queue_num;
9566                         /* NULL RSS key indicates default RSS key. */
9567                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9568                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9569                         /*
9570                          * rss->level and rss->types should be set in advance
9571                          * when expanding items for RSS.
9572                          */
9573                         action_flags |= MLX5_FLOW_ACTION_RSS;
9574                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9575                         break;
9576                 case RTE_FLOW_ACTION_TYPE_AGE:
9577                 case RTE_FLOW_ACTION_TYPE_COUNT:
9578                         if (!dev_conf->devx) {
9579                                 return rte_flow_error_set
9580                                               (error, ENOTSUP,
9581                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9582                                                NULL,
9583                                                "count action not supported");
9584                         }
9585                         /* Save information first, will apply later. */
9586                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9587                                 count = action->conf;
9588                         else
9589                                 age = action->conf;
9590                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9591                         break;
9592                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9593                         dev_flow->dv.actions[actions_n++] =
9594                                                 priv->sh->pop_vlan_action;
9595                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9596                         break;
9597                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9598                         if (!(action_flags &
9599                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9600                                 flow_dev_get_vlan_info_from_items(items, &vlan);
9601                         vlan.eth_proto = rte_be_to_cpu_16
9602                              ((((const struct rte_flow_action_of_push_vlan *)
9603                                                    actions->conf)->ethertype));
9604                         found_action = mlx5_flow_find_action
9605                                         (actions + 1,
9606                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9607                         if (found_action)
9608                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9609                         found_action = mlx5_flow_find_action
9610                                         (actions + 1,
9611                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9612                         if (found_action)
9613                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9614                         if (flow_dv_create_action_push_vlan
9615                                             (dev, attr, &vlan, dev_flow, error))
9616                                 return -rte_errno;
9617                         dev_flow->dv.actions[actions_n++] =
9618                                         dev_flow->dv.push_vlan_res->action;
9619                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9620                         break;
9621                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9622                         /* of_vlan_push action handled this action */
9623                         MLX5_ASSERT(action_flags &
9624                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9625                         break;
9626                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9627                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9628                                 break;
9629                         flow_dev_get_vlan_info_from_items(items, &vlan);
9630                         mlx5_update_vlan_vid_pcp(actions, &vlan);
9631                         /* If no VLAN push - this is a modify header action */
9632                         if (flow_dv_convert_action_modify_vlan_vid
9633                                                 (mhdr_res, actions, error))
9634                                 return -rte_errno;
9635                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9636                         break;
9637                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9638                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9639                         if (flow_dv_create_action_l2_encap(dev, actions,
9640                                                            dev_flow,
9641                                                            attr->transfer,
9642                                                            error))
9643                                 return -rte_errno;
9644                         dev_flow->dv.actions[actions_n++] =
9645                                         dev_flow->dv.encap_decap->action;
9646                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9647                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9648                                 sample_act->action_flags |=
9649                                                         MLX5_FLOW_ACTION_ENCAP;
9650                         break;
9651                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9652                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9653                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9654                                                            attr->transfer,
9655                                                            error))
9656                                 return -rte_errno;
9657                         dev_flow->dv.actions[actions_n++] =
9658                                         dev_flow->dv.encap_decap->action;
9659                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9660                         break;
9661                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9662                         /* Handle encap with preceding decap. */
9663                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9664                                 if (flow_dv_create_action_raw_encap
9665                                         (dev, actions, dev_flow, attr, error))
9666                                         return -rte_errno;
9667                                 dev_flow->dv.actions[actions_n++] =
9668                                         dev_flow->dv.encap_decap->action;
9669                         } else {
9670                                 /* Handle encap without preceding decap. */
9671                                 if (flow_dv_create_action_l2_encap
9672                                     (dev, actions, dev_flow, attr->transfer,
9673                                      error))
9674                                         return -rte_errno;
9675                                 dev_flow->dv.actions[actions_n++] =
9676                                         dev_flow->dv.encap_decap->action;
9677                         }
9678                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9679                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9680                                 sample_act->action_flags |=
9681                                                         MLX5_FLOW_ACTION_ENCAP;
9682                         break;
9683                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
9684                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9685                                 ;
9686                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9687                                 if (flow_dv_create_action_l2_decap
9688                                     (dev, dev_flow, attr->transfer, error))
9689                                         return -rte_errno;
9690                                 dev_flow->dv.actions[actions_n++] =
9691                                         dev_flow->dv.encap_decap->action;
9692                         }
9693                         /* If decap is followed by encap, handle it at encap. */
9694                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9695                         break;
9696                 case RTE_FLOW_ACTION_TYPE_JUMP:
9697                         jump_group = ((const struct rte_flow_action_jump *)
9698                                                         action->conf)->group;
9699                         grp_info.std_tbl_fix = 0;
9700                         ret = mlx5_flow_group_to_table(dev, tunnel,
9701                                                        jump_group,
9702                                                        &table,
9703                                                        grp_info, error);
9704                         if (ret)
9705                                 return ret;
9706                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9707                                                        attr->transfer,
9708                                                        !!dev_flow->external,
9709                                                        tunnel, jump_group, 0,
9710                                                        error);
9711                         if (!tbl)
9712                                 return rte_flow_error_set
9713                                                 (error, errno,
9714                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9715                                                  NULL,
9716                                                  "cannot create jump action.");
9717                         if (flow_dv_jump_tbl_resource_register
9718                             (dev, tbl, dev_flow, error)) {
9719                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9720                                 return rte_flow_error_set
9721                                                 (error, errno,
9722                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9723                                                  NULL,
9724                                                  "cannot create jump action.");
9725                         }
9726                         dev_flow->dv.actions[actions_n++] =
9727                                         dev_flow->dv.jump->action;
9728                         action_flags |= MLX5_FLOW_ACTION_JUMP;
9729                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9730                         break;
9731                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9732                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9733                         if (flow_dv_convert_action_modify_mac
9734                                         (mhdr_res, actions, error))
9735                                 return -rte_errno;
9736                         action_flags |= actions->type ==
9737                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
9738                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
9739                                         MLX5_FLOW_ACTION_SET_MAC_DST;
9740                         break;
9741                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
9742                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
9743                         if (flow_dv_convert_action_modify_ipv4
9744                                         (mhdr_res, actions, error))
9745                                 return -rte_errno;
9746                         action_flags |= actions->type ==
9747                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
9748                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
9749                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
9750                         break;
9751                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
9752                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
9753                         if (flow_dv_convert_action_modify_ipv6
9754                                         (mhdr_res, actions, error))
9755                                 return -rte_errno;
9756                         action_flags |= actions->type ==
9757                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
9758                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
9759                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
9760                         break;
9761                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
9762                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
9763                         if (flow_dv_convert_action_modify_tp
9764                                         (mhdr_res, actions, items,
9765                                          &flow_attr, dev_flow, !!(action_flags &
9766                                          MLX5_FLOW_ACTION_DECAP), error))
9767                                 return -rte_errno;
9768                         action_flags |= actions->type ==
9769                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
9770                                         MLX5_FLOW_ACTION_SET_TP_SRC :
9771                                         MLX5_FLOW_ACTION_SET_TP_DST;
9772                         break;
9773                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
9774                         if (flow_dv_convert_action_modify_dec_ttl
9775                                         (mhdr_res, items, &flow_attr, dev_flow,
9776                                          !!(action_flags &
9777                                          MLX5_FLOW_ACTION_DECAP), error))
9778                                 return -rte_errno;
9779                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
9780                         break;
9781                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
9782                         if (flow_dv_convert_action_modify_ttl
9783                                         (mhdr_res, actions, items, &flow_attr,
9784                                          dev_flow, !!(action_flags &
9785                                          MLX5_FLOW_ACTION_DECAP), error))
9786                                 return -rte_errno;
9787                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
9788                         break;
9789                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
9790                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
9791                         if (flow_dv_convert_action_modify_tcp_seq
9792                                         (mhdr_res, actions, error))
9793                                 return -rte_errno;
9794                         action_flags |= actions->type ==
9795                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
9796                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
9797                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
9798                         break;
9799
9800                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
9801                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
9802                         if (flow_dv_convert_action_modify_tcp_ack
9803                                         (mhdr_res, actions, error))
9804                                 return -rte_errno;
9805                         action_flags |= actions->type ==
9806                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
9807                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
9808                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
9809                         break;
9810                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
9811                         if (flow_dv_convert_action_set_reg
9812                                         (mhdr_res, actions, error))
9813                                 return -rte_errno;
9814                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9815                         break;
9816                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
9817                         if (flow_dv_convert_action_copy_mreg
9818                                         (dev, mhdr_res, actions, error))
9819                                 return -rte_errno;
9820                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9821                         break;
9822                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
9823                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
9824                         dev_flow->handle->fate_action =
9825                                         MLX5_FLOW_FATE_DEFAULT_MISS;
9826                         break;
9827                 case RTE_FLOW_ACTION_TYPE_METER:
9828                         mtr = actions->conf;
9829                         if (!flow->meter) {
9830                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
9831                                                             attr, error);
9832                                 if (!fm)
9833                                         return rte_flow_error_set(error,
9834                                                 rte_errno,
9835                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9836                                                 NULL,
9837                                                 "meter not found "
9838                                                 "or invalid parameters");
9839                                 flow->meter = fm->idx;
9840                         }
9841                         /* Set the meter action. */
9842                         if (!fm) {
9843                                 fm = mlx5_ipool_get(priv->sh->ipool
9844                                                 [MLX5_IPOOL_MTR], flow->meter);
9845                                 if (!fm)
9846                                         return rte_flow_error_set(error,
9847                                                 rte_errno,
9848                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9849                                                 NULL,
9850                                                 "meter not found "
9851                                                 "or invalid parameters");
9852                         }
9853                         dev_flow->dv.actions[actions_n++] =
9854                                 fm->mfts->meter_action;
9855                         action_flags |= MLX5_FLOW_ACTION_METER;
9856                         break;
9857                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
9858                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
9859                                                               actions, error))
9860                                 return -rte_errno;
9861                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
9862                         break;
9863                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
9864                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
9865                                                               actions, error))
9866                                 return -rte_errno;
9867                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
9868                         break;
9869                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
9870                         sample_act_pos = actions_n;
9871                         ret = flow_dv_translate_action_sample(dev,
9872                                                               actions,
9873                                                               dev_flow, attr,
9874                                                               &num_of_dest,
9875                                                               sample_actions,
9876                                                               &sample_res,
9877                                                               error);
9878                         if (ret < 0)
9879                                 return ret;
9880                         actions_n++;
9881                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
9882                         /* put encap action into group if work with port id */
9883                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
9884                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
9885                                 sample_act->action_flags |=
9886                                                         MLX5_FLOW_ACTION_ENCAP;
9887                         break;
9888                 case RTE_FLOW_ACTION_TYPE_END:
9889                         actions_end = true;
9890                         if (mhdr_res->actions_num) {
9891                                 /* create modify action if needed. */
9892                                 if (flow_dv_modify_hdr_resource_register
9893                                         (dev, mhdr_res, dev_flow, error))
9894                                         return -rte_errno;
9895                                 dev_flow->dv.actions[modify_action_position] =
9896                                         handle->dvh.modify_hdr->action;
9897                         }
9898                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
9899                                 flow->counter =
9900                                         flow_dv_translate_create_counter(dev,
9901                                                 dev_flow, count, age);
9902
9903                                 if (!flow->counter)
9904                                         return rte_flow_error_set
9905                                                 (error, rte_errno,
9906                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9907                                                 NULL,
9908                                                 "cannot create counter"
9909                                                 " object.");
9910                                 dev_flow->dv.actions[actions_n] =
9911                                           (flow_dv_counter_get_by_idx(dev,
9912                                           flow->counter, NULL))->action;
9913                                 actions_n++;
9914                         }
9915                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
9916                                 ret = flow_dv_create_action_sample(dev,
9917                                                           dev_flow,
9918                                                           num_of_dest,
9919                                                           &sample_res,
9920                                                           &mdest_res,
9921                                                           sample_actions,
9922                                                           action_flags,
9923                                                           error);
9924                                 if (ret < 0)
9925                                         return rte_flow_error_set
9926                                                 (error, rte_errno,
9927                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9928                                                 NULL,
9929                                                 "cannot create sample action");
9930                                 if (num_of_dest > 1) {
9931                                         dev_flow->dv.actions[sample_act_pos] =
9932                                         dev_flow->dv.dest_array_res->action;
9933                                 } else {
9934                                         dev_flow->dv.actions[sample_act_pos] =
9935                                         dev_flow->dv.sample_res->verbs_action;
9936                                 }
9937                         }
9938                         break;
9939                 default:
9940                         break;
9941                 }
9942                 if (mhdr_res->actions_num &&
9943                     modify_action_position == UINT32_MAX)
9944                         modify_action_position = actions_n++;
9945         }
9946         /*
9947          * For multiple destinations (sample action with ratio=1), the encap
9948          * action and the port id action will be combined into a group action.
9949          * So we need to remove those original actions from the flow and
9950          * use only the sample action instead.
9951          */
9952         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
9953                 int i;
9954                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9955
9956                 for (i = 0; i < actions_n; i++) {
9957                         if ((sample_act->dr_encap_action &&
9958                                 sample_act->dr_encap_action ==
9959                                 dev_flow->dv.actions[i]) ||
9960                                 (sample_act->dr_port_id_action &&
9961                                 sample_act->dr_port_id_action ==
9962                                 dev_flow->dv.actions[i]))
9963                                 continue;
9964                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
9965                 }
9966                 memcpy((void *)dev_flow->dv.actions,
9967                                 (void *)temp_actions,
9968                                 tmp_actions_n * sizeof(void *));
9969                 actions_n = tmp_actions_n;
9970         }
9971         dev_flow->dv.actions_n = actions_n;
9972         dev_flow->act_flags = action_flags;
9973         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
9974                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
9975                 int item_type = items->type;
9976
9977                 if (!mlx5_flow_os_item_supported(item_type))
9978                         return rte_flow_error_set(error, ENOTSUP,
9979                                                   RTE_FLOW_ERROR_TYPE_ITEM,
9980                                                   NULL, "item not supported");
9981                 switch (item_type) {
9982                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
9983                         flow_dv_translate_item_port_id(dev, match_mask,
9984                                                        match_value, items);
9985                         last_item = MLX5_FLOW_ITEM_PORT_ID;
9986                         break;
9987                 case RTE_FLOW_ITEM_TYPE_ETH:
9988                         flow_dv_translate_item_eth(match_mask, match_value,
9989                                                    items, tunnel,
9990                                                    dev_flow->dv.group);
9991                         matcher.priority = action_flags &
9992                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
9993                                         !dev_flow->external ?
9994                                         MLX5_PRIORITY_MAP_L3 :
9995                                         MLX5_PRIORITY_MAP_L2;
9996                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
9997                                              MLX5_FLOW_LAYER_OUTER_L2;
9998                         break;
9999                 case RTE_FLOW_ITEM_TYPE_VLAN:
10000                         flow_dv_translate_item_vlan(dev_flow,
10001                                                     match_mask, match_value,
10002                                                     items, tunnel,
10003                                                     dev_flow->dv.group);
10004                         matcher.priority = MLX5_PRIORITY_MAP_L2;
10005                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
10006                                               MLX5_FLOW_LAYER_INNER_VLAN) :
10007                                              (MLX5_FLOW_LAYER_OUTER_L2 |
10008                                               MLX5_FLOW_LAYER_OUTER_VLAN);
10009                         break;
10010                 case RTE_FLOW_ITEM_TYPE_IPV4:
10011                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10012                                                   &item_flags, &tunnel);
10013                         flow_dv_translate_item_ipv4(match_mask, match_value,
10014                                                     items, tunnel,
10015                                                     dev_flow->dv.group);
10016                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10017                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10018                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
10019                         if (items->mask != NULL &&
10020                             ((const struct rte_flow_item_ipv4 *)
10021                              items->mask)->hdr.next_proto_id) {
10022                                 next_protocol =
10023                                         ((const struct rte_flow_item_ipv4 *)
10024                                          (items->spec))->hdr.next_proto_id;
10025                                 next_protocol &=
10026                                         ((const struct rte_flow_item_ipv4 *)
10027                                          (items->mask))->hdr.next_proto_id;
10028                         } else {
10029                                 /* Reset for inner layer. */
10030                                 next_protocol = 0xff;
10031                         }
10032                         break;
10033                 case RTE_FLOW_ITEM_TYPE_IPV6:
10034                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10035                                                   &item_flags, &tunnel);
10036                         flow_dv_translate_item_ipv6(match_mask, match_value,
10037                                                     items, tunnel,
10038                                                     dev_flow->dv.group);
10039                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10040                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10041                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10042                         if (items->mask != NULL &&
10043                             ((const struct rte_flow_item_ipv6 *)
10044                              items->mask)->hdr.proto) {
10045                                 next_protocol =
10046                                         ((const struct rte_flow_item_ipv6 *)
10047                                          items->spec)->hdr.proto;
10048                                 next_protocol &=
10049                                         ((const struct rte_flow_item_ipv6 *)
10050                                          items->mask)->hdr.proto;
10051                         } else {
10052                                 /* Reset for inner layer. */
10053                                 next_protocol = 0xff;
10054                         }
10055                         break;
10056                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10057                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
10058                                                              match_value,
10059                                                              items, tunnel);
10060                         last_item = tunnel ?
10061                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10062                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10063                         if (items->mask != NULL &&
10064                             ((const struct rte_flow_item_ipv6_frag_ext *)
10065                              items->mask)->hdr.next_header) {
10066                                 next_protocol =
10067                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10068                                  items->spec)->hdr.next_header;
10069                                 next_protocol &=
10070                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10071                                  items->mask)->hdr.next_header;
10072                         } else {
10073                                 /* Reset for inner layer. */
10074                                 next_protocol = 0xff;
10075                         }
10076                         break;
10077                 case RTE_FLOW_ITEM_TYPE_TCP:
10078                         flow_dv_translate_item_tcp(match_mask, match_value,
10079                                                    items, tunnel);
10080                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10081                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10082                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10083                         break;
10084                 case RTE_FLOW_ITEM_TYPE_UDP:
10085                         flow_dv_translate_item_udp(match_mask, match_value,
10086                                                    items, tunnel);
10087                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10088                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10089                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10090                         break;
10091                 case RTE_FLOW_ITEM_TYPE_GRE:
10092                         flow_dv_translate_item_gre(match_mask, match_value,
10093                                                    items, tunnel);
10094                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10095                         last_item = MLX5_FLOW_LAYER_GRE;
10096                         break;
10097                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10098                         flow_dv_translate_item_gre_key(match_mask,
10099                                                        match_value, items);
10100                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10101                         break;
10102                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10103                         flow_dv_translate_item_nvgre(match_mask, match_value,
10104                                                      items, tunnel);
10105                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10106                         last_item = MLX5_FLOW_LAYER_GRE;
10107                         break;
10108                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10109                         flow_dv_translate_item_vxlan(match_mask, match_value,
10110                                                      items, tunnel);
10111                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10112                         last_item = MLX5_FLOW_LAYER_VXLAN;
10113                         break;
10114                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10115                         flow_dv_translate_item_vxlan_gpe(match_mask,
10116                                                          match_value, items,
10117                                                          tunnel);
10118                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10119                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10120                         break;
10121                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10122                         flow_dv_translate_item_geneve(match_mask, match_value,
10123                                                       items, tunnel);
10124                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10125                         last_item = MLX5_FLOW_LAYER_GENEVE;
10126                         break;
10127                 case RTE_FLOW_ITEM_TYPE_MPLS:
10128                         flow_dv_translate_item_mpls(match_mask, match_value,
10129                                                     items, last_item, tunnel);
10130                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10131                         last_item = MLX5_FLOW_LAYER_MPLS;
10132                         break;
10133                 case RTE_FLOW_ITEM_TYPE_MARK:
10134                         flow_dv_translate_item_mark(dev, match_mask,
10135                                                     match_value, items);
10136                         last_item = MLX5_FLOW_ITEM_MARK;
10137                         break;
10138                 case RTE_FLOW_ITEM_TYPE_META:
10139                         flow_dv_translate_item_meta(dev, match_mask,
10140                                                     match_value, attr, items);
10141                         last_item = MLX5_FLOW_ITEM_METADATA;
10142                         break;
10143                 case RTE_FLOW_ITEM_TYPE_ICMP:
10144                         flow_dv_translate_item_icmp(match_mask, match_value,
10145                                                     items, tunnel);
10146                         last_item = MLX5_FLOW_LAYER_ICMP;
10147                         break;
10148                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10149                         flow_dv_translate_item_icmp6(match_mask, match_value,
10150                                                       items, tunnel);
10151                         last_item = MLX5_FLOW_LAYER_ICMP6;
10152                         break;
10153                 case RTE_FLOW_ITEM_TYPE_TAG:
10154                         flow_dv_translate_item_tag(dev, match_mask,
10155                                                    match_value, items);
10156                         last_item = MLX5_FLOW_ITEM_TAG;
10157                         break;
10158                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10159                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10160                                                         match_value, items);
10161                         last_item = MLX5_FLOW_ITEM_TAG;
10162                         break;
10163                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10164                         flow_dv_translate_item_tx_queue(dev, match_mask,
10165                                                         match_value,
10166                                                         items);
10167                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10168                         break;
10169                 case RTE_FLOW_ITEM_TYPE_GTP:
10170                         flow_dv_translate_item_gtp(match_mask, match_value,
10171                                                    items, tunnel);
10172                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10173                         last_item = MLX5_FLOW_LAYER_GTP;
10174                         break;
10175                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10176                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10177                                 /* Create it only the first time to be used. */
10178                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10179                                 if (ret)
10180                                         return rte_flow_error_set
10181                                                 (error, -ret,
10182                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10183                                                 NULL,
10184                                                 "cannot create eCPRI parser");
10185                         }
10186                         /* Adjust the length matcher and device flow value. */
10187                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10188                         dev_flow->dv.value.size =
10189                                         MLX5_ST_SZ_BYTES(fte_match_param);
10190                         flow_dv_translate_item_ecpri(dev, match_mask,
10191                                                      match_value, items);
10192                         /* No other protocol should follow eCPRI layer. */
10193                         last_item = MLX5_FLOW_LAYER_ECPRI;
10194                         break;
10195                 default:
10196                         break;
10197                 }
10198                 item_flags |= last_item;
10199         }
10200         /*
10201          * When E-Switch mode is enabled, we have two cases where we need to
10202          * set the source port manually.
10203          * The first one, is in case of Nic steering rule, and the second is
10204          * E-Switch rule where no port_id item was found. In both cases
10205          * the source port is set according the current port in use.
10206          */
10207         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10208             (priv->representor || priv->master)) {
10209                 if (flow_dv_translate_item_port_id(dev, match_mask,
10210                                                    match_value, NULL))
10211                         return -rte_errno;
10212         }
10213 #ifdef RTE_LIBRTE_MLX5_DEBUG
10214         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10215                                               dev_flow->dv.value.buf));
10216 #endif
10217         /*
10218          * Layers may be already initialized from prefix flow if this dev_flow
10219          * is the suffix flow.
10220          */
10221         handle->layers |= item_flags;
10222         if (action_flags & MLX5_FLOW_ACTION_RSS)
10223                 flow_dv_hashfields_set(dev_flow, rss_desc);
10224         /* Register matcher. */
10225         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10226                                     matcher.mask.size);
10227         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10228                                                      matcher.priority);
10229         /* reserved field no needs to be set to 0 here. */
10230         tbl_key.domain = attr->transfer;
10231         tbl_key.direction = attr->egress;
10232         tbl_key.table_id = dev_flow->dv.group;
10233         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
10234                 return -rte_errno;
10235         return 0;
10236 }
10237
10238 /**
10239  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10240  * and tunnel.
10241  *
10242  * @param[in, out] action
 *   Shared RSS action holding hash RX queue objects.
10244  * @param[in] hash_fields
10245  *   Defines combination of packet fields to participate in RX hash.
10246  * @param[in] tunnel
10247  *   Tunnel type
10248  * @param[in] hrxq_idx
10249  *   Hash RX queue index to set.
10250  *
10251  * @return
10252  *   0 on success, otherwise negative errno value.
10253  */
static int
__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
			      const uint64_t hash_fields,
			      const int tunnel,
			      uint32_t hrxq_idx)
{
	/*
	 * NOTE(review): the ternary looks inverted - "tunnel" selects the
	 * plain ->hrxq array while non-tunnel selects ->hrxq_tunnel. The
	 * companion __flow_dv_action_rss_hrxq_lookup() applies the same
	 * selection, so set/lookup remain mutually consistent, but the
	 * intended array naming should be confirmed.
	 */
	uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;

	/* Each supported hash-field combination maps to a fixed slot. */
	switch (hash_fields & ~IBV_RX_HASH_INNER) {
	case MLX5_RSS_HASH_IPV4:
		hrxqs[0] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV4_TCP:
		hrxqs[1] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV4_UDP:
		hrxqs[2] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV6:
		hrxqs[3] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV6_TCP:
		hrxqs[4] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_IPV6_UDP:
		hrxqs[5] = hrxq_idx;
		return 0;
	case MLX5_RSS_HASH_NONE:
		hrxqs[6] = hrxq_idx;
		return 0;
	default:
		/* Unrecognized hash-field combination. */
		return -1;
	}
}
10288
10289 /**
10290  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10291  * and tunnel.
10292  *
10293  * @param[in] action
 *   Shared RSS action holding hash RX queue objects.
10295  * @param[in] hash_fields
10296  *   Defines combination of packet fields to participate in RX hash.
10297  * @param[in] tunnel
10298  *   Tunnel type
10299  *
10300  * @return
10301  *   Valid hash RX queue index, otherwise 0.
10302  */
static uint32_t
__flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
				 const uint64_t hash_fields,
				 const int tunnel)
{
	/*
	 * NOTE(review): the ternary mirrors __flow_dv_action_rss_hrxq_set()
	 * (tunnel selects ->hrxq, non-tunnel selects ->hrxq_tunnel), which
	 * looks inverted relative to the field names but keeps set/lookup
	 * consistent with each other - confirm intended naming.
	 */
	const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;

	/* Slot layout must match __flow_dv_action_rss_hrxq_set(). */
	switch (hash_fields & ~IBV_RX_HASH_INNER) {
	case MLX5_RSS_HASH_IPV4:
		return hrxqs[0];
	case MLX5_RSS_HASH_IPV4_TCP:
		return hrxqs[1];
	case MLX5_RSS_HASH_IPV4_UDP:
		return hrxqs[2];
	case MLX5_RSS_HASH_IPV6:
		return hrxqs[3];
	case MLX5_RSS_HASH_IPV6_TCP:
		return hrxqs[4];
	case MLX5_RSS_HASH_IPV6_UDP:
		return hrxqs[5];
	case MLX5_RSS_HASH_NONE:
		return hrxqs[6];
	default:
		/* Unrecognized combination - 0 means "no hrxq". */
		return 0;
	}
}
10329
10330 /**
10331  * Retrieves hash RX queue suitable for the *flow*.
10332  * If shared action configured for *flow* suitable hash RX queue will be
10333  * retrieved from attached shared action.
10334  *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to the flow structure.
10337  * @param[in] dev_flow
10338  *   Pointer to the sub flow.
10339  * @param[out] hrxq
10340  *   Pointer to retrieved hash RX queue object.
10341  *
10342  * @return
10343  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10344  */
10345 static uint32_t
10346 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
10347                            struct mlx5_flow *dev_flow,
10348                            struct mlx5_hrxq **hrxq)
10349 {
10350         struct mlx5_priv *priv = dev->data->dev_private;
10351         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10352         uint32_t hrxq_idx;
10353
10354         if (flow->shared_rss) {
10355                 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10356                                 (flow->shared_rss, dev_flow->hash_fields,
10357                                  !!(dev_flow->handle->layers &
10358                                     MLX5_FLOW_LAYER_TUNNEL));
10359                 if (hrxq_idx) {
10360                         *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10361                                                hrxq_idx);
10362                         __atomic_fetch_add(&(*hrxq)->refcnt, 1,
10363                                            __ATOMIC_RELAXED);
10364                 }
10365         } else {
10366                 struct mlx5_flow_rss_desc *rss_desc =
10367                                 &wks->rss_desc[!!wks->flow_nested_idx];
10368
10369                 *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10370                                              &hrxq_idx);
10371         }
10372         return hrxq_idx;
10373 }
10374
10375 /**
10376  * Apply the flow to the NIC, lock free,
10377  * (mutex should be acquired by caller).
10378  *
10379  * @param[in] dev
10380  *   Pointer to the Ethernet device structure.
10381  * @param[in, out] flow
10382  *   Pointer to flow structure.
10383  * @param[out] error
10384  *   Pointer to error structure.
10385  *
10386  * @return
10387  *   0 on success, a negative errno value otherwise and rte_errno is set.
10388  */
static int
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct mlx5_flow_dv_workspace *dv;
	struct mlx5_flow_handle *dh;
	struct mlx5_flow_handle_dv *dv_h;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	int n;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	/* Walk the sub-flows of this flow stored in the thread workspace. */
	for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
		dev_flow = &wks->flows[idx];
		dv = &dev_flow->dv;
		dh = dev_flow->handle;
		dv_h = &dh->dvh;
		n = dv->actions_n;
		/* Append the fate action that matches the handle's fate. */
		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
			if (dv->transfer) {
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				MLX5_ASSERT(priv->drop_queue.hrxq);
				dv->actions[n++] =
						priv->drop_queue.hrxq->action;
			}
		} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
			   !dv_h->rix_sample && !dv_h->rix_dest_array) {
			/* Queue/RSS fate: resolve the hash RX queue. */
			struct mlx5_hrxq *hrxq = NULL;
			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
						(dev, flow, dev_flow, &hrxq);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_hrxq = hrxq_idx;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
			if (!priv->sh->default_miss_action) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "default miss action not be created.");
				goto error;
			}
			dv->actions[n++] = priv->sh->default_miss_action;
		}
		/* Program the rule into hardware via the OS abstraction. */
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,
					       dv->actions, &dh->drv_flow);
		if (err) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Roll back resources taken by the sub-flows applied so far. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dh, next) {
		/* hrxq is union, don't clear it if the flag is not set. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
			mlx5_hrxq_release(dev, dh->rix_hrxq);
			dh->rix_hrxq = 0;
		}
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
10480
10481 void
10482 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10483                           struct mlx5_cache_entry *entry)
10484 {
10485         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10486                                                           entry);
10487
10488         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10489         mlx5_free(cache);
10490 }
10491
10492 /**
10493  * Release the flow matcher.
10494  *
10495  * @param dev
10496  *   Pointer to Ethernet device.
10497  * @param handle
10498  *   Pointer to mlx5_flow_handle.
10499  *
10500  * @return
10501  *   1 while a reference on it exists, 0 when freed.
10502  */
10503 static int
10504 flow_dv_matcher_release(struct rte_eth_dev *dev,
10505                         struct mlx5_flow_handle *handle)
10506 {
10507         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10508         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10509                                                             typeof(*tbl), tbl);
10510         int ret;
10511
10512         MLX5_ASSERT(matcher->matcher_object);
10513         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10514         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10515         return ret;
10516 }
10517
10518 /**
10519  * Release encap_decap resource.
10520  *
10521  * @param list
10522  *   Pointer to the hash list.
10523  * @param entry
10524  *   Pointer to exist resource entry object.
10525  */
10526 void
10527 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10528                               struct mlx5_hlist_entry *entry)
10529 {
10530         struct mlx5_dev_ctx_shared *sh = list->ctx;
10531         struct mlx5_flow_dv_encap_decap_resource *res =
10532                 container_of(entry, typeof(*res), entry);
10533
10534         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10535         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10536 }
10537
10538 /**
10539  * Release an encap/decap resource.
10540  *
10541  * @param dev
10542  *   Pointer to Ethernet device.
10543  * @param encap_decap_idx
10544  *   Index of encap decap resource.
10545  *
10546  * @return
10547  *   1 while a reference on it exists, 0 when freed.
10548  */
10549 static int
10550 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10551                                      uint32_t encap_decap_idx)
10552 {
10553         struct mlx5_priv *priv = dev->data->dev_private;
10554         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10555
10556         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10557                                         encap_decap_idx);
10558         if (!cache_resource)
10559                 return 0;
10560         MLX5_ASSERT(cache_resource->action);
10561         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10562                                      &cache_resource->entry);
10563 }
10564
10565 /**
 * Release a jump to table action resource.
10567  *
10568  * @param dev
10569  *   Pointer to Ethernet device.
10570  * @param handle
10571  *   Pointer to mlx5_flow_handle.
10572  *
10573  * @return
10574  *   1 while a reference on it exists, 0 when freed.
10575  */
10576 static int
10577 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10578                                   struct mlx5_flow_handle *handle)
10579 {
10580         struct mlx5_priv *priv = dev->data->dev_private;
10581         struct mlx5_flow_tbl_data_entry *tbl_data;
10582
10583         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10584                              handle->rix_jump);
10585         if (!tbl_data)
10586                 return 0;
10587         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10588 }
10589
10590 void
10591 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10592                          struct mlx5_hlist_entry *entry)
10593 {
10594         struct mlx5_flow_dv_modify_hdr_resource *res =
10595                 container_of(entry, typeof(*res), entry);
10596
10597         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10598         mlx5_free(entry);
10599 }
10600
10601 /**
10602  * Release a modify-header resource.
10603  *
10604  * @param dev
10605  *   Pointer to Ethernet device.
10606  * @param handle
10607  *   Pointer to mlx5_flow_handle.
10608  *
10609  * @return
10610  *   1 while a reference on it exists, 0 when freed.
10611  */
10612 static int
10613 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10614                                     struct mlx5_flow_handle *handle)
10615 {
10616         struct mlx5_priv *priv = dev->data->dev_private;
10617         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10618
10619         MLX5_ASSERT(entry->action);
10620         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10621 }
10622
10623 void
10624 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10625                           struct mlx5_cache_entry *entry)
10626 {
10627         struct mlx5_dev_ctx_shared *sh = list->ctx;
10628         struct mlx5_flow_dv_port_id_action_resource *cache =
10629                         container_of(entry, typeof(*cache), entry);
10630
10631         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10632         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10633 }
10634
10635 /**
10636  * Release port ID action resource.
10637  *
10638  * @param dev
10639  *   Pointer to Ethernet device.
 * @param port_id
 *   Index of the port ID action resource.
10642  *
10643  * @return
10644  *   1 while a reference on it exists, 0 when freed.
10645  */
10646 static int
10647 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10648                                         uint32_t port_id)
10649 {
10650         struct mlx5_priv *priv = dev->data->dev_private;
10651         struct mlx5_flow_dv_port_id_action_resource *cache;
10652
10653         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
10654         if (!cache)
10655                 return 0;
10656         MLX5_ASSERT(cache->action);
10657         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
10658                                      &cache->entry);
10659 }
10660
10661 void
10662 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
10663                             struct mlx5_cache_entry *entry)
10664 {
10665         struct mlx5_dev_ctx_shared *sh = list->ctx;
10666         struct mlx5_flow_dv_push_vlan_action_resource *cache =
10667                         container_of(entry, typeof(*cache), entry);
10668
10669         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10670         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
10671 }
10672
10673 /**
10674  * Release push vlan action resource.
10675  *
10676  * @param dev
10677  *   Pointer to Ethernet device.
10678  * @param handle
10679  *   Pointer to mlx5_flow_handle.
10680  *
10681  * @return
10682  *   1 while a reference on it exists, 0 when freed.
10683  */
10684 static int
10685 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10686                                           struct mlx5_flow_handle *handle)
10687 {
10688         struct mlx5_priv *priv = dev->data->dev_private;
10689         struct mlx5_flow_dv_push_vlan_action_resource *cache;
10690         uint32_t idx = handle->dvh.rix_push_vlan;
10691
10692         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10693         if (!cache)
10694                 return 0;
10695         MLX5_ASSERT(cache->action);
10696         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
10697                                      &cache->entry);
10698 }
10699
10700 /**
10701  * Release the fate resource.
10702  *
10703  * @param dev
10704  *   Pointer to Ethernet device.
10705  * @param handle
10706  *   Pointer to mlx5_flow_handle.
10707  */
10708 static void
10709 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
10710                                struct mlx5_flow_handle *handle)
10711 {
10712         if (!handle->rix_fate)
10713                 return;
10714         switch (handle->fate_action) {
10715         case MLX5_FLOW_FATE_QUEUE:
10716                 mlx5_hrxq_release(dev, handle->rix_hrxq);
10717                 break;
10718         case MLX5_FLOW_FATE_JUMP:
10719                 flow_dv_jump_tbl_resource_release(dev, handle);
10720                 break;
10721         case MLX5_FLOW_FATE_PORT_ID:
10722                 flow_dv_port_id_action_resource_release(dev,
10723                                 handle->rix_port_id_action);
10724                 break;
10725         default:
10726                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
10727                 break;
10728         }
10729         handle->rix_fate = 0;
10730 }
10731
/**
 * Cache list callback: destroy a sample resource.
 *
 * Destroys the sampler flow action and its FDB default-miss action (if
 * created), releases the normal path table and the sample sub-actions,
 * then returns the resource to the sample indexed pool.
 *
 * @param list
 *   Pointer to the cache list; list->ctx is the Ethernet device.
 * @param entry
 *   Cache entry embedded in the sample resource.
 */
void
flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
                         struct mlx5_cache_entry *entry)
{
        struct rte_eth_dev *dev = list->ctx;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_sample_resource *cache_resource =
                        container_of(entry, typeof(*cache_resource), entry);

        /* Destroy the sampler action before its dependent objects. */
        if (cache_resource->verbs_action)
                claim_zero(mlx5_glue->destroy_flow_action
                                (cache_resource->verbs_action));
        /* The default miss action exists only for FDB (E-Switch) tables. */
        if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
                if (cache_resource->default_miss)
                        claim_zero(mlx5_glue->destroy_flow_action
                          (cache_resource->default_miss));
        }
        if (cache_resource->normal_path_tbl)
                flow_dv_tbl_resource_release(MLX5_SH(dev),
                        cache_resource->normal_path_tbl);
        /* Release the references held by the sample sub-actions. */
        flow_dv_sample_sub_actions_release(dev,
                                &cache_resource->sample_idx);
        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
                        cache_resource->idx);
        DRV_LOG(DEBUG, "sample resource %p: removed",
                (void *)cache_resource);
}
10759
10760 /**
10761  * Release an sample resource.
10762  *
10763  * @param dev
10764  *   Pointer to Ethernet device.
10765  * @param handle
10766  *   Pointer to mlx5_flow_handle.
10767  *
10768  * @return
10769  *   1 while a reference on it exists, 0 when freed.
10770  */
10771 static int
10772 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
10773                                      struct mlx5_flow_handle *handle)
10774 {
10775         struct mlx5_priv *priv = dev->data->dev_private;
10776         struct mlx5_flow_dv_sample_resource *cache_resource;
10777
10778         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10779                          handle->dvh.rix_sample);
10780         if (!cache_resource)
10781                 return 0;
10782         MLX5_ASSERT(cache_resource->verbs_action);
10783         return mlx5_cache_unregister(&priv->sh->sample_action_list,
10784                                      &cache_resource->entry);
10785 }
10786
/**
 * Cache list callback: destroy a destination array resource.
 *
 * Destroys the destination array flow action, releases the sub-action
 * resources of every destination and returns the resource memory to the
 * destination array indexed pool.
 *
 * @param list
 *   Pointer to the cache list; list->ctx is the Ethernet device.
 * @param entry
 *   Cache entry embedded in the destination array resource.
 */
void
flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
                             struct mlx5_cache_entry *entry)
{
        struct rte_eth_dev *dev = list->ctx;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_dest_array_resource *cache_resource =
                        container_of(entry, typeof(*cache_resource), entry);
        uint32_t i = 0;

        /* The action must exist; the if below also guards release builds. */
        MLX5_ASSERT(cache_resource->action);
        if (cache_resource->action)
                claim_zero(mlx5_glue->destroy_flow_action
                                        (cache_resource->action));
        /* Release the sub-actions of each destination entry. */
        for (; i < cache_resource->num_of_dest; i++)
                flow_dv_sample_sub_actions_release(dev,
                                &cache_resource->sample_idx[i]);
        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
                        cache_resource->idx);
        DRV_LOG(DEBUG, "destination array resource %p: removed",
                (void *)cache_resource);
}
10809
10810 /**
10811  * Release an destination array resource.
10812  *
10813  * @param dev
10814  *   Pointer to Ethernet device.
10815  * @param handle
10816  *   Pointer to mlx5_flow_handle.
10817  *
10818  * @return
10819  *   1 while a reference on it exists, 0 when freed.
10820  */
10821 static int
10822 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
10823                                     struct mlx5_flow_handle *handle)
10824 {
10825         struct mlx5_priv *priv = dev->data->dev_private;
10826         struct mlx5_flow_dv_dest_array_resource *cache;
10827
10828         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10829                                handle->dvh.rix_dest_array);
10830         if (!cache)
10831                 return 0;
10832         MLX5_ASSERT(cache->action);
10833         return mlx5_cache_unregister(&priv->sh->dest_array_list,
10834                                      &cache->entry);
10835 }
10836
10837 /**
10838  * Remove the flow from the NIC but keeps it in memory.
10839  * Lock free, (mutex should be acquired by caller).
10840  *
10841  * @param[in] dev
10842  *   Pointer to Ethernet device.
10843  * @param[in, out] flow
10844  *   Pointer to flow structure.
10845  */
10846 static void
10847 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
10848 {
10849         struct mlx5_flow_handle *dh;
10850         uint32_t handle_idx;
10851         struct mlx5_priv *priv = dev->data->dev_private;
10852
10853         if (!flow)
10854                 return;
10855         handle_idx = flow->dev_handles;
10856         while (handle_idx) {
10857                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10858                                     handle_idx);
10859                 if (!dh)
10860                         return;
10861                 if (dh->drv_flow) {
10862                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
10863                         dh->drv_flow = NULL;
10864                 }
10865                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
10866                         flow_dv_fate_resource_release(dev, dh);
10867                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10868                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10869                 handle_idx = dh->next.next;
10870         }
10871 }
10872
10873 /**
10874  * Remove the flow from the NIC and the memory.
10875  * Lock free, (mutex should be acquired by caller).
10876  *
10877  * @param[in] dev
10878  *   Pointer to the Ethernet device structure.
10879  * @param[in, out] flow
10880  *   Pointer to flow structure.
10881  */
10882 static void
10883 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
10884 {
10885         struct rte_flow_shared_action *shared;
10886         struct mlx5_flow_handle *dev_handle;
10887         struct mlx5_priv *priv = dev->data->dev_private;
10888
10889         if (!flow)
10890                 return;
10891         __flow_dv_remove(dev, flow);
10892         shared = mlx5_flow_get_shared_rss(flow);
10893         if (shared)
10894                 __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
10895         if (flow->counter) {
10896                 flow_dv_counter_release(dev, flow->counter);
10897                 flow->counter = 0;
10898         }
10899         if (flow->meter) {
10900                 struct mlx5_flow_meter *fm;
10901
10902                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
10903                                     flow->meter);
10904                 if (fm)
10905                         mlx5_flow_meter_detach(fm);
10906                 flow->meter = 0;
10907         }
10908         while (flow->dev_handles) {
10909                 uint32_t tmp_idx = flow->dev_handles;
10910
10911                 dev_handle = mlx5_ipool_get(priv->sh->ipool
10912                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
10913                 if (!dev_handle)
10914                         return;
10915                 flow->dev_handles = dev_handle->next.next;
10916                 if (dev_handle->dvh.matcher)
10917                         flow_dv_matcher_release(dev, dev_handle);
10918                 if (dev_handle->dvh.rix_sample)
10919                         flow_dv_sample_resource_release(dev, dev_handle);
10920                 if (dev_handle->dvh.rix_dest_array)
10921                         flow_dv_dest_array_resource_release(dev, dev_handle);
10922                 if (dev_handle->dvh.rix_encap_decap)
10923                         flow_dv_encap_decap_resource_release(dev,
10924                                 dev_handle->dvh.rix_encap_decap);
10925                 if (dev_handle->dvh.modify_hdr)
10926                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
10927                 if (dev_handle->dvh.rix_push_vlan)
10928                         flow_dv_push_vlan_action_resource_release(dev,
10929                                                                   dev_handle);
10930                 if (dev_handle->dvh.rix_tag)
10931                         flow_dv_tag_release(dev,
10932                                             dev_handle->dvh.rix_tag);
10933                 flow_dv_fate_resource_release(dev, dev_handle);
10934                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10935                            tmp_idx);
10936         }
10937 }
10938
10939 /**
10940  * Release array of hash RX queue objects.
10941  * Helper function.
10942  *
10943  * @param[in] dev
10944  *   Pointer to the Ethernet device structure.
10945  * @param[in, out] hrxqs
10946  *   Array of hash RX queue objects.
10947  *
10948  * @return
10949  *   Total number of references to hash RX queue objects in *hrxqs* array
10950  *   after this operation.
10951  */
10952 static int
10953 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
10954                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
10955 {
10956         size_t i;
10957         int remaining = 0;
10958
10959         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
10960                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
10961
10962                 if (!ret)
10963                         (*hrxqs)[i] = 0;
10964                 remaining += ret;
10965         }
10966         return remaining;
10967 }
10968
10969 /**
10970  * Release all hash RX queue objects representing shared RSS action.
10971  *
10972  * @param[in] dev
10973  *   Pointer to the Ethernet device structure.
10974  * @param[in, out] action
10975  *   Shared RSS action to remove hash RX queue objects from.
10976  *
10977  * @return
10978  *   Total number of references to hash RX queue objects stored in *action*
10979  *   after this operation.
10980  *   Expected to be 0 if no external references held.
10981  */
10982 static int
10983 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
10984                                  struct mlx5_shared_action_rss *action)
10985 {
10986         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
10987                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
10988 }
10989
10990 /**
10991  * Setup shared RSS action.
10992  * Prepare set of hash RX queue objects sufficient to handle all valid
10993  * hash_fields combinations (see enum ibv_rx_hash_fields).
10994  *
10995  * @param[in] dev
10996  *   Pointer to the Ethernet device structure.
10997  * @param[in, out] action
10998  *   Partially initialized shared RSS action.
10999  * @param[out] error
11000  *   Perform verbose error reporting if not NULL. Initialized in case of
11001  *   error only.
11002  *
11003  * @return
11004  *   0 on success, otherwise negative errno value.
11005  */
11006 static int
11007 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
11008                         struct mlx5_shared_action_rss *action,
11009                         struct rte_flow_error *error)
11010 {
11011         struct mlx5_flow_rss_desc rss_desc = { 0 };
11012         size_t i;
11013         int err;
11014
11015         memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
11016         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
11017         rss_desc.const_q = action->origin.queue;
11018         rss_desc.queue_num = action->origin.queue_num;
11019         rss_desc.standalone = true;
11020         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11021                 uint32_t hrxq_idx;
11022                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11023                 int tunnel;
11024
11025                 for (tunnel = 0; tunnel < 2; tunnel++) {
11026                         rss_desc.tunnel = tunnel;
11027                         rss_desc.hash_fields = hash_fields;
11028                         hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
11029                         if (!hrxq_idx) {
11030                                 rte_flow_error_set
11031                                         (error, rte_errno,
11032                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11033                                          "cannot get hash queue");
11034                                 goto error_hrxq_new;
11035                         }
11036                         err = __flow_dv_action_rss_hrxq_set
11037                                 (action, hash_fields, tunnel, hrxq_idx);
11038                         MLX5_ASSERT(!err);
11039                 }
11040         }
11041         return 0;
11042 error_hrxq_new:
11043         err = rte_errno;
11044         __flow_dv_action_rss_hrxqs_release(dev, action);
11045         rte_errno = err;
11046         return -rte_errno;
11047 }
11048
11049 /**
11050  * Create shared RSS action.
11051  *
11052  * @param[in] dev
11053  *   Pointer to the Ethernet device structure.
11054  * @param[in] conf
11055  *   Shared action configuration.
11056  * @param[in] rss
11057  *   RSS action specification used to create shared action.
11058  * @param[out] error
11059  *   Perform verbose error reporting if not NULL. Initialized in case of
11060  *   error only.
11061  *
11062  * @return
11063  *   A valid shared action handle in case of success, NULL otherwise and
11064  *   rte_errno is set.
11065  */
11066 static struct rte_flow_shared_action *
11067 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11068                             const struct rte_flow_shared_action_conf *conf,
11069                             const struct rte_flow_action_rss *rss,
11070                             struct rte_flow_error *error)
11071 {
11072         struct rte_flow_shared_action *shared_action = NULL;
11073         void *queue = NULL;
11074         struct mlx5_shared_action_rss *shared_rss;
11075         struct rte_flow_action_rss *origin;
11076         const uint8_t *rss_key;
11077         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11078
11079         RTE_SET_USED(conf);
11080         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11081                             0, SOCKET_ID_ANY);
11082         shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
11083                                     SOCKET_ID_ANY);
11084         if (!shared_action || !queue) {
11085                 rte_flow_error_set(error, ENOMEM,
11086                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11087                                    "cannot allocate resource memory");
11088                 goto error_rss_init;
11089         }
11090         shared_rss = &shared_action->rss;
11091         shared_rss->queue = queue;
11092         origin = &shared_rss->origin;
11093         origin->func = rss->func;
11094         origin->level = rss->level;
11095         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11096         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11097         /* NULL RSS key indicates default RSS key. */
11098         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11099         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11100         origin->key = &shared_rss->key[0];
11101         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11102         memcpy(shared_rss->queue, rss->queue, queue_size);
11103         origin->queue = shared_rss->queue;
11104         origin->queue_num = rss->queue_num;
11105         if (__flow_dv_action_rss_setup(dev, shared_rss, error))
11106                 goto error_rss_init;
11107         shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
11108         return shared_action;
11109 error_rss_init:
11110         mlx5_free(shared_action);
11111         mlx5_free(queue);
11112         return NULL;
11113 }
11114
11115 /**
11116  * Destroy the shared RSS action.
11117  * Release related hash RX queue objects.
11118  *
11119  * @param[in] dev
11120  *   Pointer to the Ethernet device structure.
11121  * @param[in] shared_rss
11122  *   The shared RSS action object to be removed.
11123  * @param[out] error
11124  *   Perform verbose error reporting if not NULL. Initialized in case of
11125  *   error only.
11126  *
11127  * @return
11128  *   0 on success, otherwise negative errno value.
11129  */
11130 static int
11131 __flow_dv_action_rss_release(struct rte_eth_dev *dev,
11132                          struct mlx5_shared_action_rss *shared_rss,
11133                          struct rte_flow_error *error)
11134 {
11135         struct rte_flow_shared_action *shared_action = NULL;
11136         uint32_t old_refcnt = 1;
11137         int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11138
11139         if (remaining) {
11140                 return rte_flow_error_set(error, ETOOMANYREFS,
11141                                           RTE_FLOW_ERROR_TYPE_ACTION,
11142                                           NULL,
11143                                           "shared rss hrxq has references");
11144         }
11145         shared_action = container_of(shared_rss,
11146                                      struct rte_flow_shared_action, rss);
11147         if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
11148                                          0, 0,
11149                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
11150                 return rte_flow_error_set(error, ETOOMANYREFS,
11151                                           RTE_FLOW_ERROR_TYPE_ACTION,
11152                                           NULL,
11153                                           "shared rss has references");
11154         }
11155         rte_free(shared_rss->queue);
11156         return 0;
11157 }
11158
11159 /**
11160  * Create shared action, lock free,
11161  * (mutex should be acquired by caller).
11162  * Dispatcher for action type specific call.
11163  *
11164  * @param[in] dev
11165  *   Pointer to the Ethernet device structure.
11166  * @param[in] conf
11167  *   Shared action configuration.
11168  * @param[in] action
11169  *   Action specification used to create shared action.
11170  * @param[out] error
11171  *   Perform verbose error reporting if not NULL. Initialized in case of
11172  *   error only.
11173  *
11174  * @return
11175  *   A valid shared action handle in case of success, NULL otherwise and
11176  *   rte_errno is set.
11177  */
11178 static struct rte_flow_shared_action *
11179 __flow_dv_action_create(struct rte_eth_dev *dev,
11180                         const struct rte_flow_shared_action_conf *conf,
11181                         const struct rte_flow_action *action,
11182                         struct rte_flow_error *error)
11183 {
11184         struct rte_flow_shared_action *shared_action = NULL;
11185         struct mlx5_priv *priv = dev->data->dev_private;
11186
11187         switch (action->type) {
11188         case RTE_FLOW_ACTION_TYPE_RSS:
11189                 shared_action = __flow_dv_action_rss_create(dev, conf,
11190                                                             action->conf,
11191                                                             error);
11192                 break;
11193         default:
11194                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11195                                    NULL, "action type not supported");
11196                 break;
11197         }
11198         if (shared_action) {
11199                 __atomic_add_fetch(&shared_action->refcnt, 1,
11200                                    __ATOMIC_RELAXED);
11201                 LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
11202         }
11203         return shared_action;
11204 }
11205
11206 /**
11207  * Destroy the shared action.
11208  * Release action related resources on the NIC and the memory.
11209  * Lock free, (mutex should be acquired by caller).
11210  * Dispatcher for action type specific call.
11211  *
11212  * @param[in] dev
11213  *   Pointer to the Ethernet device structure.
11214  * @param[in] action
11215  *   The shared action object to be removed.
11216  * @param[out] error
11217  *   Perform verbose error reporting if not NULL. Initialized in case of
11218  *   error only.
11219  *
11220  * @return
11221  *   0 on success, otherwise negative errno value.
11222  */
11223 static int
11224 __flow_dv_action_destroy(struct rte_eth_dev *dev,
11225                          struct rte_flow_shared_action *action,
11226                          struct rte_flow_error *error)
11227 {
11228         int ret;
11229
11230         switch (action->type) {
11231         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11232                 ret = __flow_dv_action_rss_release(dev, &action->rss, error);
11233                 break;
11234         default:
11235                 return rte_flow_error_set(error, ENOTSUP,
11236                                           RTE_FLOW_ERROR_TYPE_ACTION,
11237                                           NULL,
11238                                           "action type not supported");
11239         }
11240         if (ret)
11241                 return ret;
11242         LIST_REMOVE(action, next);
11243         rte_free(action);
11244         return 0;
11245 }
11246
11247 /**
11248  * Updates in place shared RSS action configuration.
11249  *
11250  * @param[in] dev
11251  *   Pointer to the Ethernet device structure.
11252  * @param[in] shared_rss
11253  *   The shared RSS action object to be updated.
11254  * @param[in] action_conf
11255  *   RSS action specification used to modify *shared_rss*.
11256  * @param[out] error
11257  *   Perform verbose error reporting if not NULL. Initialized in case of
11258  *   error only.
11259  *
11260  * @return
11261  *   0 on success, otherwise negative errno value.
11262  * @note: currently only support update of RSS queues.
11263  */
11264 static int
11265 __flow_dv_action_rss_update(struct rte_eth_dev *dev,
11266                             struct mlx5_shared_action_rss *shared_rss,
11267                             const struct rte_flow_action_rss *action_conf,
11268                             struct rte_flow_error *error)
11269 {
11270         size_t i;
11271         int ret;
11272         void *queue = NULL;
11273         const uint8_t *rss_key;
11274         uint32_t rss_key_len;
11275         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
11276
11277         queue = mlx5_malloc(MLX5_MEM_ZERO,
11278                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11279                             0, SOCKET_ID_ANY);
11280         if (!queue)
11281                 return rte_flow_error_set(error, ENOMEM,
11282                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11283                                           NULL,
11284                                           "cannot allocate resource memory");
11285         if (action_conf->key) {
11286                 rss_key = action_conf->key;
11287                 rss_key_len = action_conf->key_len;
11288         } else {
11289                 rss_key = rss_hash_default_key;
11290                 rss_key_len = MLX5_RSS_HASH_KEY_LEN;
11291         }
11292         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11293                 uint32_t hrxq_idx;
11294                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11295                 int tunnel;
11296
11297                 for (tunnel = 0; tunnel < 2; tunnel++) {
11298                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup
11299                                         (shared_rss, hash_fields, tunnel);
11300                         MLX5_ASSERT(hrxq_idx);
11301                         ret = mlx5_hrxq_modify
11302                                 (dev, hrxq_idx,
11303                                  rss_key, rss_key_len,
11304                                  hash_fields,
11305                                  action_conf->queue, action_conf->queue_num);
11306                         if (ret) {
11307                                 mlx5_free(queue);
11308                                 return rte_flow_error_set
11309                                         (error, rte_errno,
11310                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11311                                          "cannot update hash queue");
11312                         }
11313                 }
11314         }
11315         mlx5_free(shared_rss->queue);
11316         shared_rss->queue = queue;
11317         memcpy(shared_rss->queue, action_conf->queue, queue_size);
11318         shared_rss->origin.queue = shared_rss->queue;
11319         shared_rss->origin.queue_num = action_conf->queue_num;
11320         return 0;
11321 }
11322
11323 /**
11324  * Updates in place shared action configuration, lock free,
11325  * (mutex should be acquired by caller).
11326  *
11327  * @param[in] dev
11328  *   Pointer to the Ethernet device structure.
11329  * @param[in] action
11330  *   The shared action object to be updated.
11331  * @param[in] action_conf
11332  *   Action specification used to modify *action*.
11333  *   *action_conf* should be of type correlating with type of the *action*,
11334  *   otherwise considered as invalid.
11335  * @param[out] error
11336  *   Perform verbose error reporting if not NULL. Initialized in case of
11337  *   error only.
11338  *
11339  * @return
11340  *   0 on success, otherwise negative errno value.
11341  */
11342 static int
11343 __flow_dv_action_update(struct rte_eth_dev *dev,
11344                         struct rte_flow_shared_action *action,
11345                         const void *action_conf,
11346                         struct rte_flow_error *error)
11347 {
11348         switch (action->type) {
11349         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11350                 return __flow_dv_action_rss_update(dev, &action->rss,
11351                                                    action_conf, error);
11352         default:
11353                 return rte_flow_error_set(error, ENOTSUP,
11354                                           RTE_FLOW_ERROR_TYPE_ACTION,
11355                                           NULL,
11356                                           "action type not supported");
11357         }
11358 }
11359 /**
11360  * Query a dv flow  rule for its statistics via devx.
11361  *
11362  * @param[in] dev
11363  *   Pointer to Ethernet device.
11364  * @param[in] flow
11365  *   Pointer to the sub flow.
11366  * @param[out] data
11367  *   data retrieved by the query.
11368  * @param[out] error
11369  *   Perform verbose error reporting if not NULL.
11370  *
11371  * @return
11372  *   0 on success, a negative errno value otherwise and rte_errno is set.
11373  */
11374 static int
11375 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11376                     void *data, struct rte_flow_error *error)
11377 {
11378         struct mlx5_priv *priv = dev->data->dev_private;
11379         struct rte_flow_query_count *qc = data;
11380
11381         if (!priv->config.devx)
11382                 return rte_flow_error_set(error, ENOTSUP,
11383                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11384                                           NULL,
11385                                           "counters are not supported");
11386         if (flow->counter) {
11387                 uint64_t pkts, bytes;
11388                 struct mlx5_flow_counter *cnt;
11389
11390                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
11391                                                  NULL);
11392                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
11393                                                &bytes);
11394
11395                 if (err)
11396                         return rte_flow_error_set(error, -err,
11397                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11398                                         NULL, "cannot read counters");
11399                 qc->hits_set = 1;
11400                 qc->bytes_set = 1;
11401                 qc->hits = pkts - cnt->hits;
11402                 qc->bytes = bytes - cnt->bytes;
11403                 if (qc->reset) {
11404                         cnt->hits = pkts;
11405                         cnt->bytes = bytes;
11406                 }
11407                 return 0;
11408         }
11409         return rte_flow_error_set(error, EINVAL,
11410                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11411                                   NULL,
11412                                   "counters are not available");
11413 }
11414
11415 /**
11416  * Query a flow rule AGE action for aging information.
11417  *
11418  * @param[in] dev
11419  *   Pointer to Ethernet device.
11420  * @param[in] flow
11421  *   Pointer to the sub flow.
11422  * @param[out] data
11423  *   data retrieved by the query.
11424  * @param[out] error
11425  *   Perform verbose error reporting if not NULL.
11426  *
11427  * @return
11428  *   0 on success, a negative errno value otherwise and rte_errno is set.
11429  */
11430 static int
11431 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11432                   void *data, struct rte_flow_error *error)
11433 {
11434         struct rte_flow_query_age *resp = data;
11435
11436         if (flow->counter) {
11437                 struct mlx5_age_param *age_param =
11438                                 flow_dv_counter_idx_get_age(dev, flow->counter);
11439
11440                 if (!age_param || !age_param->timeout)
11441                         return rte_flow_error_set
11442                                         (error, EINVAL,
11443                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11444                                          NULL, "cannot read age data");
11445                 resp->aged = __atomic_load_n(&age_param->state,
11446                                              __ATOMIC_RELAXED) ==
11447                                                         AGE_TMOUT ? 1 : 0;
11448                 resp->sec_since_last_hit_valid = !resp->aged;
11449                 if (resp->sec_since_last_hit_valid)
11450                         resp->sec_since_last_hit =
11451                                 __atomic_load_n(&age_param->sec_since_last_hit,
11452                                                 __ATOMIC_RELAXED);
11453                 return 0;
11454         }
11455         return rte_flow_error_set(error, EINVAL,
11456                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11457                                   NULL,
11458                                   "age data not available");
11459 }
11460
11461 /**
11462  * Query a flow.
11463  *
11464  * @see rte_flow_query()
11465  * @see rte_flow_ops
11466  */
11467 static int
11468 flow_dv_query(struct rte_eth_dev *dev,
11469               struct rte_flow *flow __rte_unused,
11470               const struct rte_flow_action *actions __rte_unused,
11471               void *data __rte_unused,
11472               struct rte_flow_error *error __rte_unused)
11473 {
11474         int ret = -EINVAL;
11475
11476         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11477                 switch (actions->type) {
11478                 case RTE_FLOW_ACTION_TYPE_VOID:
11479                         break;
11480                 case RTE_FLOW_ACTION_TYPE_COUNT:
11481                         ret = flow_dv_query_count(dev, flow, data, error);
11482                         break;
11483                 case RTE_FLOW_ACTION_TYPE_AGE:
11484                         ret = flow_dv_query_age(dev, flow, data, error);
11485                         break;
11486                 default:
11487                         return rte_flow_error_set(error, ENOTSUP,
11488                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11489                                                   actions,
11490                                                   "action not supported");
11491                 }
11492         }
11493         return ret;
11494 }
11495
11496 /**
11497  * Destroy the meter table set.
11498  * Lock free, (mutex should be acquired by caller).
11499  *
11500  * @param[in] dev
11501  *   Pointer to Ethernet device.
11502  * @param[in] tbl
11503  *   Pointer to the meter table set.
11504  *
11505  * @return
11506  *   Always 0.
11507  */
11508 static int
11509 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11510                         struct mlx5_meter_domains_infos *tbl)
11511 {
11512         struct mlx5_priv *priv = dev->data->dev_private;
11513         struct mlx5_meter_domains_infos *mtd =
11514                                 (struct mlx5_meter_domains_infos *)tbl;
11515
11516         if (!mtd || !priv->config.dv_flow_en)
11517                 return 0;
11518         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11519                 claim_zero(mlx5_flow_os_destroy_flow
11520                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11521         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11522                 claim_zero(mlx5_flow_os_destroy_flow
11523                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11524         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11525                 claim_zero(mlx5_flow_os_destroy_flow
11526                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11527         if (mtd->egress.color_matcher)
11528                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11529                            (mtd->egress.color_matcher));
11530         if (mtd->egress.any_matcher)
11531                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11532                            (mtd->egress.any_matcher));
11533         if (mtd->egress.tbl)
11534                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
11535         if (mtd->egress.sfx_tbl)
11536                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
11537         if (mtd->ingress.color_matcher)
11538                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11539                            (mtd->ingress.color_matcher));
11540         if (mtd->ingress.any_matcher)
11541                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11542                            (mtd->ingress.any_matcher));
11543         if (mtd->ingress.tbl)
11544                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
11545         if (mtd->ingress.sfx_tbl)
11546                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11547                                              mtd->ingress.sfx_tbl);
11548         if (mtd->transfer.color_matcher)
11549                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11550                            (mtd->transfer.color_matcher));
11551         if (mtd->transfer.any_matcher)
11552                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11553                            (mtd->transfer.any_matcher));
11554         if (mtd->transfer.tbl)
11555                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
11556         if (mtd->transfer.sfx_tbl)
11557                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11558                                              mtd->transfer.sfx_tbl);
11559         if (mtd->drop_actn)
11560                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
11561         mlx5_free(mtd);
11562         return 0;
11563 }
11564
11565 /* Number of meter flow actions, count and jump or count and drop. */
11566 #define METER_ACTIONS 2
11567
11568 /**
11569  * Create specify domain meter table and suffix table.
11570  *
11571  * @param[in] dev
11572  *   Pointer to Ethernet device.
11573  * @param[in,out] mtb
11574  *   Pointer to DV meter table set.
11575  * @param[in] egress
11576  *   Table attribute.
11577  * @param[in] transfer
11578  *   Table attribute.
11579  * @param[in] color_reg_c_idx
11580  *   Reg C index for color match.
11581  *
11582  * @return
11583  *   0 on success, -1 otherwise and rte_errno is set.
11584  */
11585 static int
11586 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
11587                            struct mlx5_meter_domains_infos *mtb,
11588                            uint8_t egress, uint8_t transfer,
11589                            uint32_t color_reg_c_idx)
11590 {
11591         struct mlx5_priv *priv = dev->data->dev_private;
11592         struct mlx5_dev_ctx_shared *sh = priv->sh;
11593         struct mlx5_flow_dv_match_params mask = {
11594                 .size = sizeof(mask.buf),
11595         };
11596         struct mlx5_flow_dv_match_params value = {
11597                 .size = sizeof(value.buf),
11598         };
11599         struct mlx5dv_flow_matcher_attr dv_attr = {
11600                 .type = IBV_FLOW_ATTR_NORMAL,
11601                 .priority = 0,
11602                 .match_criteria_enable = 0,
11603                 .match_mask = (void *)&mask,
11604         };
11605         void *actions[METER_ACTIONS];
11606         struct mlx5_meter_domain_info *dtb;
11607         struct rte_flow_error error;
11608         int i = 0;
11609         int ret;
11610
11611         if (transfer)
11612                 dtb = &mtb->transfer;
11613         else if (egress)
11614                 dtb = &mtb->egress;
11615         else
11616                 dtb = &mtb->ingress;
11617         /* Create the meter table with METER level. */
11618         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
11619                                             egress, transfer, false, NULL, 0,
11620                                             0, &error);
11621         if (!dtb->tbl) {
11622                 DRV_LOG(ERR, "Failed to create meter policer table.");
11623                 return -1;
11624         }
11625         /* Create the meter suffix table with SUFFIX level. */
11626         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
11627                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
11628                                             egress, transfer, false, NULL, 0,
11629                                             0, &error);
11630         if (!dtb->sfx_tbl) {
11631                 DRV_LOG(ERR, "Failed to create meter suffix table.");
11632                 return -1;
11633         }
11634         /* Create matchers, Any and Color. */
11635         dv_attr.priority = 3;
11636         dv_attr.match_criteria_enable = 0;
11637         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11638                                                &dtb->any_matcher);
11639         if (ret) {
11640                 DRV_LOG(ERR, "Failed to create meter"
11641                              " policer default matcher.");
11642                 goto error_exit;
11643         }
11644         dv_attr.priority = 0;
11645         dv_attr.match_criteria_enable =
11646                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
11647         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
11648                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
11649         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11650                                                &dtb->color_matcher);
11651         if (ret) {
11652                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
11653                 goto error_exit;
11654         }
11655         if (mtb->count_actns[RTE_MTR_DROPPED])
11656                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
11657         actions[i++] = mtb->drop_actn;
11658         /* Default rule: lowest priority, match any, actions: drop. */
11659         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
11660                                        actions,
11661                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
11662         if (ret) {
11663                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
11664                 goto error_exit;
11665         }
11666         return 0;
11667 error_exit:
11668         return -1;
11669 }
11670
11671 /**
11672  * Create the needed meter and suffix tables.
11673  * Lock free, (mutex should be acquired by caller).
11674  *
11675  * @param[in] dev
11676  *   Pointer to Ethernet device.
11677  * @param[in] fm
11678  *   Pointer to the flow meter.
11679  *
11680  * @return
11681  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
11682  */
11683 static struct mlx5_meter_domains_infos *
11684 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
11685                        const struct mlx5_flow_meter *fm)
11686 {
11687         struct mlx5_priv *priv = dev->data->dev_private;
11688         struct mlx5_meter_domains_infos *mtb;
11689         int ret;
11690         int i;
11691
11692         if (!priv->mtr_en) {
11693                 rte_errno = ENOTSUP;
11694                 return NULL;
11695         }
11696         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
11697         if (!mtb) {
11698                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
11699                 return NULL;
11700         }
11701         /* Create meter count actions */
11702         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
11703                 struct mlx5_flow_counter *cnt;
11704                 if (!fm->policer_stats.cnt[i])
11705                         continue;
11706                 cnt = flow_dv_counter_get_by_idx(dev,
11707                       fm->policer_stats.cnt[i], NULL);
11708                 mtb->count_actns[i] = cnt->action;
11709         }
11710         /* Create drop action. */
11711         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
11712         if (ret) {
11713                 DRV_LOG(ERR, "Failed to create drop action.");
11714                 goto error_exit;
11715         }
11716         /* Egress meter table. */
11717         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
11718         if (ret) {
11719                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
11720                 goto error_exit;
11721         }
11722         /* Ingress meter table. */
11723         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
11724         if (ret) {
11725                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
11726                 goto error_exit;
11727         }
11728         /* FDB meter table. */
11729         if (priv->config.dv_esw_en) {
11730                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
11731                                                  priv->mtr_color_reg);
11732                 if (ret) {
11733                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
11734                         goto error_exit;
11735                 }
11736         }
11737         return mtb;
11738 error_exit:
11739         flow_dv_destroy_mtr_tbl(dev, mtb);
11740         return NULL;
11741 }
11742
11743 /**
11744  * Destroy domain policer rule.
11745  *
11746  * @param[in] dt
11747  *   Pointer to domain table.
11748  */
11749 static void
11750 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
11751 {
11752         int i;
11753
11754         for (i = 0; i < RTE_MTR_DROPPED; i++) {
11755                 if (dt->policer_rules[i]) {
11756                         claim_zero(mlx5_flow_os_destroy_flow
11757                                    (dt->policer_rules[i]));
11758                         dt->policer_rules[i] = NULL;
11759                 }
11760         }
11761         if (dt->jump_actn) {
11762                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
11763                 dt->jump_actn = NULL;
11764         }
11765 }
11766
11767 /**
11768  * Destroy policer rules.
11769  *
11770  * @param[in] dev
11771  *   Pointer to Ethernet device.
11772  * @param[in] fm
11773  *   Pointer to flow meter structure.
11774  * @param[in] attr
11775  *   Pointer to flow attributes.
11776  *
11777  * @return
11778  *   Always 0.
11779  */
11780 static int
11781 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
11782                               const struct mlx5_flow_meter *fm,
11783                               const struct rte_flow_attr *attr)
11784 {
11785         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
11786
11787         if (!mtb)
11788                 return 0;
11789         if (attr->egress)
11790                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
11791         if (attr->ingress)
11792                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
11793         if (attr->transfer)
11794                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
11795         return 0;
11796 }
11797
11798 /**
11799  * Create specify domain meter policer rule.
11800  *
11801  * @param[in] fm
11802  *   Pointer to flow meter structure.
 * @param[in] dtb
 *   Pointer to the meter domain table info.
11805  * @param[in] mtr_reg_c
11806  *   Color match REG_C.
11807  *
11808  * @return
11809  *   0 on success, -1 otherwise.
11810  */
static int
flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
				    struct mlx5_meter_domain_info *dtb,
				    uint8_t mtr_reg_c)
{
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	void *actions[METER_ACTIONS];
	int i;
	int ret = 0;

	/* Create jump action (to the suffix table), only once per domain. */
	if (!dtb->jump_actn)
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
				(dtb->sfx_tbl->obj, &dtb->jump_actn);
	if (ret) {
		DRV_LOG(ERR, "Failed to create policer jump action.");
		goto error;
	}
	/* One color-match rule per meter color below RTE_MTR_DROPPED. */
	for (i = 0; i < RTE_MTR_DROPPED; i++) {
		int j = 0;

		/* Match the color value written into REG_C by the meter. */
		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
				       rte_col_2_mlx5_col(i), UINT8_MAX);
		/* Optional per-color counter first, then drop or jump. */
		if (mtb->count_actns[i])
			actions[j++] = mtb->count_actns[i];
		if (fm->action[i] == MTR_POLICER_ACTION_DROP)
			actions[j++] = mtb->drop_actn;
		else
			actions[j++] = dtb->jump_actn;
		ret = mlx5_flow_os_create_flow(dtb->color_matcher,
					       (void *)&value, j, actions,
					       &dtb->policer_rules[i]);
		if (ret) {
			DRV_LOG(ERR, "Failed to create policer rule.");
			goto error;
		}
	}
	return 0;
error:
	/* Propagate the low-level failure reason to the caller. */
	rte_errno = errno;
	return -1;
}
11859
11860 /**
11861  * Create policer rules.
11862  *
11863  * @param[in] dev
11864  *   Pointer to Ethernet device.
11865  * @param[in] fm
11866  *   Pointer to flow meter structure.
11867  * @param[in] attr
11868  *   Pointer to flow attributes.
11869  *
11870  * @return
11871  *   0 on success, -1 otherwise.
11872  */
11873 static int
11874 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
11875                              struct mlx5_flow_meter *fm,
11876                              const struct rte_flow_attr *attr)
11877 {
11878         struct mlx5_priv *priv = dev->data->dev_private;
11879         struct mlx5_meter_domains_infos *mtb = fm->mfts;
11880         int ret;
11881
11882         if (attr->egress) {
11883                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
11884                                                 priv->mtr_color_reg);
11885                 if (ret) {
11886                         DRV_LOG(ERR, "Failed to create egress policer.");
11887                         goto error;
11888                 }
11889         }
11890         if (attr->ingress) {
11891                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
11892                                                 priv->mtr_color_reg);
11893                 if (ret) {
11894                         DRV_LOG(ERR, "Failed to create ingress policer.");
11895                         goto error;
11896                 }
11897         }
11898         if (attr->transfer) {
11899                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
11900                                                 priv->mtr_color_reg);
11901                 if (ret) {
11902                         DRV_LOG(ERR, "Failed to create transfer policer.");
11903                         goto error;
11904                 }
11905         }
11906         return 0;
11907 error:
11908         flow_dv_destroy_policer_rules(dev, fm, attr);
11909         return -1;
11910 }
11911
11912 /**
11913  * Validate the batch counter support in root table.
11914  *
11915  * Create a simple flow with invalid counter and drop action on root table to
11916  * validate if batch counter with offset on root table is supported or not.
11917  *
11918  * @param[in] dev
11919  *   Pointer to rte_eth_dev structure.
11920  *
11921  * @return
11922  *   0 on success, a negative errno value otherwise and rte_errno is set.
11923  */
11924 int
11925 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
11926 {
11927         struct mlx5_priv *priv = dev->data->dev_private;
11928         struct mlx5_dev_ctx_shared *sh = priv->sh;
11929         struct mlx5_flow_dv_match_params mask = {
11930                 .size = sizeof(mask.buf),
11931         };
11932         struct mlx5_flow_dv_match_params value = {
11933                 .size = sizeof(value.buf),
11934         };
11935         struct mlx5dv_flow_matcher_attr dv_attr = {
11936                 .type = IBV_FLOW_ATTR_NORMAL,
11937                 .priority = 0,
11938                 .match_criteria_enable = 0,
11939                 .match_mask = (void *)&mask,
11940         };
11941         void *actions[2] = { 0 };
11942         struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
11943         struct mlx5_devx_obj *dcs = NULL;
11944         void *matcher = NULL;
11945         void *flow = NULL;
11946         int i, ret = -1;
11947
11948         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
11949         if (!tbl)
11950                 goto err;
11951         dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
11952                                             NULL, 0, 0, NULL);
11953         if (!dest_tbl)
11954                 goto err;
11955         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
11956         if (!dcs)
11957                 goto err;
11958         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
11959                                                     &actions[0]);
11960         if (ret)
11961                 goto err;
11962         ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11963                                 (dest_tbl->obj, &actions[1]);
11964         if (ret)
11965                 goto err;
11966         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
11967         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
11968                                                &matcher);
11969         if (ret)
11970                 goto err;
11971         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
11972                                        actions, &flow);
11973 err:
11974         /*
11975          * If batch counter with offset is not supported, the driver will not
11976          * validate the invalid offset value, flow create should success.
11977          * In this case, it means batch counter is not supported in root table.
11978          *
11979          * Otherwise, if flow create is failed, counter offset is supported.
11980          */
11981         if (flow) {
11982                 DRV_LOG(INFO, "Batch counter is not supported in root "
11983                               "table. Switch to fallback mode.");
11984                 rte_errno = ENOTSUP;
11985                 ret = -rte_errno;
11986                 claim_zero(mlx5_flow_os_destroy_flow(flow));
11987         } else {
11988                 /* Check matcher to make sure validate fail at flow create. */
11989                 if (!matcher || (matcher && errno != EINVAL))
11990                         DRV_LOG(ERR, "Unexpected error in counter offset "
11991                                      "support detection");
11992                 ret = 0;
11993         }
11994         for (i = 0; i < 2; i++) {
11995                 if (actions[i])
11996                         claim_zero(mlx5_flow_os_destroy_flow_action
11997                                    (actions[i]));
11998         }
11999         if (matcher)
12000                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
12001         if (tbl)
12002                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12003         if (dest_tbl)
12004                 flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
12005         if (dcs)
12006                 claim_zero(mlx5_devx_cmd_destroy(dcs));
12007         return ret;
12008 }
12009
12010 /**
12011  * Query a devx counter.
12012  *
12013  * @param[in] dev
12014  *   Pointer to the Ethernet device structure.
12015  * @param[in] cnt
12016  *   Index to the flow counter.
12017  * @param[in] clear
12018  *   Set to clear the counter statistics.
12019  * @param[out] pkts
12020  *   The statistics value of packets.
12021  * @param[out] bytes
12022  *   The statistics value of bytes.
12023  *
12024  * @return
12025  *   0 on success, otherwise return -1.
12026  */
12027 static int
12028 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12029                       uint64_t *pkts, uint64_t *bytes)
12030 {
12031         struct mlx5_priv *priv = dev->data->dev_private;
12032         struct mlx5_flow_counter *cnt;
12033         uint64_t inn_pkts, inn_bytes;
12034         int ret;
12035
12036         if (!priv->config.devx)
12037                 return -1;
12038
12039         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
12040         if (ret)
12041                 return -1;
12042         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
12043         *pkts = inn_pkts - cnt->hits;
12044         *bytes = inn_bytes - cnt->bytes;
12045         if (clear) {
12046                 cnt->hits = inn_pkts;
12047                 cnt->bytes = inn_bytes;
12048         }
12049         return 0;
12050 }
12051
12052 /**
12053  * Get aged-out flows.
12054  *
12055  * @param[in] dev
12056  *   Pointer to the Ethernet device structure.
12057  * @param[in] context
12058  *   The address of an array of pointers to the aged-out flows contexts.
12059  * @param[in] nb_contexts
12060  *   The length of context array pointers.
12061  * @param[out] error
12062  *   Perform verbose error reporting if not NULL. Initialized in case of
12063  *   error only.
12064  *
12065  * @return
12066  *   how many contexts get in success, otherwise negative errno value.
12067  *   if nb_contexts is 0, return the amount of all aged contexts.
12068  *   if nb_contexts is not 0 , return the amount of aged flows reported
12069  *   in the context array.
 * @note: contexts are collected from the port's aged-counter list.
12071  */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	int nb_flows = 0;

	/* A non-zero count requires an output array to fill. */
	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "Should assign at least one flow or"
					  " context to get if nb_contexts != 0");
	age_info = GET_PORT_AGE_INFO(priv);
	/* The aged-counter list is shared with the aging event handler. */
	rte_spinlock_lock(&age_info->aged_sl);
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			/* Stop filling once the caller's array is full. */
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	/* NOTE(review): presumably re-arms the aging event reporting — confirm. */
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}
12105
12106 /*
12107  * Mutex-protected thunk to lock-free  __flow_dv_translate().
12108  */
12109 static int
12110 flow_dv_translate(struct rte_eth_dev *dev,
12111                   struct mlx5_flow *dev_flow,
12112                   const struct rte_flow_attr *attr,
12113                   const struct rte_flow_item items[],
12114                   const struct rte_flow_action actions[],
12115                   struct rte_flow_error *error)
12116 {
12117         int ret;
12118
12119         flow_dv_shared_lock(dev);
12120         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
12121         flow_dv_shared_unlock(dev);
12122         return ret;
12123 }
12124
12125 /*
12126  * Mutex-protected thunk to lock-free  __flow_dv_apply().
12127  */
static int
flow_dv_apply(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	int rc;

	/* Serialize with the other shared-resource operations. */
	flow_dv_shared_lock(dev);
	rc = __flow_dv_apply(dev, flow, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
12140
12141 /*
12142  * Mutex-protected thunk to lock-free __flow_dv_remove().
12143  */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize with the other shared-resource operations. */
	flow_dv_shared_lock(dev);
	__flow_dv_remove(dev, flow);
	flow_dv_shared_unlock(dev);
}
12151
12152 /*
12153  * Mutex-protected thunk to lock-free __flow_dv_destroy().
12154  */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize with the other shared-resource operations. */
	flow_dv_shared_lock(dev);
	__flow_dv_destroy(dev, flow);
	flow_dv_shared_unlock(dev);
}
12162
12163 /*
12164  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12165  */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	uint32_t counter_idx;

	/* Allocate a non-aging (age == 0) counter under the shared lock. */
	flow_dv_shared_lock(dev);
	counter_idx = flow_dv_counter_alloc(dev, 0);
	flow_dv_shared_unlock(dev);
	return counter_idx;
}
12176
12177 /*
12178  * Mutex-protected thunk to lock-free flow_dv_counter_release().
12179  */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
{
	/* Serialize with the other shared-resource operations. */
	flow_dv_shared_lock(dev);
	flow_dv_counter_release(dev, cnt);
	flow_dv_shared_unlock(dev);
}
12187
12188 /**
12189  * Validate shared action.
12190  * Dispatcher for action type specific validation.
12191  *
12192  * @param[in] dev
12193  *   Pointer to the Ethernet device structure.
12194  * @param[in] conf
12195  *   Shared action configuration.
12196  * @param[in] action
12197  *   The shared action object to validate.
12198  * @param[out] error
12199  *   Perform verbose error reporting if not NULL. Initialized in case of
12200  *   error only.
12201  *
12202  * @return
12203  *   0 on success, otherwise negative errno value.
12204  */
12205 static int
12206 flow_dv_action_validate(struct rte_eth_dev *dev,
12207                         const struct rte_flow_shared_action_conf *conf,
12208                         const struct rte_flow_action *action,
12209                         struct rte_flow_error *error)
12210 {
12211         RTE_SET_USED(conf);
12212         switch (action->type) {
12213         case RTE_FLOW_ACTION_TYPE_RSS:
12214                 return mlx5_validate_action_rss(dev, action, error);
12215         default:
12216                 return rte_flow_error_set(error, ENOTSUP,
12217                                           RTE_FLOW_ERROR_TYPE_ACTION,
12218                                           NULL,
12219                                           "action type not supported");
12220         }
12221 }
12222
12223 /*
12224  * Mutex-protected thunk to lock-free  __flow_dv_action_create().
12225  */
static struct rte_flow_shared_action *
flow_dv_action_create(struct rte_eth_dev *dev,
		      const struct rte_flow_shared_action_conf *conf,
		      const struct rte_flow_action *action,
		      struct rte_flow_error *error)
{
	struct rte_flow_shared_action *handle;

	/* Serialize with the other shared-resource operations. */
	flow_dv_shared_lock(dev);
	handle = __flow_dv_action_create(dev, conf, action, error);
	flow_dv_shared_unlock(dev);
	return handle;
}
12239
12240 /*
12241  * Mutex-protected thunk to lock-free  __flow_dv_action_destroy().
12242  */
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
		       struct rte_flow_shared_action *action,
		       struct rte_flow_error *error)
{
	int rc;

	/* Serialize with the other shared-resource operations. */
	flow_dv_shared_lock(dev);
	rc = __flow_dv_action_destroy(dev, action, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
12255
12256 /*
12257  * Mutex-protected thunk to lock-free  __flow_dv_action_update().
12258  */
12259 static int
12260 flow_dv_action_update(struct rte_eth_dev *dev,
12261                       struct rte_flow_shared_action *action,
12262                       const void *action_conf,
12263                       struct rte_flow_error *error)
12264 {
12265         int ret;
12266
12267         flow_dv_shared_lock(dev);
12268         ret = __flow_dv_action_update(dev, action, action_conf,
12269                                       error);
12270         flow_dv_shared_unlock(dev);
12271         return ret;
12272 }
12273
12274 static int
12275 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12276 {
12277         struct mlx5_priv *priv = dev->data->dev_private;
12278         int ret = 0;
12279
12280         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12281                 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12282                                                 flags);
12283                 if (ret != 0)
12284                         return ret;
12285         }
12286         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12287                 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12288                 if (ret != 0)
12289                         return ret;
12290         }
12291         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12292                 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
12293                 if (ret != 0)
12294                         return ret;
12295         }
12296         return 0;
12297 }
12298
/* Flow driver callback table for the DV (Direct Verbs/Direct Rules) engine. */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	/* Flow rule lifecycle. */
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	/* Metering tables and policer rules. */
	.create_mtr_tbls = flow_dv_create_mtr_tbl,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
	.create_policer_rules = flow_dv_create_policer_rules,
	.destroy_policer_rules = flow_dv_destroy_policer_rules,
	/* Flow counters and flow aging. */
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	/* Shared (indirect) actions — mutex-protected thunks above. */
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	/* Steering domain synchronization. */
	.sync_domain = flow_dv_sync_domain,
};
12321
12322 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
12323